diff --git a/source/tools/detect/irqoff/bpf/irqoff.bpf.c b/source/tools/detect/irqoff/bpf/irqoff.bpf.c
index 8d8f2a34cce6a29dcd9d5d4aa63ded8d61101917..f31b3d43fa226490117e166c5d979b63e3ab4c23 100644
--- a/source/tools/detect/irqoff/bpf/irqoff.bpf.c
+++ b/source/tools/detect/irqoff/bpf/irqoff.bpf.c
@@ -4,7 +4,24 @@
 #include
 #include "../irqoff.h"

+#define PERF_MAX_STACK_DEPTH 127
 #define MAX_ENTRIES 10240
+#define BPF_F_FAST_STACK_CMP (1ULL << 9)
+#define KERN_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
+
+struct bpf_map_def SEC("maps") args_map = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(int),
+	.value_size = sizeof(struct args),
+	.max_entries = 1,
+};
+
+struct bpf_map_def SEC("maps") stackmap = {
+	.type = BPF_MAP_TYPE_STACK_TRACE,
+	.key_size = sizeof(u32),
+	.value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
+	.max_entries = 10000,
+};

 struct {
 	__uint(type, BPF_MAP_TYPE_ARRAY);
@@ -107,6 +124,7 @@ int on_irqoff_event(struct bpf_perf_event_data *ctx)
 		event.delay = delta/1000;
 		event.pid = bpf_get_current_pid_tgid();
 		bpf_get_current_comm(&event.comm, sizeof(event.comm));
+		event.ret = bpf_get_stackid(ctx, &stackmap, KERN_STACKID_FLAGS);
 		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event));
 	}

diff --git a/source/tools/detect/irqoff/irqoff.c b/source/tools/detect/irqoff/irqoff.c
index 9c3e239ccf636813bed1ed7d8b2e4705e5bb29ce..2197cd1a23fb8da1df8741c10b7ff93e7864dea5 100644
--- a/source/tools/detect/irqoff/irqoff.c
+++ b/source/tools/detect/irqoff/irqoff.c
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -26,9 +27,14 @@ struct env {
 	.duration = 10,
 };

-__u64 threshold;
+static struct ksym *ksyms;
+static int stackmp_fd;
+static __u64 threshold;
 volatile sig_atomic_t exiting = 0;

+void print_stack(int fd, __u32 ret, struct ksym *syms);
+int load_kallsyms(struct ksym **pksyms);
+
 const char *argp_program_version = "irqoff 0.1";
 const char argp_program_doc[] =
 "Catch the irq-off time more than threshold.\n"
@@ -148,6 +154,7 @@ void handle_event(void *ctx, int cpu, void *data, __u32 data_sz)
 	strftime(ts, sizeof(ts), "%F_%H:%M:%S", tm);
 	fprintf(stdout, "%-21s %-5d %-15s %-8d %-10llu\n",
 		ts, e->cpu, e->comm, e->pid, e->delay);
+	print_stack(stackmp_fd, e->ret, ksyms);
 }

 void irqoff_handler(int poll_fd)
@@ -217,6 +224,11 @@ int main(int argc, char **argv)
 	libbpf_set_print(libbpf_print_fn);

 	bump_memlock_rlimit();
+	err = load_kallsyms(&ksyms);
+	if (err) {
+		fprintf(stderr, "Failed to load kallsyms\n");
+		return err;
+	}

 	nr_cpus = libbpf_num_possible_cpus();
 	if (nr_cpus < 0) {
@@ -243,6 +255,7 @@

 	map_fd = bpf_map__fd(obj->maps.argmap);
 	ent_fd = bpf_map__fd(obj->maps.events);
+	stackmp_fd = bpf_map__fd(obj->maps.stackmap);

 	args_key = 0;
 	args.threshold = threshold;
diff --git a/source/tools/detect/irqoff/irqoff.h b/source/tools/detect/irqoff/irqoff.h
index 4302797592c1329c67288a04204341156c5b2bd3..cb93b793aca293ca74bdec7b797cb20fbcd55b2e 100644
--- a/source/tools/detect/irqoff/irqoff.h
+++ b/source/tools/detect/irqoff/irqoff.h
@@ -14,9 +14,14 @@ struct args {
 };

 struct event {
-	__u32 pid, cpu;
+	__u32 ret, pid, cpu;
 	__u64 delay;
 	char comm[TASK_COMM_LEN];
 };

+struct ksym {
+	long addr;
+	char *name;
+};
+
 #endif /* __LLCSTAT_H */
diff --git a/source/tools/detect/irqoff/stacktrace.c b/source/tools/detect/irqoff/stacktrace.c
new file mode 100644
index 0000000000000000000000000000000000000000..d91207cf4de7ad213a704dd023aa9b7c19369cb9
--- /dev/null
+++ b/source/tools/detect/irqoff/stacktrace.c
@@ -0,0 +1,118 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <linux/types.h>
+#include <bpf/bpf.h>
+#include "irqoff.h"
+
+#define MAX_SYMS 300000
+#define PERF_MAX_STACK_DEPTH 127
+
+static int sym_cnt;
+
+static int ksym_cmp(const void *p1, const void *p2)
+{
+	long a = ((struct ksym *)p1)->addr;
+	long b = ((struct ksym *)p2)->addr;
+
+	/* compare directly: subtracting addresses can overflow int */
+	return (a > b) - (a < b);
+}
+
+/* parse /proc/kallsyms into a sorted ksym array */
+int load_kallsyms(struct ksym **pksyms)
+{
+	struct ksym *syms;
+	FILE *f = fopen("/proc/kallsyms", "r");
+	char func[256], buf[256];
+	char symbol;
+	void *addr;
+	int i = 0;
+
+	if (!f)
+		return -ENOENT;
+
+	syms = malloc(MAX_SYMS * sizeof(struct ksym));
+	if (!syms) {
+		fclose(f);
+		return -ENOMEM;
+	}
+
+	while (!feof(f)) {
+		if (!fgets(buf, sizeof(buf), f))
+			break;
+		if (sscanf(buf, "%p %c %255s", &addr, &symbol, func) != 3)
+			break;
+		if (!addr)
+			continue;
+		syms[i].addr = (long) addr;
+		syms[i].name = strdup(func);
+		i++;
+		if (i >= MAX_SYMS) {
+			printf("Warning: no space on ksym array!\n");
+			break;
+		}
+	}
+	fclose(f);
+	sym_cnt = i;
+	qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
+	*pksyms = syms;
+	return 0;
+}
+
+/* binary search: return the symbol covering key, or _stext when out of range */
+struct ksym *ksym_search(long key, struct ksym *syms)
+{
+	int start = 0, end = sym_cnt;
+
+	/* kallsyms not loaded. return NULL */
+	if (sym_cnt <= 0)
+		return NULL;
+
+	while (start < end) {
+		int mid = start + (end - start) / 2;
+
+		if (key < syms[mid].addr)
+			end = mid;
+		else if (key > syms[mid].addr)
+			start = mid + 1;
+		else
+			return &syms[mid];
+	}
+
+	if (start >= 1 && start < sym_cnt &&
+	    syms[start - 1].addr < key && key < syms[start].addr)
+		/* valid ksym */
+		return &syms[start - 1];
+
+	/* out of range. return _stext */
+	return &syms[0];
+}
+
+static void print_ksym(__u64 addr, struct ksym *psym)
+{
+	struct ksym *sym;
+
+	if (!addr)
+		return;
+
+	sym = ksym_search(addr, psym);
+	if (!sym)
+		return;
+	fprintf(stdout, "<0x%llx> %s\n", addr, sym->name);
+}
+
+/* look up the stack id in the BPF stackmap and print each kernel frame */
+void print_stack(int fd, __u32 ret, struct ksym *syms)
+{
+	int i;
+	__u64 ip[PERF_MAX_STACK_DEPTH] = {};
+
+	if (bpf_map_lookup_elem(fd, &ret, &ip) == 0) {
+		for (i = 0; i < PERF_MAX_STACK_DEPTH; i++)
+			print_ksym(ip[i], syms);
+	} else {
+		if ((int)(ret) < 0)
+			fprintf(stdout, "<0x0000000000000000>:error=%d\n", (int)(ret));
+	}
+}
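
For reference, a minimal usage sketch of how the new helpers fit together (all identifiers come from this patch; the handler shown here is illustrative only, not part of the change):

/* Userspace side: resolve the stack id carried in each perf event.
 * load_kallsyms() runs once at startup; print_stack() resolves the id
 * that bpf_get_stackid() stored in event.ret against the stackmap fd.
 */
#include <stdio.h>
#include "irqoff.h"

int load_kallsyms(struct ksym **pksyms);
void print_stack(int fd, __u32 ret, struct ksym *syms);

static struct ksym *ksyms;
static int stackmp_fd;	/* from bpf_map__fd(obj->maps.stackmap) */

static void show_event(const struct event *e)	/* illustrative handler */
{
	fprintf(stdout, "cpu=%u comm=%s pid=%u delay=%lluus\n",
		e->cpu, e->comm, e->pid, e->delay);
	print_stack(stackmp_fd, e->ret, ksyms);
}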