From 7f90b78c0d1adf31237a93085fed895b163dde70 Mon Sep 17 00:00:00 2001
From: Tengda Wu <wutengda@huaweicloud.com>
Date: Mon, 9 Jun 2025 13:25:45 +0000
Subject: [PATCH 1/6] perf stat: Support inherit events during fork() for
 bperf

mainline inclusion
from mainline-v6.13-rc1
commit 07dc3a6de33098b0dd2ab73ef43fe721abed4825
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ICDJO7
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=07dc3a6de33098b0dd2ab73ef43fe721abed4825

--------------------------------

bperf has a nice ability to share PMUs, but it still does not support
inheriting events during fork(), resulting in deviations in its stat
results compared with perf.

perf stat result:

$ ./perf stat -e cycles,instructions -- ./perf test -w sqrtloop

 Performance counter stats for './perf test -w sqrtloop':

     2,316,038,116      cycles
     2,859,350,725      instructions

       1.009603637 seconds time elapsed

       1.004196000 seconds user
       0.003950000 seconds sys

bperf stat result:

$ ./perf stat --bpf-counters -e cycles,instructions -- \
      ./perf test -w sqrtloop

 Performance counter stats for './perf test -w sqrtloop':

        18,762,093      cycles
        23,487,766      instructions

       1.008913769 seconds time elapsed

       1.003248000 seconds user
       0.004069000 seconds sys

In order to support event inheritance, two new bpf programs are added
to monitor the fork and exit of tasks respectively. When a task is
created, add it to the filter map to enable counting, and reuse the
`accum_key` of its parent task to count together with the parent task.
When a task exits, remove it from the filter map to disable counting.

After support:

$ ./perf stat --bpf-counters -e cycles,instructions -- \
      ./perf test -w sqrtloop

 Performance counter stats for './perf test -w sqrtloop':

     2,316,252,189      cycles
     2,859,946,547      instructions

       1.009422314 seconds time elapsed

       1.003597000 seconds user
       0.004270000 seconds sys

Signed-off-by: Tengda Wu <wutengda@huaweicloud.com>
Cc: song@kernel.org
Cc: bpf@vger.kernel.org
Link: https://lore.kernel.org/r/20241021110201.325617-2-wutengda@huaweicloud.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Xiaomeng Zhang
---
 tools/perf/builtin-stat.c                     |  1 +
 tools/perf/util/bpf_counter.c                 | 35 +++++--
 tools/perf/util/bpf_skel/bperf_follower.bpf.c | 98 +++++++++++++++++--
 tools/perf/util/bpf_skel/bperf_u.h            |  5 +
 tools/perf/util/target.h                      |  1 +
 5 files changed, 126 insertions(+), 14 deletions(-)

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 9692ebdd7f11..712c170eb778 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -2624,6 +2624,7 @@ int cmd_stat(int argc, const char **argv)
 	} else if (big_num_opt == 0) /* User passed --no-big-num */
 		stat_config.big_num = false;
 
+	target.inherit = !stat_config.no_inherit;
 	err = target__validate(&target);
 	if (err) {
 		target__strerror(&target, err, errbuf, BUFSIZ);
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index 6732cbbcf9b3..8afa07917312 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -391,6 +391,7 @@ static int bperf_check_target(struct evsel *evsel,
 }
 
 static struct perf_cpu_map *all_cpu_map;
+static __u32 filter_entry_cnt;
 
 static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
 				       struct perf_event_attr_map_entry *entry)
@@ -441,12 +442,32 @@ static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
 	return err;
 }
 
+static int bperf_attach_follower_program(struct bperf_follower_bpf *skel,
+					 enum bperf_filter_type filter_type,
+					 bool inherit)
+{
+	struct bpf_link *link;
+	int err = 0;
+
+	if ((filter_type == BPERF_FILTER_PID ||
+	     filter_type == BPERF_FILTER_TGID) && inherit)
+		/* attach all follower bpf progs to enable event inheritance */
+		err = bperf_follower_bpf__attach(skel);
+	else {
+		link = bpf_program__attach(skel->progs.fexit_XXX);
+		if (IS_ERR(link))
+			err = PTR_ERR(link);
+	}
+
+	return err;
+}
+
 static int bperf__load(struct evsel *evsel, struct target *target)
 {
 	struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff};
 	int attr_map_fd, diff_map_fd = -1, err;
 	enum bperf_filter_type filter_type;
-	__u32 filter_entry_cnt, i;
+	__u32 i;
 
 	if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
 		return -1;
@@ -526,9 +547,6 @@ static int bperf__load(struct evsel *evsel, struct target *target)
 	/* set up reading map */
 	bpf_map__set_max_entries(evsel->follower_skel->maps.accum_readings,
 				 filter_entry_cnt);
-	/* set up follower filter based on target */
-	bpf_map__set_max_entries(evsel->follower_skel->maps.filter,
-				 filter_entry_cnt);
 	err = bperf_follower_bpf__load(evsel->follower_skel);
 	if (err) {
 		pr_err("Failed to load follower skeleton\n");
@@ -540,6 +558,7 @@ static int bperf__load(struct evsel *evsel, struct target *target)
 	for (i = 0; i < filter_entry_cnt; i++) {
 		int filter_map_fd;
 		__u32 key;
+		struct bperf_filter_value fval = { i, 0 };
 
 		if (filter_type == BPERF_FILTER_PID ||
 		    filter_type == BPERF_FILTER_TGID)
@@ -550,12 +569,14 @@ static int bperf__load(struct evsel *evsel, struct target *target)
 			break;
 
 		filter_map_fd = bpf_map__fd(evsel->follower_skel->maps.filter);
-		bpf_map_update_elem(filter_map_fd, &key, &i, BPF_ANY);
+		bpf_map_update_elem(filter_map_fd, &key, &fval, BPF_ANY);
 	}
 
 	evsel->follower_skel->bss->type = filter_type;
+	evsel->follower_skel->bss->inherit = target->inherit;
 
-	err = bperf_follower_bpf__attach(evsel->follower_skel);
+	err = bperf_attach_follower_program(evsel->follower_skel, filter_type,
+					    target->inherit);
 
 out:
 	if (err && evsel->bperf_leader_link_fd >= 0)
@@ -620,7 +641,7 @@ static int bperf__read(struct evsel *evsel)
 	bperf_sync_counters(evsel);
 	reading_map_fd = bpf_map__fd(skel->maps.accum_readings);
 
-	for (i = 0; i < bpf_map__max_entries(skel->maps.accum_readings); i++) {
+	for (i = 0; i < filter_entry_cnt; i++) {
 		struct perf_cpu entry;
 		__u32 cpu;
 
diff --git a/tools/perf/util/bpf_skel/bperf_follower.bpf.c b/tools/perf/util/bpf_skel/bperf_follower.bpf.c
index f193998530d4..0595063139a3 100644
--- a/tools/perf/util/bpf_skel/bperf_follower.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_follower.bpf.c
@@ -5,6 +5,8 @@
 #include <bpf/bpf_tracing.h>
 #include "bperf_u.h"
 
+#define MAX_ENTRIES 102400
+
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 	__uint(key_size, sizeof(__u32));
@@ -22,25 +24,29 @@
 struct {
 	__uint(type, BPF_MAP_TYPE_HASH);
 	__uint(key_size, sizeof(__u32));
-	__uint(value_size, sizeof(__u32));
+	__uint(value_size, sizeof(struct bperf_filter_value));
+	__uint(max_entries, MAX_ENTRIES);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
 } filter SEC(".maps");
 
 enum bperf_filter_type type = 0;
 int enabled = 0;
+int inherit;
 
 SEC("fexit/XXX")
 int BPF_PROG(fexit_XXX)
 {
 	struct bpf_perf_event_value *diff_val, *accum_val;
 	__u32 filter_key, zero = 0;
-	__u32 *accum_key;
+	__u32 accum_key;
+	struct bperf_filter_value *fval;
 
 	if (!enabled)
 		return 0;
 
 	switch (type) {
 	case BPERF_FILTER_GLOBAL:
-		accum_key = &zero;
+		accum_key = zero;
 		goto do_add;
 	case BPERF_FILTER_CPU:
 		filter_key = bpf_get_smp_processor_id();
@@ -49,22 +55,34 @@ int BPF_PROG(fexit_XXX)
 		filter_key = bpf_get_current_pid_tgid() & 0xffffffff;
 		break;
 	case BPERF_FILTER_TGID:
-		filter_key = bpf_get_current_pid_tgid() >> 32;
+		/* Use pid as the filter_key to exclude new task counts
+		 * when inherit is disabled. Don't worry about the existing
+		 * children in TGID losing their counts, bpf_counter has
+		 * already added them to the filter map via perf_thread_map
+		 * before this bpf prog runs.
+		 */
+		filter_key = inherit ?
+			     bpf_get_current_pid_tgid() >> 32 :
+			     bpf_get_current_pid_tgid() & 0xffffffff;
 		break;
 	default:
 		return 0;
 	}
 
-	accum_key = bpf_map_lookup_elem(&filter, &filter_key);
-	if (!accum_key)
+	fval = bpf_map_lookup_elem(&filter, &filter_key);
+	if (!fval)
 		return 0;
 
+	accum_key = fval->accum_key;
+	if (fval->exited)
+		bpf_map_delete_elem(&filter, &filter_key);
+
 do_add:
 	diff_val = bpf_map_lookup_elem(&diff_readings, &zero);
 	if (!diff_val)
 		return 0;
 
-	accum_val = bpf_map_lookup_elem(&accum_readings, accum_key);
+	accum_val = bpf_map_lookup_elem(&accum_readings, &accum_key);
 	if (!accum_val)
 		return 0;
 
@@ -75,4 +93,70 @@ int BPF_PROG(fexit_XXX)
 	return 0;
 }
 
+/* The program is only used for PID or TGID filter types. */
+SEC("tp_btf/task_newtask")
+int BPF_PROG(on_newtask, struct task_struct *task, __u64 clone_flags)
+{
+	__u32 parent_key, child_key;
+	struct bperf_filter_value *parent_fval;
+	struct bperf_filter_value child_fval = { 0 };
+
+	if (!enabled)
+		return 0;
+
+	switch (type) {
+	case BPERF_FILTER_PID:
+		parent_key = bpf_get_current_pid_tgid() & 0xffffffff;
+		child_key = task->pid;
+		break;
+	case BPERF_FILTER_TGID:
+		parent_key = bpf_get_current_pid_tgid() >> 32;
+		child_key = task->tgid;
+		if (child_key == parent_key)
+			return 0;
+		break;
+	default:
+		return 0;
+	}
+
+	/* Check if the current task is one of the target tasks to be counted */
+	parent_fval = bpf_map_lookup_elem(&filter, &parent_key);
+	if (!parent_fval)
+		return 0;
+
+	/* Start counting for the new task by adding it into filter map,
+	 * inherit the accum key of its parent task so that they can be
+	 * counted together.
+	 */
+	child_fval.accum_key = parent_fval->accum_key;
+	child_fval.exited = 0;
+	bpf_map_update_elem(&filter, &child_key, &child_fval, BPF_NOEXIST);
+
+	return 0;
+}
+
+/* The program is only used for PID or TGID filter types. */
+SEC("tp_btf/sched_process_exit")
+int BPF_PROG(on_exittask, struct task_struct *task)
+{
+	__u32 pid;
+	struct bperf_filter_value *fval;
+
+	if (!enabled)
+		return 0;
+
+	/* Stop counting for this task by removing it from filter map.
+	 * For TGID type, if the pid can be found in the map, it means that
+	 * this pid belongs to the leader task. After the task exits, the
+	 * tgid of its child tasks (if any) will be 1, so the pid can be
+	 * safely removed.
+	 */
+	pid = task->pid;
+	fval = bpf_map_lookup_elem(&filter, &pid);
+	if (fval)
+		fval->exited = 1;
+
+	return 0;
+}
+
 char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/perf/util/bpf_skel/bperf_u.h b/tools/perf/util/bpf_skel/bperf_u.h
index 1ce0c2c905c1..4a4a753980be 100644
--- a/tools/perf/util/bpf_skel/bperf_u.h
+++ b/tools/perf/util/bpf_skel/bperf_u.h
@@ -11,4 +11,9 @@ enum bperf_filter_type {
 	BPERF_FILTER_TGID,
 };
 
+struct bperf_filter_value {
+	__u32 accum_key;
+	__u8 exited;
+};
+
 #endif /* __BPERF_STAT_U_H */
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
index d582cae8e105..2ee2cc30340f 100644
--- a/tools/perf/util/target.h
+++ b/tools/perf/util/target.h
@@ -17,6 +17,7 @@ struct target {
 	bool		default_per_cpu;
 	bool		per_thread;
 	bool		use_bpf;
+	bool		inherit;
 	int		initial_delay;
 	const char	*attr_map;
 };
-- 
Gitee
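
A quick way to exercise the new fork/exit programs from the command
line, assuming a perf binary built with BPF skeleton support
(BUILD_BPF_SKEL=1); the event list and the sqrtloop workload both come
from the commit message above, and the exact counts will vary by
machine:

$ perf stat --bpf-counters -e cycles,instructions -- \
      perf test -w sqrtloop
$ perf stat --bpf-counters --no-inherit -e cycles,instructions -- \
      perf test -w sqrtloop

The first run should now report counts close to plain perf stat, since
forked children are accumulated into their parent's accum_key entry;
the second keeps the old per-task behaviour, because builtin-stat.c
wires target.inherit to !stat_config.no_inherit.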
From 4539062a34ae0ad7c5e0a8f9420097b15e3886c5 Mon Sep 17 00:00:00 2001
From: Ian Rogers <irogers@google.com>
Date: Mon, 9 Jun 2025 13:25:46 +0000
Subject: [PATCH 2/6] perf test bpf-counters: Add test for BPF event modifier

mainline inclusion
from mainline-v6.10-rc1
commit d9bd1d4264baddf7ab8baae86e91674d369f22de
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ICDJO7
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=d9bd1d4264baddf7ab8baae86e91674d369f22de

--------------------------------

Refactor the test to better enable sharing of logic, to give an idea of
progress, and to introduce test functions. Add a test of measuring both
cycles and cycles:b simultaneously.

Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter
Cc: Alexander Shishkin
Cc: Athira Rajeev
Cc: Ingo Molnar
Cc: Jiri Olsa
Cc: Kan Liang
Cc: Mark Rutland
Cc: Namhyung Kim
Cc: Peter Zijlstra
Cc: Ravi Bangoria
Cc: Song Liu
Cc: Thomas Richter
Link: https://lore.kernel.org/r/20240416170014.985191-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo
Signed-off-by: Xiaomeng Zhang
---
 tools/perf/tests/shell/stat_bpf_counters.sh | 75 ++++++++++++++-------
 1 file changed, 52 insertions(+), 23 deletions(-)

diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index a87bb2814b4c..ac1035a56e05 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -4,21 +4,59 @@
 
 set -e
 
+workload="perf bench sched messaging -g 1 -l 100 -t"
+
 # check whether $2 is within +/- 10% of $1
 compare_number()
 {
-	first_num=$1
-	second_num=$2
-
-	# upper bound is first_num * 110%
-	upper=$(expr $first_num + $first_num / 10 )
-	# lower bound is first_num * 90%
-	lower=$(expr $first_num - $first_num / 10 )
-
-	if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
-		echo "The difference between $first_num and $second_num are greater than 10%."
-		exit 1
-	fi
+	first_num=$1
+	second_num=$2
+
+	# upper bound is first_num * 120%
+	upper=$(expr $first_num + $first_num / 5 )
+	# lower bound is first_num * 80%
+	lower=$(expr $first_num - $first_num / 5 )
+
+	if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
+		echo "The difference between $first_num and $second_num are greater than 20%."
+		exit 1
+	fi
+}
+
+check_counts()
+{
+	base_cycles=$1
+	bpf_cycles=$2
+
+	if [ "$base_cycles" = "<not" ]; then
+		echo "Skipping: cycles event not counted"
+		exit 2
+	fi
+	if [ "$bpf_cycles" = "<not" ]; then
+		echo "Failed: cycles not counted with --bpf-counters"
+		exit 1
+	fi
+}
+
+test_bpf_counters()
+{
+	printf "Testing --bpf-counters "
+	base_cycles=$(perf stat --no-big-num -e cycles -- $workload 2>&1 | awk '/cycles/ {print $1}')
+	bpf_cycles=$(perf stat --no-big-num --bpf-counters -e cycles -- $workload 2>&1 | awk '/cycles/ {print $1}')
+	check_counts $base_cycles $bpf_cycles
+	compare_number $base_cycles $bpf_cycles
+	echo "[Success]"
+}
+
+test_bpf_modifier()
+{
+	printf "Testing bpf event modifier "
+	stat_output=$(perf stat --no-big-num -e cycles/name=base_cycles/,cycles/name=bpf_cycles/b -- $workload 2>&1)
+	base_cycles=$(echo "$stat_output"| awk '/base_cycles/ {print $1}')
+	bpf_cycles=$(echo "$stat_output"| awk '/bpf_cycles/ {print $1}')
+	check_counts $base_cycles $bpf_cycles
+	compare_number $base_cycles $bpf_cycles
+	echo "[Success]"
+}
 
 # skip if --bpf-counters is not supported
@@ -30,16 +68,7 @@ if ! perf stat -e cycles --bpf-counters true > /dev/null 2>&1; then
 	exit 2
 fi
 
-base_cycles=$(perf stat --no-big-num -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
-if [ "$base_cycles" = "<not" ]; then
-	echo "Skipping: cycles event not counted"
-	exit 2
-fi
-
-bpf_cycles=$(perf stat --no-big-num --bpf-counters -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
-if [ "$bpf_cycles" = "<not" ]; then
-	echo "Failed: cycles not counted with --bpf-counters"
-	exit 1
-fi
-
-compare_number $base_cycles $bpf_cycles
-echo "[Success]"
+test_bpf_counters
+test_bpf_modifier
+
+exit 0
-- 
Gitee

From: Veronika Molnarova <vmolnaro@redhat.com>
Date: Mon, 9 Jun 2025 13:25:47 +0000
Subject: [PATCH 3/6] perf test stat_bpf_counter.sh: Stabilize the test
 results

mainline inclusion
from mainline-v6.10-rc1
commit d9bd1d4264baddf7ab8baae86e91674d369f22de
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ICDJO7
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=d9bd1d4264baddf7ab8baae86e91674d369f22de

-------------------------------

The test has been failing for some time when two separate runs of a perf
benchmark are recorded for the cycles event and their counts compared,
one run recorded with the --bpf-counters option and one without it. The
two counts are expected to be within a certain range of each other: the
allowed difference was first set to 10% and later raised to 20%.
However, the test case keeps failing on certain architectures, as
recording the provided benchmark can produce completely different counts
depending on the current load of the system.

Sampling two separate runs on intel-eaglestream-spr-13 of
"perf stat --no-big-num -e cycles -- perf bench sched messaging -g 1 -l 100 -t":

 Performance counter stats for 'perf bench sched messaging -g 1 -l 100 -t':

         396782898      cycles

       0.010051983 seconds time elapsed

       0.008664000 seconds user
       0.097058000 seconds sys

 Performance counter stats for 'perf bench sched messaging -g 1 -l 100 -t':

        1431133032      cycles

       0.021803714 seconds time elapsed

       0.023377000 seconds user
       0.349918000 seconds sys

, which ranges from roughly 400 million to 1400 million cycles.

Instead of cycles, record the instructions event, which provides much
more stable values. At the same time, change the tested workload to one
of the testing workloads provided by perf that is not based on the
scheduler, which removes another dependency on the current load.

Sampling the instructions event with the new workload provides much more
stable results on intel-eaglestream-spr-13 with
"perf stat --no-big-num -e instructions -- perf test -w brstack":

 Performance counter stats for 'perf test -w brstack':

          64584494      instructions

       0.009173945 seconds time elapsed

       0.007262000 seconds user
       0.002071000 seconds sys

 Performance counter stats for 'perf test -w brstack':

          64672669      instructions

       0.008888135 seconds time elapsed

       0.005018000 seconds user
       0.004018000 seconds sys

Signed-off-by: Veronika Molnarova <vmolnaro@redhat.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: mpetlan@redhat.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240625092001.10909-1-vmolnaro@redhat.com
Signed-off-by: Xiaomeng Zhang
---
 tools/perf/tests/shell/stat_bpf_counters.sh | 36 ++++++++++-----------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index ac1035a56e05..892ddccae1e0 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -4,7 +4,7 @@
 
 set -e
 
-workload="perf bench sched messaging -g 1 -l 100 -t"
+workload="perf test -w brstack"
 
 # check whether $2 is within +/- 10% of $1
 compare_number()
@@ -25,15 +25,15 @@ compare_number()
 
 check_counts()
 {
-	base_cycles=$1
-	bpf_cycles=$2
+	base_instructions=$1
+	bpf_instructions=$2
 
-	if [ "$base_cycles" = "<not" ]; then
-		echo "Skipping: cycles event not counted"
+	if [ "$base_instructions" = "<not" ]; then
+		echo "Skipping: instructions event not counted"
 		exit 2
 	fi
-	if [ "$bpf_cycles" = "<not" ]; then
-		echo "Failed: cycles not counted with --bpf-counters"
+	if [ "$bpf_instructions" = "<not" ]; then
+		echo "Failed: instructions not counted with --bpf-counters"
 		exit 1
 	fi
 }
 
 test_bpf_counters()
 {
 	printf "Testing --bpf-counters "
-	base_cycles=$(perf stat --no-big-num -e cycles -- $workload 2>&1 | awk '/cycles/ {print $1}')
-	bpf_cycles=$(perf stat --no-big-num --bpf-counters -e cycles -- $workload 2>&1 | awk '/cycles/ {print $1}')
-	check_counts $base_cycles $bpf_cycles
-	compare_number $base_cycles $bpf_cycles
+	base_instructions=$(perf stat --no-big-num -e instructions -- $workload 2>&1 | awk '/instructions/ {print $1}')
+	bpf_instructions=$(perf stat --no-big-num --bpf-counters -e instructions -- $workload 2>&1 | awk '/instructions/ {print $1}')
+	check_counts $base_instructions $bpf_instructions
+	compare_number $base_instructions $bpf_instructions
 	echo "[Success]"
 }
 
 test_bpf_modifier()
 {
 	printf "Testing bpf event modifier "
-	stat_output=$(perf stat --no-big-num -e cycles/name=base_cycles/,cycles/name=bpf_cycles/b -- $workload 2>&1)
-	base_cycles=$(echo "$stat_output"| awk '/base_cycles/ {print $1}')
-	bpf_cycles=$(echo "$stat_output"| awk '/bpf_cycles/ {print $1}')
-	check_counts $base_cycles $bpf_cycles
-	compare_number $base_cycles $bpf_cycles
+	stat_output=$(perf stat --no-big-num -e instructions/name=base_instructions/,instructions/name=bpf_instructions/b -- $workload 2>&1)
+	base_instructions=$(echo "$stat_output"| awk '/base_instructions/ {print $1}')
+	bpf_instructions=$(echo "$stat_output"| awk '/bpf_instructions/ {print $1}')
+	check_counts $base_instructions $bpf_instructions
+	compare_number $base_instructions $bpf_instructions
 	echo "[Success]"
 }
 
 # skip if --bpf-counters is not supported
-if ! perf stat -e cycles --bpf-counters true > /dev/null 2>&1; then
+if ! perf stat -e instructions --bpf-counters true > /dev/null 2>&1; then
 	if [ "$1" = "-v" ]; then
 		echo "Skipping: --bpf-counters not supported"
-		perf --no-pager stat -e cycles --bpf-counters true || true
+		perf --no-pager stat -e instructions --bpf-counters true || true
 	fi
 	exit 2
 fi
-- 
Gitee

From 0a8d18ac29ab6f9f450196cbf2e476a905ad8a5e Mon Sep 17 00:00:00 2001
From: Tengda Wu <wutengda@huaweicloud.com>
Date: Mon, 9 Jun 2025 13:25:48 +0000
Subject: [PATCH 4/6] perf test: Use sqrtloop workload to test bperf event

mainline inclusion
from mainline-v6.13-rc1
commit d36e5b36a2928b30e09ff59ce5ce2d5df935176e
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ICDJO7
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=d36e5b36a2928b30e09ff59ce5ce2d5df935176e

-------------------------------

Replace the `brstack` workload with the `sqrtloop` workload, because
`sqrtloop` contains a fork(), which makes it suitable for testing the
bperf event inheritance feature.

Signed-off-by: Tengda Wu <wutengda@huaweicloud.com>
Cc: song@kernel.org
Cc: bpf@vger.kernel.org
Link: https://lore.kernel.org/r/20241021110201.325617-3-wutengda@huaweicloud.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Xiaomeng Zhang
---
 tools/perf/tests/shell/stat_bpf_counters.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index 892ddccae1e0..d5c135225cc1 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -4,7 +4,7 @@
 
 set -e
 
-workload="perf test -w brstack"
+workload="perf test -w sqrtloop"
 
 # check whether $2 is within +/- 10% of $1
 compare_number()
-- 
Gitee
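
With sqrtloop in place, the shell test above exercises the inheritance
path end to end. A minimal sketch of a manual run, assuming the perf
source tree layout used by the diffs above and a perf binary with
--bpf-counters support in PATH:

$ cd tools/perf/tests/shell
$ ./stat_bpf_counters.sh -v

An exit status of 2 means the test skipped itself, matching the exit 2
paths the script takes when --bpf-counters or the event is unsupported.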
From 626057043bf76032895c15fe32e979bdb3248dc5 Mon Sep 17 00:00:00 2001
From: Xiaomeng Zhang
Date: Mon, 9 Jun 2025 13:25:49 +0000
Subject: [PATCH 5/6] perf stat: Increase perf_attr_map entries

Offering: HULK
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ICDJO7

--------------------------------

bperf restricts the size of perf_attr_map's entries to 16, which cannot
hold all events in many scenarios. A typical example is when the user
specifies `-a -ddd` ([0]). Other cases, such as top-down analysis, often
require more than 16 events to be collected simultaneously.

Fix this by increasing the perf_attr_map entries to 100, and introduce
an event number check in bperf__load() so that users receive a friendly
prompt when the event limit is reached.

[0] https://lore.kernel.org/all/20230104064402.1551516-3-namhyung@kernel.org/

Fixes: 7fac83aaf2ee ("perf stat: Introduce 'bperf' to share hardware PMCs with BPF")
Signed-off-by: Tengda Wu <wutengda@huaweicloud.com>
Signed-off-by: Xiaomeng Zhang
---
 tools/perf/util/bpf_counter.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index 8afa07917312..57fc6bab64ba 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -28,7 +28,7 @@
 #include "bpf_skel/bperf_leader.skel.h"
 #include "bpf_skel/bperf_follower.skel.h"
 
-#define ATTR_MAP_SIZE 16
+#define ATTR_MAP_SIZE 100
 
 static inline void *u64_to_ptr(__u64 ptr)
 {
@@ -469,6 +469,12 @@ static int bperf__load(struct evsel *evsel, struct target *target)
 	enum bperf_filter_type filter_type;
 	__u32 i;
 
+	if (evsel->evlist->core.nr_entries > ATTR_MAP_SIZE) {
+		pr_err("Too many events, please limit to %d or less\n",
+		       ATTR_MAP_SIZE);
+		return -1;
+	}
+
 	if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
 		return -1;
-- 
Gitee

From 4f081c0b085e5b0335b1ad1a715e67284ef0b1ed Mon Sep 17 00:00:00 2001
From: Xiaomeng Zhang
Date: Mon, 9 Jun 2025 13:25:50 +0000
Subject: [PATCH 6/6] perf stat: Fix incorrect display of bperf when event
 count is 0

Offering: HULK
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ICDJO7

--------------------------------

There are 2 possible reasons for an event count of 0: not supported and
not counted. Perf distinguishes between these two possibilities through
`evsel->supported`, but in bperf this value is always false. This is
because bperf breaks out of, or continues past, the evlist__for_each_cpu
loop in __run_perf_stat() before the `counter->supported` assignment is
reached, so bperf incorrectly displays <not supported> when the count
is 0.

The most direct way to fix this is to assign `evsel->supported` when
opening an event in bperf. However, since bperf only opens events when
loading the leader, the followers are not aware of whether the event is
supported or not. Therefore, store the `evsel->supported` value in a
common location, the `perf_event_attr_map`, to synchronize event support
across perf sessions.

Fixes: 7fac83aaf2ee ("perf stat: Introduce 'bperf' to share hardware PMCs with BPF")
Signed-off-by: Tengda Wu <wutengda@huaweicloud.com>
Signed-off-by: Xiaomeng Zhang
---
 tools/lib/perf/include/perf/bpf_perf.h |  1 +
 tools/perf/util/bpf_counter.c          | 18 ++++++++++--------
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/tools/lib/perf/include/perf/bpf_perf.h b/tools/lib/perf/include/perf/bpf_perf.h
index e7cf6ba7b674..64c8d211726d 100644
--- a/tools/lib/perf/include/perf/bpf_perf.h
+++ b/tools/lib/perf/include/perf/bpf_perf.h
@@ -23,6 +23,7 @@ struct perf_event_attr_map_entry {
 	__u32 link_id;
 	__u32 diff_map_id;
+	__u8 supported;
 };
 
 /* default attr_map name */
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index 57fc6bab64ba..6e54b6318766 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -423,18 +423,19 @@
 	diff_map_fd = bpf_map__fd(skel->maps.diff_readings);
 	entry->link_id = bpf_link_get_id(link_fd);
 	entry->diff_map_id = bpf_map_get_id(diff_map_fd);
-	err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, entry, BPF_ANY);
-	assert(err == 0);
-
-	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry->link_id);
-	assert(evsel->bperf_leader_link_fd >= 0);
-
 	/*
 	 * save leader_skel for install_pe, which is called within
 	 * following evsel__open_per_cpu call
 	 */
 	evsel->leader_skel = skel;
-	evsel__open_per_cpu(evsel, all_cpu_map, -1);
+	if (!evsel__open_per_cpu(evsel, all_cpu_map, -1))
+		entry->supported = true;
+
+	err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, entry, BPF_ANY);
+	assert(err == 0);
+
+	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry->link_id);
+	assert(evsel->bperf_leader_link_fd >= 0);
 
 out:
 	bperf_leader_bpf__destroy(skel);
@@ -464,7 +465,7 @@ static int bperf_attach_follower_program(struct bperf_follower_bpf *skel,
 
 static int bperf__load(struct evsel *evsel, struct target *target)
 {
-	struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff};
+	struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff, false};
 	int attr_map_fd, diff_map_fd = -1, err;
 	enum bperf_filter_type filter_type;
 	__u32 i;
@@ -512,6 +513,7 @@ static int bperf__load(struct evsel *evsel, struct target *target)
 		err = -1;
 		goto out;
 	}
+	evsel->supported = entry.supported;
 	/*
 	 * The bpf_link holds reference to the leader program, and the
 	 * leader program holds reference to the maps. Therefore, if
-- 
Gitee
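
The last two patches are easiest to sanity-check from the command line.
A rough sketch, assuming a BPF-enabled perf build; the flags are
standard perf stat options, not something this series introduces:

$ perf stat --bpf-counters -a -ddd -- sleep 1

Before patch 5 this kind of invocation could overflow the 16-entry
perf_event_attr_map; with ATTR_MAP_SIZE raised to 100, exceeding the
new limit now fails early with "Too many events, please limit to 100
or less". With patch 6 applied, an event that opens successfully but
counts 0 is no longer mislabeled <not supported>, because the leader
records entry->supported at open time and later sessions read it back
from the shared attr map.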