// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2019 Facebook */

#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "bpf_counter.h"
#include "bpf-utils.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cgroup.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bpf_prog_profiler.skel.h"
#include "bpf_skel/bperf_u.h"
#include "bpf_skel/bperf_leader.skel.h"
#include "bpf_skel/bperf_follower.skel.h"

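/* max number of perf_event_attr entries kept in the pinned attr map */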
#define ATTR_MAP_SIZE 16

static inline void *u64_to_ptr(__u64 ptr)
{
	return (void *)(unsigned long)ptr;
}

static struct bpf_counter *bpf_counter_alloc(void)
{
	struct bpf_counter *counter;

	counter = zalloc(sizeof(*counter));
	if (counter)
		INIT_LIST_HEAD(&counter->list);
	return counter;
}

static int bpf_program_profiler__destroy(struct evsel *evsel)
{
	struct bpf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp,
				 &evsel->bpf_counter_list, list) {
		list_del_init(&counter->list);
		bpf_prog_profiler_bpf__destroy(counter->skel);
		free(counter);
	}
	assert(list_empty(&evsel->bpf_counter_list));

	return 0;
}

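/*
 * Look up, via the target program's BTF, the name of its first function so
 * that it can be used as the fentry/fexit attach target of the profiler.
 */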
static char *bpf_target_prog_name(int tgt_fd)
{
	struct bpf_func_info *func_info;
	struct perf_bpil *info_linear;
	const struct btf_type *t;
	struct btf *btf = NULL;
	char *name = NULL;

	info_linear = get_bpf_prog_info_linear(tgt_fd, 1UL << PERF_BPIL_FUNC_INFO);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("failed to get info_linear for prog FD %d\n", tgt_fd);
		return NULL;
	}

	if (info_linear->info.btf_id == 0) {
		pr_debug("prog FD %d doesn't have valid btf\n", tgt_fd);
		goto out;
	}

	btf = btf__load_from_kernel_by_id(info_linear->info.btf_id);
	if (libbpf_get_error(btf)) {
		pr_debug("failed to load btf for prog FD %d\n", tgt_fd);
		goto out;
	}

	func_info = u64_to_ptr(info_linear->info.func_info);
	t = btf__type_by_id(btf, func_info[0].type_id);
	if (!t) {
		pr_debug("btf %d doesn't have type %d\n",
			 info_linear->info.btf_id, func_info[0].type_id);
		goto out;
	}
	name = strdup(btf__name_by_offset(btf, t->name_off));
out:
	btf__free(btf);
	free(info_linear);
	return name;
}

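/*
 * Open one target BPF program by id, size the profiler skeleton for the
 * evsel's CPUs, point its programs at the target, and load the skeleton.
 */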
static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	struct bpf_program *prog;
	char *prog_name;
	int prog_fd;
	int err;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0) {
		pr_err("Failed to open fd for bpf prog %u\n", prog_id);
		return -1;
	}
	counter = bpf_counter_alloc();
	if (!counter) {
		close(prog_fd);
		return -1;
	}

	skel = bpf_prog_profiler_bpf__open();
	if (!skel) {
		pr_err("Failed to open bpf skeleton\n");
		goto err_out;
	}

	skel->rodata->num_cpu = evsel__nr_cpus(evsel);

	bpf_map__set_max_entries(skel->maps.events, evsel__nr_cpus(evsel));
	bpf_map__set_max_entries(skel->maps.fentry_readings, 1);
	bpf_map__set_max_entries(skel->maps.accum_readings, 1);

	prog_name = bpf_target_prog_name(prog_fd);
	if (!prog_name) {
		pr_err("Failed to get program name for bpf prog %u. Does it have BTF?\n", prog_id);
		goto err_out;
	}

	bpf_object__for_each_program(prog, skel->obj) {
		err = bpf_program__set_attach_target(prog, prog_fd, prog_name);
		if (err) {
			pr_err("bpf_program__set_attach_target failed.\n"
			       "Does bpf prog %u have BTF?\n", prog_id);
			goto err_out;
		}
	}
	set_max_rlimit();
	err = bpf_prog_profiler_bpf__load(skel);
	if (err) {
		pr_err("bpf_prog_profiler_bpf__load failed\n");
		goto err_out;
	}

	assert(skel != NULL);
	counter->skel = skel;
	list_add(&counter->list, &evsel->bpf_counter_list);
	close(prog_fd);
	return 0;
err_out:
	bpf_prog_profiler_bpf__destroy(skel);
	free(counter);
	close(prog_fd);
	return -1;
}

static int bpf_program_profiler__load(struct evsel *evsel, struct target *target)
{
	char *bpf_str, *bpf_str_, *tok, *saveptr = NULL, *p;
	u32 prog_id;
	int ret;

	bpf_str_ = bpf_str = strdup(target->bpf_str);
	if (!bpf_str)
		return -1;

	while ((tok = strtok_r(bpf_str, ",", &saveptr)) != NULL) {
		prog_id = strtoul(tok, &p, 10);
		if (prog_id == 0 || prog_id == UINT_MAX ||
		    (*p != '\0' && *p != ',')) {
			pr_err("Failed to parse bpf prog ids %s\n",
			       target->bpf_str);
			free(bpf_str_);
			return -1;
		}

		ret = bpf_program_profiler_load_one(evsel, prog_id);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			free(bpf_str_);
			return -1;
		}
		bpf_str = NULL;
	}
	free(bpf_str_);
	return 0;
}

static int bpf_program_profiler__enable(struct evsel *evsel)
{
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		ret = bpf_prog_profiler_bpf__attach(counter->skel);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			return ret;
		}
	}
	return 0;
}

static int bpf_program_profiler__disable(struct evsel *evsel)
{
	struct bpf_counter *counter;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		bpf_prog_profiler_bpf__detach(counter->skel);
	}
	return 0;
}

static int bpf_program_profiler__read(struct evsel *evsel)
{
	// BPF_MAP_TYPE_PERCPU_ARRAY uses /sys/devices/system/cpu/possible
	// Sometimes possible > online, like on a Ryzen 3900X that has 24
	// threads but its possible showed 0-31 -acme
	int num_cpu_bpf = libbpf_num_possible_cpus();
	struct bpf_perf_event_value values[num_cpu_bpf];
	struct bpf_counter *counter;
	struct perf_counts_values *counts;
	int reading_map_fd;
	__u32 key = 0;
	int err, idx, bpf_cpu;

	if (list_empty(&evsel->bpf_counter_list))
		return -EAGAIN;

	perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
		counts = perf_counts(evsel->counts, idx, 0);
		counts->val = 0;
		counts->ena = 0;
		counts->run = 0;
	}
	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		struct bpf_prog_profiler_bpf *skel = counter->skel;

		assert(skel != NULL);
		reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
		if (err) {
			pr_err("failed to read value\n");
			return err;
		}

		for (bpf_cpu = 0; bpf_cpu < num_cpu_bpf; bpf_cpu++) {
			idx = perf_cpu_map__idx(evsel__cpus(evsel),
						(struct perf_cpu){.cpu = bpf_cpu});
			if (idx == -1)
				continue;
			counts = perf_counts(evsel->counts, idx, 0);
			counts->val += values[bpf_cpu].counter;
			counts->ena += values[bpf_cpu].enabled;
			counts->run += values[bpf_cpu].running;
		}
	}
	return 0;
}

static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu_map_idx,
					    int fd)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		skel = counter->skel;
		assert(skel != NULL);

		ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
					  &cpu_map_idx, &fd, BPF_ANY);
		if (ret)
			return ret;
	}
	return 0;
}

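/*
 * Ops for profiling existing BPF programs selected by id via
 * target->bpf_str (perf stat's -b/--bpf-prog option).
 */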
struct bpf_counter_ops bpf_program_profiler_ops = {
	.load = bpf_program_profiler__load,
	.enable = bpf_program_profiler__enable,
	.disable = bpf_program_profiler__disable,
	.read = bpf_program_profiler__read,
	.destroy = bpf_program_profiler__destroy,
	.install_pe = bpf_program_profiler__install_pe,
};

static bool bperf_attr_map_compatible(int attr_map_fd)
{
	struct bpf_map_info map_info = {0};
	__u32 map_info_len = sizeof(map_info);
	int err;

	err = bpf_obj_get_info_by_fd(attr_map_fd, &map_info, &map_info_len);

	if (err)
		return false;
	return (map_info.key_size == sizeof(struct perf_event_attr)) &&
		(map_info.value_size == sizeof(struct perf_event_attr_map_entry));
}

#ifndef HAVE_LIBBPF_BPF_MAP_CREATE
LIBBPF_API int bpf_create_map(enum bpf_map_type map_type, int key_size,
			      int value_size, int max_entries, __u32 map_flags);
int
bpf_map_create(enum bpf_map_type map_type,
	       const char *map_name __maybe_unused,
	       __u32 key_size,
	       __u32 value_size,
	       __u32 max_entries,
	       const struct bpf_map_create_opts *opts __maybe_unused)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
	return bpf_create_map(map_type, key_size, value_size, max_entries, 0);
#pragma GCC diagnostic pop
}
#endif

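/*
 * Open (or create and pin) the shared perf_event_attr map, verify its
 * layout, and take an exclusive flock on it so that concurrent perf-stat
 * sessions serialize their updates.
 */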
static int bperf_lock_attr_map(struct target *target)
{
	char path[PATH_MAX];
	int map_fd, err;

	if (target->attr_map) {
		scnprintf(path, PATH_MAX, "%s", target->attr_map);
	} else {
		scnprintf(path, PATH_MAX, "%s/fs/bpf/%s", sysfs__mountpoint(),
			  BPF_PERF_DEFAULT_ATTR_MAP_PATH);
	}

	if (access(path, F_OK)) {
		map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
					sizeof(struct perf_event_attr),
					sizeof(struct perf_event_attr_map_entry),
					ATTR_MAP_SIZE, NULL);
		if (map_fd < 0)
			return -1;

		err = bpf_obj_pin(map_fd, path);
		if (err) {
			/* someone pinned the map in parallel? */
			close(map_fd);
			map_fd = bpf_obj_get(path);
			if (map_fd < 0)
				return -1;
		}
	} else {
		map_fd = bpf_obj_get(path);
		if (map_fd < 0)
			return -1;
	}

	if (!bperf_attr_map_compatible(map_fd)) {
		close(map_fd);
		return -1;
	}
	err = flock(map_fd, LOCK_EX);
	if (err) {
		close(map_fd);
		return -1;
	}
	return map_fd;
}

static int bperf_check_target(struct evsel *evsel,
			      struct target *target,
			      enum bperf_filter_type *filter_type,
			      __u32 *filter_entry_cnt)
{
	if (evsel->core.leader->nr_members > 1) {
		pr_err("bpf managed perf events do not yet support groups.\n");
		return -1;
	}

	/* determine filter type based on target */
	if (target->system_wide) {
		*filter_type = BPERF_FILTER_GLOBAL;
		*filter_entry_cnt = 1;
	} else if (target->cpu_list) {
		*filter_type = BPERF_FILTER_CPU;
		*filter_entry_cnt = perf_cpu_map__nr(evsel__cpus(evsel));
	} else if (target->tid) {
		*filter_type = BPERF_FILTER_PID;
		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
	} else if (target->pid || evsel->evlist->workload.pid != -1) {
		*filter_type = BPERF_FILTER_TGID;
		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
	} else {
		pr_err("bpf managed perf events do not yet support these targets.\n");
		return -1;
	}

	return 0;
}

static struct perf_cpu_map *all_cpu_map;

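/*
 * (Re)create the shared leader program: load and attach a fresh leader
 * skeleton, publish its link and diff map ids in the attr map entry, and
 * open the perf events on all CPUs so install_pe can wire them up.
 */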
static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
				       struct perf_event_attr_map_entry *entry)
{
	struct bperf_leader_bpf *skel = bperf_leader_bpf__open();
	int link_fd, diff_map_fd, err;
	struct bpf_link *link = NULL;

	if (!skel) {
		pr_err("Failed to open leader skeleton\n");
		return -1;
	}

	bpf_map__set_max_entries(skel->maps.events, libbpf_num_possible_cpus());
	err = bperf_leader_bpf__load(skel);
	if (err) {
		pr_err("Failed to load leader skeleton\n");
		goto out;
	}

	link = bpf_program__attach(skel->progs.on_switch);
	if (IS_ERR(link)) {
		pr_err("Failed to attach leader program\n");
		err = PTR_ERR(link);
		goto out;
	}

	link_fd = bpf_link__fd(link);
	diff_map_fd = bpf_map__fd(skel->maps.diff_readings);
	entry->link_id = bpf_link_get_id(link_fd);
	entry->diff_map_id = bpf_map_get_id(diff_map_fd);
	err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, entry, BPF_ANY);
	assert(err == 0);

	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry->link_id);
	assert(evsel->bperf_leader_link_fd >= 0);

	/*
	 * save leader_skel for install_pe, which is called within the
	 * following evsel__open_per_cpu call
	 */
	evsel->leader_skel = skel;
	evsel__open_per_cpu(evsel, all_cpu_map, -1);

out:
	bperf_leader_bpf__destroy(skel);
	bpf_link__destroy(link);
	return err;
}

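/*
 * Attach this evsel to the shared leader program (reloading it if it is
 * gone) and set up this session's follower program and filter map.
 */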
static int bperf__load(struct evsel *evsel, struct target *target)
{
	struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff};
	int attr_map_fd, diff_map_fd = -1, err;
	enum bperf_filter_type filter_type;
	__u32 filter_entry_cnt, i;

	if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
		return -1;

	if (!all_cpu_map) {
		all_cpu_map = perf_cpu_map__new(NULL);
		if (!all_cpu_map)
			return -1;
	}

	evsel->bperf_leader_prog_fd = -1;
	evsel->bperf_leader_link_fd = -1;

	/*
	 * Step 1: hold a fd on the leader program and the bpf_link, if
	 * the program is not already gone; otherwise, reload the program.
	 * Use flock() to ensure exclusive access to the perf_event_attr
	 * map.
	 */
	attr_map_fd = bperf_lock_attr_map(target);
	if (attr_map_fd < 0) {
		pr_err("Failed to lock perf_event_attr map\n");
		return -1;
	}

	err = bpf_map_lookup_elem(attr_map_fd, &evsel->core.attr, &entry);
	if (err) {
		err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, &entry, BPF_ANY);
		if (err)
			goto out;
	}

	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
	if (evsel->bperf_leader_link_fd < 0 &&
	    bperf_reload_leader_program(evsel, attr_map_fd, &entry)) {
		err = -1;
		goto out;
	}
	/*
	 * The bpf_link holds reference to the leader program, and the
	 * leader program holds reference to the maps. Therefore, if
	 * link_id is valid, diff_map_id should also be valid.
	 */
	evsel->bperf_leader_prog_fd = bpf_prog_get_fd_by_id(
		bpf_link_get_prog_id(evsel->bperf_leader_link_fd));
	assert(evsel->bperf_leader_prog_fd >= 0);

	diff_map_fd = bpf_map_get_fd_by_id(entry.diff_map_id);
	assert(diff_map_fd >= 0);

	/*
	 * bperf uses BPF_PROG_TEST_RUN to get accurate readings. Check
	 * whether the kernel supports it.
	 */
	err = bperf_trigger_reading(evsel->bperf_leader_prog_fd, 0);
	if (err) {
		pr_err("The kernel does not support test_run for raw_tp BPF programs.\n"
		       "Therefore, --use-bpf might show inaccurate readings\n");
		goto out;
	}

	/* Step 2: load the follower skeleton */
	evsel->follower_skel = bperf_follower_bpf__open();
	if (!evsel->follower_skel) {
		err = -1;
		pr_err("Failed to open follower skeleton\n");
		goto out;
	}

	/* attach fexit program to the leader program */
	bpf_program__set_attach_target(evsel->follower_skel->progs.fexit_XXX,
				       evsel->bperf_leader_prog_fd, "on_switch");

	/* connect to leader diff_reading map */
	bpf_map__reuse_fd(evsel->follower_skel->maps.diff_readings, diff_map_fd);

	/* set up reading map */
	bpf_map__set_max_entries(evsel->follower_skel->maps.accum_readings,
				 filter_entry_cnt);
	/* set up follower filter based on target */
	bpf_map__set_max_entries(evsel->follower_skel->maps.filter,
				 filter_entry_cnt);
	err = bperf_follower_bpf__load(evsel->follower_skel);
	if (err) {
		pr_err("Failed to load follower skeleton\n");
		bperf_follower_bpf__destroy(evsel->follower_skel);
		evsel->follower_skel = NULL;
		goto out;
	}

	for (i = 0; i < filter_entry_cnt; i++) {
		int filter_map_fd;
		__u32 key;

		if (filter_type == BPERF_FILTER_PID ||
		    filter_type == BPERF_FILTER_TGID)
			key = evsel->core.threads->map[i].pid;
		else if (filter_type == BPERF_FILTER_CPU)
			key = evsel->core.cpus->map[i].cpu;
		else
			break;

		filter_map_fd = bpf_map__fd(evsel->follower_skel->maps.filter);
		bpf_map_update_elem(filter_map_fd, &key, &i, BPF_ANY);
	}

	evsel->follower_skel->bss->type = filter_type;

	err = bperf_follower_bpf__attach(evsel->follower_skel);

out:
	if (err && evsel->bperf_leader_link_fd >= 0)
		close(evsel->bperf_leader_link_fd);
	if (err && evsel->bperf_leader_prog_fd >= 0)
		close(evsel->bperf_leader_prog_fd);
	if (diff_map_fd >= 0)
		close(diff_map_fd);

	flock(attr_map_fd, LOCK_UN);
	close(attr_map_fd);

	return err;
}

static int bperf__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
{
	struct bperf_leader_bpf *skel = evsel->leader_skel;

	return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
				   &cpu_map_idx, &fd, BPF_ANY);
}

/*
 * trigger the leader prog on each cpu, so the accum_readings map could get
 * the latest readings.
 */
static int bperf_sync_counters(struct evsel *evsel)
{
	int num_cpu, i, cpu;

	num_cpu = all_cpu_map->nr;
	for (i = 0; i < num_cpu; i++) {
		cpu = all_cpu_map->map[i].cpu;
		bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu);
	}
	return 0;
}

static int bperf__enable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 1;
	return 0;
}

static int bperf__disable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 0;
	return 0;
}

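/*
 * Trigger a fresh reading on every CPU, then fold the per-CPU values from
 * accum_readings into evsel->counts according to the follower's filter type.
 */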
static int bperf__read(struct evsel *evsel)
{
	struct bperf_follower_bpf *skel = evsel->follower_skel;
	__u32 num_cpu_bpf = cpu__max_cpu().cpu;
	struct bpf_perf_event_value values[num_cpu_bpf];
	struct perf_counts_values *counts;
	int reading_map_fd, err = 0;
	__u32 i;
	int j;

	bperf_sync_counters(evsel);
	reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

	for (i = 0; i < bpf_map__max_entries(skel->maps.accum_readings); i++) {
		struct perf_cpu entry;
		__u32 cpu;

		err = bpf_map_lookup_elem(reading_map_fd, &i, values);
		if (err)
			goto out;
		switch (evsel->follower_skel->bss->type) {
		case BPERF_FILTER_GLOBAL:
			assert(i == 0);

			perf_cpu_map__for_each_cpu(entry, j, evsel__cpus(evsel)) {
				counts = perf_counts(evsel->counts, j, 0);
				counts->val = values[entry.cpu].counter;
				counts->ena = values[entry.cpu].enabled;
				counts->run = values[entry.cpu].running;
			}
			break;
		case BPERF_FILTER_CPU:
			cpu = perf_cpu_map__cpu(evsel__cpus(evsel), i).cpu;
			assert(cpu >= 0);
			counts = perf_counts(evsel->counts, i, 0);
			counts->val = values[cpu].counter;
			counts->ena = values[cpu].enabled;
			counts->run = values[cpu].running;
			break;
		case BPERF_FILTER_PID:
		case BPERF_FILTER_TGID:
			counts = perf_counts(evsel->counts, 0, i);
			counts->val = 0;
			counts->ena = 0;
			counts->run = 0;

			for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
				counts->val += values[cpu].counter;
				counts->ena += values[cpu].enabled;
				counts->run += values[cpu].running;
			}
			break;
		default:
			break;
		}
	}
out:
	return err;
}

static int bperf__destroy(struct evsel *evsel)
{
	bperf_follower_bpf__destroy(evsel->follower_skel);
	close(evsel->bperf_leader_prog_fd);
	close(evsel->bperf_leader_link_fd);
	return 0;
}

/*
 * bperf: share hardware PMCs with BPF
 *
 * perf uses performance monitoring counters (PMC) to monitor system
 * performance. The PMCs are limited hardware resources. For example,
 * Intel CPUs have 3x fixed PMCs and 4x programmable PMCs per cpu.
 *
 * Modern data center systems use these PMCs in many different ways:
 * system level monitoring, (maybe nested) container level monitoring, per
 * process monitoring, profiling (in sample mode), etc. In some cases,
 * there are more active perf_events than available hardware PMCs. To allow
 * all perf_events to have a chance to run, it is necessary to do expensive
 * time multiplexing of events.
 *
 * On the other hand, many monitoring tools count the common metrics
 * (cycles, instructions). It is a waste to have multiple tools create
 * multiple perf_events of "cycles" and occupy multiple PMCs.
 *
 * bperf tries to reduce such wastes by allowing multiple perf_events of
 * "cycles" or "instructions" (at different scopes) to share PMUs. Instead
 * of having each perf-stat session read its own perf_events, bperf uses
 * BPF programs to read the perf_events and aggregate readings to BPF maps.
 * Then, the perf-stat session(s) reads the values from these BPF maps.
 *
 *                                ||
 *       shared progs and maps <- || -> per session progs and maps
 *                                ||
 *   ---------------              ||
 *   | perf_events |              ||
 *   ---------------       fexit  ||      -----------------
 *          |             --------||----> | follower prog |
 *       --------------- /        ||      -----------------
 * cs -> | leader prog |/         ||           |         |
 *   --> ---------------          ||  --------------  ------------------
 *  /       |         |           ||  | filter map |  | accum_readings |
 * /  ------------  ------------  ||  --------------  ------------------
 * |  | prev map |  | diff map |  ||                          |
 * |  ------------  ------------  ||                          |
 *  \                             ||                          |
 * = \ ======================================================= | ==========
 *    \                                                       /  user space
 *     \                                                     /
 *      \                                                   /
 *   BPF_PROG_TEST_RUN                          BPF_MAP_LOOKUP_ELEM
 *        \                                               /
 *         \                                             /
 *          \------  perf-stat  -------------------------/
 *
 * The figure above shows the architecture of bperf. Note that the figure
 * is divided into 3 regions: shared progs and maps (top left), per session
 * progs and maps (top right), and user space (bottom).
 *
 * The leader prog is triggered on each context switch (cs). The leader
 * prog reads perf_events and stores the difference (current_reading -
 * previous_reading) to the diff map. For the same metric, e.g. "cycles",
 * multiple perf-stat sessions share the same leader prog.
 *
 * Each perf-stat session creates a follower prog as a fexit program to the
 * leader prog. It is possible to attach up to BPF_MAX_TRAMP_PROGS (38)
 * follower progs to the same leader prog. The follower prog checks the
 * current task and processor ID to decide whether to add the value from the
 * diff map to its accumulated reading map (accum_readings).
 *
 * Finally, perf-stat user space reads the value from the accum_readings map.
 *
 * Besides context switch, it is also necessary to trigger the leader prog
 * before perf-stat reads the value. Otherwise, the accum_readings map may
 * not have the latest reading from the perf_events. This is achieved by
 * triggering the event via sys_bpf(BPF_PROG_TEST_RUN) on each CPU.
 *
 * The comment before the definition of struct perf_event_attr_map_entry
 * describes how different sessions of perf-stat share information about
 * the leader prog.
 */
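/*
 * A minimal usage sketch (not taken from this file; the option spellings
 * are those documented for perf stat and assumed available in this build):
 *
 *   # two sessions counting "cycles" share one leader prog via bperf
 *   perf stat --bpf-counters -e cycles -a -- sleep 10 &
 *   perf stat --bpf-counters -e cycles -p 1234 -- sleep 10
 *
 *   # profile an already loaded BPF program by id (bpf_program_profiler_ops)
 *   perf stat -b 42 -e cycles -- sleep 5
 */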

struct bpf_counter_ops bperf_ops = {
	.load = bperf__load,
	.enable = bperf__enable,
	.disable = bperf__disable,
	.read = bperf__read,
	.install_pe = bperf__install_pe,
	.destroy = bperf__destroy,
};

extern struct bpf_counter_ops bperf_cgrp_ops;

static inline bool bpf_counter_skip(struct evsel *evsel)
{
	return list_empty(&evsel->bpf_counter_list) &&
		evsel->follower_skel == NULL;
}

int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->install_pe(evsel, cpu_map_idx, fd);
}

int bpf_counter__load(struct evsel *evsel, struct target *target)
{
	if (target->bpf_str)
		evsel->bpf_counter_ops = &bpf_program_profiler_ops;
	else if (cgrp_event_expanded && target->use_bpf)
		evsel->bpf_counter_ops = &bperf_cgrp_ops;
	else if (target->use_bpf || evsel->bpf_counter ||
		 evsel__match_bpf_counter_events(evsel->name))
		evsel->bpf_counter_ops = &bperf_ops;

	if (evsel->bpf_counter_ops)
		return evsel->bpf_counter_ops->load(evsel, target);
	return 0;
}

int bpf_counter__enable(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->enable(evsel);
}

int bpf_counter__disable(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->disable(evsel);
}

int bpf_counter__read(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return -EAGAIN;
	return evsel->bpf_counter_ops->read(evsel);
}

void bpf_counter__destroy(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return;
	evsel->bpf_counter_ops->destroy(evsel);
	evsel->bpf_counter_ops = NULL;
}