1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * bpf-loader.c
4 *
5 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
6 * Copyright (C) 2015 Huawei Inc.
7 */
8
9 #include <linux/bpf.h>
10 #include <bpf/libbpf.h>
11 #include <bpf/bpf.h>
12 #include <linux/filter.h>
13 #include <linux/err.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/zalloc.h>
17 #include <errno.h>
18 #include <stdlib.h>
19 #include "debug.h"
20 #include "evlist.h"
21 #include "bpf-loader.h"
22 #include "bpf-prologue.h"
23 #include "probe-event.h"
24 #include "probe-finder.h" // for MAX_PROBES
25 #include "parse-events.h"
26 #include "strfilter.h"
27 #include "util.h"
28 #include "llvm-utils.h"
29 #include "c++/clang-c.h"
30 #ifdef HAVE_LIBBPF_SUPPORT
31 #include <bpf/hashmap.h>
32 #else
33 #include "util/hashmap.h"
34 #endif
35 #include "asm/bug.h"
36
37 #include <internal/xyarray.h>
38
39 #ifndef HAVE_LIBBPF_BPF_PROGRAM__SET_INSNS
int bpf_program__set_insns(struct bpf_program *prog __maybe_unused,
41 struct bpf_insn *new_insns __maybe_unused, size_t new_insn_cnt __maybe_unused)
42 {
	pr_err("%s: not supported, please update libbpf\n", __func__);
44 return -ENOTSUP;
45 }
46
int libbpf_register_prog_handler(const char *sec __maybe_unused,
48 enum bpf_prog_type prog_type __maybe_unused,
49 enum bpf_attach_type exp_attach_type __maybe_unused,
50 const struct libbpf_prog_handler_opts *opts __maybe_unused)
51 {
	pr_err("%s: not supported, please update libbpf\n", __func__);
53 return -ENOTSUP;
54 }
55 #endif
56
57 /* temporarily disable libbpf deprecation warnings */
58 #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
59
static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
61 const char *fmt, va_list args)
62 {
63 return veprintf(1, verbose, pr_fmt(fmt), args);
64 }
65
66 struct bpf_prog_priv {
67 bool is_tp;
68 char *sys_name;
69 char *evt_name;
70 struct perf_probe_event pev;
71 bool need_prologue;
72 struct bpf_insn *insns_buf;
73 int nr_types;
74 int *type_mapping;
75 int *prologue_fds;
76 };
77
78 struct bpf_perf_object {
79 struct list_head list;
80 struct bpf_object *obj;
81 };
82
83 struct bpf_preproc_result {
84 struct bpf_insn *new_insn_ptr;
85 int new_insn_cnt;
86 };
87
88 static LIST_HEAD(bpf_objects_list);
89 static struct hashmap *bpf_program_hash;
90 static struct hashmap *bpf_map_hash;
91
92 static struct bpf_perf_object *
bpf_perf_object__next(struct bpf_perf_object *prev)
94 {
95 if (!prev) {
96 if (list_empty(&bpf_objects_list))
97 return NULL;
98
99 return list_first_entry(&bpf_objects_list, struct bpf_perf_object, list);
100 }
101 if (list_is_last(&prev->list, &bpf_objects_list))
102 return NULL;
103
104 return list_next_entry(prev, list);
105 }
106
107 #define bpf_perf_object__for_each(perf_obj, tmp) \
108 for ((perf_obj) = bpf_perf_object__next(NULL), \
109 (tmp) = bpf_perf_object__next(perf_obj); \
110 (perf_obj) != NULL; \
111 (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))
112
113 static bool libbpf_initialized;
114 static int libbpf_sec_handler;
115
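/*
 * Wrap an opened bpf_object in a bpf_perf_object and link it into the
 * global bpf_objects_list so bpf__clear() can unprobe and close it later.
 */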
static int bpf_perf_object__add(struct bpf_object *obj)
117 {
118 struct bpf_perf_object *perf_obj = zalloc(sizeof(*perf_obj));
119
120 if (perf_obj) {
121 INIT_LIST_HEAD(&perf_obj->list);
122 perf_obj->obj = obj;
123 list_add_tail(&perf_obj->list, &bpf_objects_list);
124 }
125 return perf_obj ? 0 : -ENOMEM;
126 }
127
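/* Look up the per-program private data attached via program_set_priv(). */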
static void *program_priv(const struct bpf_program *prog)
129 {
130 void *priv;
131
132 if (IS_ERR_OR_NULL(bpf_program_hash))
133 return NULL;
134 if (!hashmap__find(bpf_program_hash, prog, &priv))
135 return NULL;
136 return priv;
137 }
138
139 static struct bpf_insn prologue_init_insn[] = {
140 BPF_MOV64_IMM(BPF_REG_2, 0),
141 BPF_MOV64_IMM(BPF_REG_3, 0),
142 BPF_MOV64_IMM(BPF_REG_4, 0),
143 BPF_MOV64_IMM(BPF_REG_5, 0),
144 };
145
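/*
 * libbpf prepare-load callback: for programs that need a prologue, prepend
 * the placeholder prologue_init_insn instructions so that later, in
 * bpf_object__load_prologue(), the generated prologue can take their place.
 */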
static int libbpf_prog_prepare_load_fn(struct bpf_program *prog,
147 struct bpf_prog_load_opts *opts __maybe_unused,
148 long cookie __maybe_unused)
149 {
150 size_t init_size_cnt = ARRAY_SIZE(prologue_init_insn);
151 size_t orig_insn_cnt, insn_cnt, init_size, orig_size;
152 struct bpf_prog_priv *priv = program_priv(prog);
153 const struct bpf_insn *orig_insn;
154 struct bpf_insn *insn;
155
156 if (IS_ERR_OR_NULL(priv)) {
157 pr_debug("bpf: failed to get private field\n");
158 return -BPF_LOADER_ERRNO__INTERNAL;
159 }
160
161 if (!priv->need_prologue)
162 return 0;
163
164 /* prepend initialization code to program instructions */
165 orig_insn = bpf_program__insns(prog);
166 orig_insn_cnt = bpf_program__insn_cnt(prog);
167 init_size = init_size_cnt * sizeof(*insn);
168 orig_size = orig_insn_cnt * sizeof(*insn);
169
170 insn_cnt = orig_insn_cnt + init_size_cnt;
171 insn = malloc(insn_cnt * sizeof(*insn));
172 if (!insn)
173 return -ENOMEM;
174
175 memcpy(insn, prologue_init_insn, init_size);
176 memcpy((char *) insn + init_size, orig_insn, orig_size);
177 bpf_program__set_insns(prog, insn, insn_cnt);
178 return 0;
179 }
180
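/*
 * One-time libbpf setup: route libbpf messages through perf's debug
 * printing and register a catch-all kprobe section handler that runs
 * libbpf_prog_prepare_load_fn() before each program is loaded.
 */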
static int libbpf_init(void)
182 {
183 LIBBPF_OPTS(libbpf_prog_handler_opts, handler_opts,
184 .prog_prepare_load_fn = libbpf_prog_prepare_load_fn,
185 );
186
187 if (libbpf_initialized)
188 return 0;
189
190 libbpf_set_print(libbpf_perf_print);
191 libbpf_sec_handler = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_KPROBE,
192 0, &handler_opts);
193 if (libbpf_sec_handler < 0) {
194 pr_debug("bpf: failed to register libbpf section handler: %d\n",
195 libbpf_sec_handler);
196 return -BPF_LOADER_ERRNO__INTERNAL;
197 }
198 libbpf_initialized = true;
199 return 0;
200 }
201
202 struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
204 {
205 LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = name);
206 struct bpf_object *obj;
207 int err;
208
209 err = libbpf_init();
210 if (err)
211 return ERR_PTR(err);
212
213 obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
214 if (IS_ERR_OR_NULL(obj)) {
215 pr_debug("bpf: failed to load buffer\n");
216 return ERR_PTR(-EINVAL);
217 }
218
219 if (bpf_perf_object__add(obj)) {
220 bpf_object__close(obj);
221 return ERR_PTR(-ENOMEM);
222 }
223
224 return obj;
225 }
226
static void bpf_perf_object__close(struct bpf_perf_object *perf_obj)
228 {
229 list_del(&perf_obj->list);
230 bpf_object__close(perf_obj->obj);
231 free(perf_obj);
232 }
233
struct bpf_object *bpf__prepare_load(const char *filename, bool source)
235 {
236 LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = filename);
237 struct bpf_object *obj;
238 int err;
239
240 err = libbpf_init();
241 if (err)
242 return ERR_PTR(err);
243
244 if (source) {
245 void *obj_buf;
246 size_t obj_buf_sz;
247
248 perf_clang__init();
249 err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
250 perf_clang__cleanup();
251 if (err) {
252 pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
253 err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
254 if (err)
255 return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
256 } else
257 pr_debug("bpf: successful builtin compilation\n");
258 obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
259
260 if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
261 llvm__dump_obj(filename, obj_buf, obj_buf_sz);
262
263 free(obj_buf);
264 } else {
265 obj = bpf_object__open(filename);
266 }
267
268 if (IS_ERR_OR_NULL(obj)) {
269 pr_debug("bpf: failed to load %s\n", filename);
270 return obj;
271 }
272
273 if (bpf_perf_object__add(obj)) {
274 bpf_object__close(obj);
275 return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
276 }
277
278 return obj;
279 }
280
static void close_prologue_programs(struct bpf_prog_priv *priv)
282 {
283 struct perf_probe_event *pev;
284 int i, fd;
285
286 if (!priv->need_prologue)
287 return;
288 pev = &priv->pev;
289 for (i = 0; i < pev->ntevs; i++) {
290 fd = priv->prologue_fds[i];
291 if (fd != -1)
292 close(fd);
293 }
294 }
295
296 static void
clear_prog_priv(const struct bpf_program *prog __maybe_unused,
298 void *_priv)
299 {
300 struct bpf_prog_priv *priv = _priv;
301
302 close_prologue_programs(priv);
303 cleanup_perf_probe_events(&priv->pev, 1);
304 zfree(&priv->insns_buf);
305 zfree(&priv->prologue_fds);
306 zfree(&priv->type_mapping);
307 zfree(&priv->sys_name);
308 zfree(&priv->evt_name);
309 free(priv);
310 }
311
static void bpf_program_hash_free(void)
313 {
314 struct hashmap_entry *cur;
315 size_t bkt;
316
317 if (IS_ERR_OR_NULL(bpf_program_hash))
318 return;
319
320 hashmap__for_each_entry(bpf_program_hash, cur, bkt)
321 clear_prog_priv(cur->key, cur->value);
322
323 hashmap__free(bpf_program_hash);
324 bpf_program_hash = NULL;
325 }
326
327 static void bpf_map_hash_free(void);
328
void bpf__clear(void)
330 {
331 struct bpf_perf_object *perf_obj, *tmp;
332
333 bpf_perf_object__for_each(perf_obj, tmp) {
334 bpf__unprobe(perf_obj->obj);
335 bpf_perf_object__close(perf_obj);
336 }
337
338 bpf_program_hash_free();
339 bpf_map_hash_free();
340 }
341
static size_t ptr_hash(const void *__key, void *ctx __maybe_unused)
343 {
344 return (size_t) __key;
345 }
346
static bool ptr_equal(const void *key1, const void *key2,
348 void *ctx __maybe_unused)
349 {
350 return key1 == key2;
351 }
352
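/*
 * Attach private data to a program, creating the global bpf_program_hash
 * on first use and replacing (and freeing) any previously attached priv.
 */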
static int program_set_priv(struct bpf_program *prog, void *priv)
354 {
355 void *old_priv;
356
357 /*
358 * Should not happen, we warn about it in the
359 * caller function - config_bpf_program
360 */
361 if (IS_ERR(bpf_program_hash))
362 return PTR_ERR(bpf_program_hash);
363
364 if (!bpf_program_hash) {
365 bpf_program_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
366 if (IS_ERR(bpf_program_hash))
367 return PTR_ERR(bpf_program_hash);
368 }
369
370 old_priv = program_priv(prog);
371 if (old_priv) {
372 clear_prog_priv(prog, old_priv);
373 return hashmap__set(bpf_program_hash, prog, priv, NULL, NULL);
374 }
375 return hashmap__add(bpf_program_hash, prog, priv);
376 }
377
378 static int
prog_config__exec(const char *value, struct perf_probe_event *pev)
380 {
381 pev->uprobes = true;
382 pev->target = strdup(value);
383 if (!pev->target)
384 return -ENOMEM;
385 return 0;
386 }
387
388 static int
prog_config__module(const char *value, struct perf_probe_event *pev)
390 {
391 pev->uprobes = false;
392 pev->target = strdup(value);
393 if (!pev->target)
394 return -ENOMEM;
395 return 0;
396 }
397
398 static int
prog_config__bool(const char *value, bool *pbool, bool invert)
400 {
401 int err;
402 bool bool_value;
403
404 if (!pbool)
405 return -EINVAL;
406
407 err = strtobool(value, &bool_value);
408 if (err)
409 return err;
410
411 *pbool = invert ? !bool_value : bool_value;
412 return 0;
413 }
414
415 static int
prog_config__inlines(const char *value,
417 struct perf_probe_event *pev __maybe_unused)
418 {
419 return prog_config__bool(value, &probe_conf.no_inlines, true);
420 }
421
422 static int
prog_config__force(const char *value,
424 struct perf_probe_event *pev __maybe_unused)
425 {
426 return prog_config__bool(value, &probe_conf.force_add, false);
427 }
428
429 static struct {
430 const char *key;
431 const char *usage;
432 const char *desc;
433 int (*func)(const char *, struct perf_probe_event *);
434 } bpf_prog_config_terms[] = {
435 {
436 .key = "exec",
437 .usage = "exec=<full path of file>",
438 .desc = "Set uprobe target",
439 .func = prog_config__exec,
440 },
441 {
442 .key = "module",
443 .usage = "module=<module name> ",
444 .desc = "Set kprobe module",
445 .func = prog_config__module,
446 },
447 {
448 .key = "inlines",
449 .usage = "inlines=[yes|no] ",
450 .desc = "Probe at inline symbol",
451 .func = prog_config__inlines,
452 },
453 {
454 .key = "force",
455 .usage = "force=[yes|no] ",
456 .desc = "Forcibly add events with existing name",
457 .func = prog_config__force,
458 },
459 };
460
461 static int
do_prog_config(const char *key, const char *value,
463 struct perf_probe_event *pev)
464 {
465 unsigned int i;
466
467 pr_debug("config bpf program: %s=%s\n", key, value);
468 for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
469 if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
470 return bpf_prog_config_terms[i].func(value, pev);
471
472 pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
473 key, value);
474
475 pr_debug("\nHint: Valid options are:\n");
476 for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
477 pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
478 bpf_prog_config_terms[i].desc);
479 pr_debug("\n");
480
481 return -BPF_LOADER_ERRNO__PROGCONF_TERM;
482 }
483
484 static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
486 {
487 char *text = strdup(config_str);
488 char *sep, *line;
489 const char *main_str = NULL;
490 int err = 0;
491
492 if (!text) {
493 pr_debug("Not enough memory: dup config_str failed\n");
494 return ERR_PTR(-ENOMEM);
495 }
496
497 line = text;
498 while ((sep = strchr(line, ';'))) {
499 char *equ;
500
501 *sep = '\0';
502 equ = strchr(line, '=');
503 if (!equ) {
504 pr_warning("WARNING: invalid config in BPF object: %s\n",
505 line);
506 pr_warning("\tShould be 'key=value'.\n");
507 goto nextline;
508 }
509 *equ = '\0';
510
511 err = do_prog_config(line, equ + 1, pev);
512 if (err)
513 break;
514 nextline:
515 line = sep + 1;
516 }
517
518 if (!err)
519 main_str = config_str + (line - text);
520 free(text);
521
522 return err ? ERR_PTR(err) : main_str;
523 }
524
525 static int
parse_prog_config(const char *config_str, const char **p_main_str,
527 bool *is_tp, struct perf_probe_event *pev)
528 {
529 int err;
530 const char *main_str = parse_prog_config_kvpair(config_str, pev);
531
532 if (IS_ERR(main_str))
533 return PTR_ERR(main_str);
534
535 *p_main_str = main_str;
536 if (!strchr(main_str, '=')) {
537 /* Is a tracepoint event? */
538 const char *s = strchr(main_str, ':');
539
540 if (!s) {
541 pr_debug("bpf: '%s' is not a valid tracepoint\n",
542 config_str);
543 return -BPF_LOADER_ERRNO__CONFIG;
544 }
545
546 *is_tp = true;
547 return 0;
548 }
549
550 *is_tp = false;
551 err = parse_perf_probe_command(main_str, pev);
552 if (err < 0) {
553 pr_debug("bpf: '%s' is not a valid config string\n",
554 config_str);
		/* parse failed, no need to clear pev. */
556 return -BPF_LOADER_ERRNO__CONFIG;
557 }
558 return 0;
559 }
560
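/*
 * Parse the program's ELF section name as its configuration: either a
 * "sys:event" tracepoint or a perf-probe style definition (with optional
 * "key=value;" terms), and attach the result as the program's private data.
 */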
561 static int
config_bpf_program(struct bpf_program *prog)
563 {
564 struct perf_probe_event *pev = NULL;
565 struct bpf_prog_priv *priv = NULL;
566 const char *config_str, *main_str;
567 bool is_tp = false;
568 int err;
569
570 /* Initialize per-program probing setting */
571 probe_conf.no_inlines = false;
572 probe_conf.force_add = false;
573
574 priv = calloc(sizeof(*priv), 1);
575 if (!priv) {
576 pr_debug("bpf: failed to alloc priv\n");
577 return -ENOMEM;
578 }
579 pev = &priv->pev;
580
581 config_str = bpf_program__section_name(prog);
582 pr_debug("bpf: config program '%s'\n", config_str);
583 err = parse_prog_config(config_str, &main_str, &is_tp, pev);
584 if (err)
585 goto errout;
586
587 if (is_tp) {
588 char *s = strchr(main_str, ':');
589
590 priv->is_tp = true;
591 priv->sys_name = strndup(main_str, s - main_str);
592 priv->evt_name = strdup(s + 1);
593 goto set_priv;
594 }
595
596 if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
597 pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
598 config_str, PERF_BPF_PROBE_GROUP);
599 err = -BPF_LOADER_ERRNO__GROUP;
600 goto errout;
601 } else if (!pev->group)
602 pev->group = strdup(PERF_BPF_PROBE_GROUP);
603
604 if (!pev->group) {
605 pr_debug("bpf: strdup failed\n");
606 err = -ENOMEM;
607 goto errout;
608 }
609
610 if (!pev->event) {
611 pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
612 config_str);
613 err = -BPF_LOADER_ERRNO__EVENTNAME;
614 goto errout;
615 }
616 pr_debug("bpf: config '%s' is ok\n", config_str);
617
618 set_priv:
619 err = program_set_priv(prog, priv);
620 if (err) {
621 pr_debug("Failed to set priv for program '%s'\n", config_str);
622 goto errout;
623 }
624
625 return 0;
626
627 errout:
628 if (pev)
629 clear_perf_probe_event(pev);
630 free(priv);
631 return err;
632 }
633
static int bpf__prepare_probe(void)
635 {
636 static int err = 0;
637 static bool initialized = false;
638
	/*
	 * Make err static, so if init failed the first time, bpf__prepare_probe()
	 * fails each time without calling init_probe_symbol_maps multiple
	 * times.
	 */
644 if (initialized)
645 return err;
646
647 initialized = true;
648 err = init_probe_symbol_maps(false);
649 if (err < 0)
650 pr_debug("Failed to init_probe_symbol_maps\n");
651 probe_conf.max_probes = MAX_PROBES;
652 return err;
653 }
654
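/*
 * Generate the prologue for prologue type 'n' and prepend it to the
 * original instructions, returning the combined program in 'res'.
 */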
655 static int
preproc_gen_prologue(struct bpf_program *prog, int n,
657 const struct bpf_insn *orig_insns, int orig_insns_cnt,
658 struct bpf_preproc_result *res)
659 {
660 struct bpf_prog_priv *priv = program_priv(prog);
661 struct probe_trace_event *tev;
662 struct perf_probe_event *pev;
663 struct bpf_insn *buf;
664 size_t prologue_cnt = 0;
665 int i, err;
666
667 if (IS_ERR_OR_NULL(priv) || priv->is_tp)
668 goto errout;
669
670 pev = &priv->pev;
671
672 if (n < 0 || n >= priv->nr_types)
673 goto errout;
674
	/* Find a tev that belongs to that type */
676 for (i = 0; i < pev->ntevs; i++) {
677 if (priv->type_mapping[i] == n)
678 break;
679 }
680
681 if (i >= pev->ntevs) {
682 pr_debug("Internal error: prologue type %d not found\n", n);
683 return -BPF_LOADER_ERRNO__PROLOGUE;
684 }
685
686 tev = &pev->tevs[i];
687
688 buf = priv->insns_buf;
689 err = bpf__gen_prologue(tev->args, tev->nargs,
690 buf, &prologue_cnt,
691 BPF_MAXINSNS - orig_insns_cnt);
692 if (err) {
693 const char *title;
694
695 title = bpf_program__section_name(prog);
696 pr_debug("Failed to generate prologue for program %s\n",
697 title);
698 return err;
699 }
700
701 memcpy(&buf[prologue_cnt], orig_insns,
702 sizeof(struct bpf_insn) * orig_insns_cnt);
703
704 res->new_insn_ptr = buf;
705 res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
706 return 0;
707
708 errout:
709 pr_debug("Internal error in preproc_gen_prologue\n");
710 return -BPF_LOADER_ERRNO__PROLOGUE;
711 }
712
/*
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * I can prove it, but this margin is too narrow to contain the proof.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
718 {
719 int i, ret;
720 const struct probe_trace_event *tev1 =
721 *(const struct probe_trace_event **)ptev1;
722 const struct probe_trace_event *tev2 =
723 *(const struct probe_trace_event **)ptev2;
724
725 ret = tev2->nargs - tev1->nargs;
726 if (ret)
727 return ret;
728
729 for (i = 0; i < tev1->nargs; i++) {
730 struct probe_trace_arg *arg1, *arg2;
731 struct probe_trace_arg_ref *ref1, *ref2;
732
733 arg1 = &tev1->args[i];
734 arg2 = &tev2->args[i];
735
736 ret = strcmp(arg1->value, arg2->value);
737 if (ret)
738 return ret;
739
740 ref1 = arg1->ref;
741 ref2 = arg2->ref;
742
743 while (ref1 && ref2) {
744 ret = ref2->offset - ref1->offset;
745 if (ret)
746 return ret;
747
748 ref1 = ref1->next;
749 ref2 = ref2->next;
750 }
751
752 if (ref1 || ref2)
753 return ref2 ? 1 : -1;
754 }
755
756 return 0;
757 }
758
/*
 * Assign a type number to each tev in a pev.
 * mapping is an array with the same number of slots as tevs in that pev.
 * nr_types will be set to the number of types.
 */
static int map_prologue(struct perf_probe_event *pev, int *mapping,
765 int *nr_types)
766 {
767 int i, type = 0;
768 struct probe_trace_event **ptevs;
769
770 size_t array_sz = sizeof(*ptevs) * pev->ntevs;
771
772 ptevs = malloc(array_sz);
773 if (!ptevs) {
774 pr_debug("Not enough memory: alloc ptevs failed\n");
775 return -ENOMEM;
776 }
777
778 pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
779 for (i = 0; i < pev->ntevs; i++)
780 ptevs[i] = &pev->tevs[i];
781
782 qsort(ptevs, pev->ntevs, sizeof(*ptevs),
783 compare_tev_args);
784
785 for (i = 0; i < pev->ntevs; i++) {
786 int n;
787
788 n = ptevs[i] - pev->tevs;
789 if (i == 0) {
790 mapping[n] = type;
791 pr_debug("mapping[%d]=%d\n", n, type);
792 continue;
793 }
794
795 if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
796 mapping[n] = type;
797 else
798 mapping[n] = ++type;
799
800 pr_debug("mapping[%d]=%d\n", n, mapping[n]);
801 }
802 free(ptevs);
803 *nr_types = type + 1;
804
805 return 0;
806 }
807
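/*
 * Decide whether this program needs a prologue (i.e. any of its tevs has
 * arguments) and, if so, allocate the buffers used to build and track the
 * per-type prologue programs.
 */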
static int hook_load_preprocessor(struct bpf_program *prog)
809 {
810 struct bpf_prog_priv *priv = program_priv(prog);
811 struct perf_probe_event *pev;
812 bool need_prologue = false;
813 int i;
814
815 if (IS_ERR_OR_NULL(priv)) {
816 pr_debug("Internal error when hook preprocessor\n");
817 return -BPF_LOADER_ERRNO__INTERNAL;
818 }
819
820 if (priv->is_tp) {
821 priv->need_prologue = false;
822 return 0;
823 }
824
825 pev = &priv->pev;
826 for (i = 0; i < pev->ntevs; i++) {
827 struct probe_trace_event *tev = &pev->tevs[i];
828
829 if (tev->nargs > 0) {
830 need_prologue = true;
831 break;
832 }
833 }
834
	/*
	 * Since none of the tevs have arguments, there is no need to
	 * generate a prologue.
	 */
839 if (!need_prologue) {
840 priv->need_prologue = false;
841 return 0;
842 }
843
844 priv->need_prologue = true;
845 priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
846 if (!priv->insns_buf) {
847 pr_debug("Not enough memory: alloc insns_buf failed\n");
848 return -ENOMEM;
849 }
850
851 priv->prologue_fds = malloc(sizeof(int) * pev->ntevs);
852 if (!priv->prologue_fds) {
853 pr_debug("Not enough memory: alloc prologue fds failed\n");
854 return -ENOMEM;
855 }
856 memset(priv->prologue_fds, -1, sizeof(int) * pev->ntevs);
857
858 priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
859 if (!priv->type_mapping) {
860 pr_debug("Not enough memory: alloc type_mapping failed\n");
861 return -ENOMEM;
862 }
863 memset(priv->type_mapping, -1,
864 sizeof(int) * pev->ntevs);
865
866 return map_prologue(pev, priv->type_mapping, &priv->nr_types);
867 }
868
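/*
 * Configure each program in the object from its section name, create the
 * corresponding kprobes/uprobes (tracepoint programs are only type-tagged),
 * and hook the prologue pre-processing for programs that need it.
 */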
int bpf__probe(struct bpf_object *obj)
870 {
871 int err = 0;
872 struct bpf_program *prog;
873 struct bpf_prog_priv *priv;
874 struct perf_probe_event *pev;
875
876 err = bpf__prepare_probe();
877 if (err) {
878 pr_debug("bpf__prepare_probe failed\n");
879 return err;
880 }
881
882 bpf_object__for_each_program(prog, obj) {
883 err = config_bpf_program(prog);
884 if (err)
885 goto out;
886
887 priv = program_priv(prog);
888 if (IS_ERR_OR_NULL(priv)) {
889 if (!priv)
890 err = -BPF_LOADER_ERRNO__INTERNAL;
891 else
892 err = PTR_ERR(priv);
893 goto out;
894 }
895
896 if (priv->is_tp) {
897 bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
898 continue;
899 }
900
901 bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
902 pev = &priv->pev;
903
904 err = convert_perf_probe_events(pev, 1);
905 if (err < 0) {
906 pr_debug("bpf_probe: failed to convert perf probe events\n");
907 goto out;
908 }
909
910 err = apply_perf_probe_events(pev, 1);
911 if (err < 0) {
912 pr_debug("bpf_probe: failed to apply perf probe events\n");
913 goto out;
914 }
915
		/*
		 * After probing, let's consider the prologue, which
		 * adds argument fetchers to BPF programs.
		 *
		 * hook_load_preprocessor() hooks a pre-processor
		 * to the bpf_program, letting it generate the prologue
		 * dynamically during loading.
		 */
924 err = hook_load_preprocessor(prog);
925 if (err)
926 goto out;
927 }
928 out:
929 return err < 0 ? err : 0;
930 }
931
932 #define EVENTS_WRITE_BUFSIZE 4096
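/* Remove all probe events that were created for this object's programs. */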
int bpf__unprobe(struct bpf_object *obj)
934 {
935 int err, ret = 0;
936 struct bpf_program *prog;
937
938 bpf_object__for_each_program(prog, obj) {
939 struct bpf_prog_priv *priv = program_priv(prog);
940 int i;
941
942 if (IS_ERR_OR_NULL(priv) || priv->is_tp)
943 continue;
944
945 for (i = 0; i < priv->pev.ntevs; i++) {
946 struct probe_trace_event *tev = &priv->pev.tevs[i];
947 char name_buf[EVENTS_WRITE_BUFSIZE];
948 struct strfilter *delfilter;
949
950 snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
951 "%s:%s", tev->group, tev->event);
952 name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';
953
954 delfilter = strfilter__new(name_buf, NULL);
955 if (!delfilter) {
956 pr_debug("Failed to create filter for unprobing\n");
957 ret = -ENOMEM;
958 continue;
959 }
960
961 err = del_perf_probe_events(delfilter);
962 strfilter__delete(delfilter);
963 if (err) {
964 pr_debug("Failed to delete %s\n", name_buf);
965 ret = err;
966 continue;
967 }
968 }
969 }
970 return ret;
971 }
972
static int bpf_object__load_prologue(struct bpf_object *obj)
974 {
975 int init_cnt = ARRAY_SIZE(prologue_init_insn);
976 const struct bpf_insn *orig_insns;
977 struct bpf_preproc_result res;
978 struct perf_probe_event *pev;
979 struct bpf_program *prog;
980 int orig_insns_cnt;
981
982 bpf_object__for_each_program(prog, obj) {
983 struct bpf_prog_priv *priv = program_priv(prog);
984 int err, i, fd;
985
986 if (IS_ERR_OR_NULL(priv)) {
987 pr_debug("bpf: failed to get private field\n");
988 return -BPF_LOADER_ERRNO__INTERNAL;
989 }
990
991 if (!priv->need_prologue)
992 continue;
993
		/*
		 * For each program that needs a prologue we do the following:
		 *
		 * - take its current instructions and use them
		 *   to generate the new code with the prologue
		 * - load the new instructions with bpf_prog_load()
		 *   and keep the fd in prologue_fds
		 * - the new fd will be used in bpf__foreach_event()
		 *   to connect this program with a perf evsel
		 */
1004 orig_insns = bpf_program__insns(prog);
1005 orig_insns_cnt = bpf_program__insn_cnt(prog);
1006
1007 pev = &priv->pev;
1008 for (i = 0; i < pev->ntevs; i++) {
			/*
			 * Skip the artificial prologue_init_insn instructions
			 * (init_cnt), so the prologue can be generated in
			 * their place.
			 */
1014 err = preproc_gen_prologue(prog, i,
1015 orig_insns + init_cnt,
1016 orig_insns_cnt - init_cnt,
1017 &res);
1018 if (err)
1019 return err;
1020
1021 fd = bpf_prog_load(bpf_program__get_type(prog),
1022 bpf_program__name(prog), "GPL",
1023 res.new_insn_ptr,
1024 res.new_insn_cnt, NULL);
1025 if (fd < 0) {
1026 char bf[128];
1027
1028 libbpf_strerror(-errno, bf, sizeof(bf));
1029 pr_debug("bpf: load objects with prologue failed: err=%d: (%s)\n",
1030 -errno, bf);
1031 return -errno;
1032 }
1033 priv->prologue_fds[i] = fd;
1034 }
1035 /*
1036 * We no longer need the original program,
1037 * we can unload it.
1038 */
1039 bpf_program__unload(prog);
1040 }
1041 return 0;
1042 }
1043
int bpf__load(struct bpf_object *obj)
1045 {
1046 int err;
1047
1048 err = bpf_object__load(obj);
1049 if (err) {
1050 char bf[128];
1051 libbpf_strerror(err, bf, sizeof(bf));
1052 pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
1053 return err;
1054 }
1055 return bpf_object__load_prologue(obj);
1056 }
1057
int bpf__foreach_event(struct bpf_object *obj,
1059 bpf_prog_iter_callback_t func,
1060 void *arg)
1061 {
1062 struct bpf_program *prog;
1063 int err;
1064
1065 bpf_object__for_each_program(prog, obj) {
1066 struct bpf_prog_priv *priv = program_priv(prog);
1067 struct probe_trace_event *tev;
1068 struct perf_probe_event *pev;
1069 int i, fd;
1070
1071 if (IS_ERR_OR_NULL(priv)) {
1072 pr_debug("bpf: failed to get private field\n");
1073 return -BPF_LOADER_ERRNO__INTERNAL;
1074 }
1075
1076 if (priv->is_tp) {
1077 fd = bpf_program__fd(prog);
1078 err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg);
1079 if (err) {
				pr_debug("bpf: tracepoint callback failed, stop iterating\n");
1081 return err;
1082 }
1083 continue;
1084 }
1085
1086 pev = &priv->pev;
1087 for (i = 0; i < pev->ntevs; i++) {
1088 tev = &pev->tevs[i];
1089
1090 if (priv->need_prologue)
1091 fd = priv->prologue_fds[i];
1092 else
1093 fd = bpf_program__fd(prog);
1094
1095 if (fd < 0) {
1096 pr_debug("bpf: failed to get file descriptor\n");
1097 return fd;
1098 }
1099
1100 err = (*func)(tev->group, tev->event, fd, obj, arg);
1101 if (err) {
				pr_debug("bpf: callback failed, stop iterating\n");
1103 return err;
1104 }
1105 }
1106 }
1107 return 0;
1108 }
1109
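/*
 * Map configuration is recorded as a list of "ops" attached to each map:
 * every op sets either a plain value or an evsel fd, for all keys or for
 * the index ranges given in the "map:<name>.<opt>[...]" event term.
 */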
1110 enum bpf_map_op_type {
1111 BPF_MAP_OP_SET_VALUE,
1112 BPF_MAP_OP_SET_EVSEL,
1113 };
1114
1115 enum bpf_map_key_type {
1116 BPF_MAP_KEY_ALL,
1117 BPF_MAP_KEY_RANGES,
1118 };
1119
1120 struct bpf_map_op {
1121 struct list_head list;
1122 enum bpf_map_op_type op_type;
1123 enum bpf_map_key_type key_type;
1124 union {
1125 struct parse_events_array array;
1126 } k;
1127 union {
1128 u64 value;
1129 struct evsel *evsel;
1130 } v;
1131 };
1132
1133 struct bpf_map_priv {
1134 struct list_head ops_list;
1135 };
1136
1137 static void
bpf_map_op__delete(struct bpf_map_op *op)
1139 {
1140 if (!list_empty(&op->list))
1141 list_del_init(&op->list);
1142 if (op->key_type == BPF_MAP_KEY_RANGES)
1143 parse_events__clear_array(&op->k.array);
1144 free(op);
1145 }
1146
1147 static void
bpf_map_priv__purge(struct bpf_map_priv *priv)
1149 {
1150 struct bpf_map_op *pos, *n;
1151
1152 list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
1153 list_del_init(&pos->list);
1154 bpf_map_op__delete(pos);
1155 }
1156 }
1157
1158 static void
bpf_map_priv__clear(const struct bpf_map *map __maybe_unused,
1160 void *_priv)
1161 {
1162 struct bpf_map_priv *priv = _priv;
1163
1164 bpf_map_priv__purge(priv);
1165 free(priv);
1166 }
1167
static void *map_priv(const struct bpf_map *map)
1169 {
1170 void *priv;
1171
1172 if (IS_ERR_OR_NULL(bpf_map_hash))
1173 return NULL;
1174 if (!hashmap__find(bpf_map_hash, map, &priv))
1175 return NULL;
1176 return priv;
1177 }
1178
static void bpf_map_hash_free(void)
1180 {
1181 struct hashmap_entry *cur;
1182 size_t bkt;
1183
1184 if (IS_ERR_OR_NULL(bpf_map_hash))
1185 return;
1186
1187 hashmap__for_each_entry(bpf_map_hash, cur, bkt)
1188 bpf_map_priv__clear(cur->key, cur->value);
1189
1190 hashmap__free(bpf_map_hash);
1191 bpf_map_hash = NULL;
1192 }
1193
static int map_set_priv(struct bpf_map *map, void *priv)
1195 {
1196 void *old_priv;
1197
1198 if (WARN_ON_ONCE(IS_ERR(bpf_map_hash)))
1199 return PTR_ERR(bpf_program_hash);
1200
1201 if (!bpf_map_hash) {
1202 bpf_map_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
1203 if (IS_ERR(bpf_map_hash))
1204 return PTR_ERR(bpf_map_hash);
1205 }
1206
1207 old_priv = map_priv(map);
1208 if (old_priv) {
1209 bpf_map_priv__clear(map, old_priv);
1210 return hashmap__set(bpf_map_hash, map, priv, NULL, NULL);
1211 }
1212 return hashmap__add(bpf_map_hash, map, priv);
1213 }
1214
1215 static int
bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
1217 {
1218 op->key_type = BPF_MAP_KEY_ALL;
1219 if (!term)
1220 return 0;
1221
1222 if (term->array.nr_ranges) {
1223 size_t memsz = term->array.nr_ranges *
1224 sizeof(op->k.array.ranges[0]);
1225
1226 op->k.array.ranges = memdup(term->array.ranges, memsz);
1227 if (!op->k.array.ranges) {
1228 pr_debug("Not enough memory to alloc indices for map\n");
1229 return -ENOMEM;
1230 }
1231 op->key_type = BPF_MAP_KEY_RANGES;
1232 op->k.array.nr_ranges = term->array.nr_ranges;
1233 }
1234 return 0;
1235 }
1236
1237 static struct bpf_map_op *
bpf_map_op__new(struct parse_events_term *term)
1239 {
1240 struct bpf_map_op *op;
1241 int err;
1242
1243 op = zalloc(sizeof(*op));
1244 if (!op) {
1245 pr_debug("Failed to alloc bpf_map_op\n");
1246 return ERR_PTR(-ENOMEM);
1247 }
1248 INIT_LIST_HEAD(&op->list);
1249
1250 err = bpf_map_op_setkey(op, term);
1251 if (err) {
1252 free(op);
1253 return ERR_PTR(err);
1254 }
1255 return op;
1256 }
1257
1258 static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
1260 {
1261 struct bpf_map_op *newop;
1262
1263 newop = memdup(op, sizeof(*op));
1264 if (!newop) {
1265 pr_debug("Failed to alloc bpf_map_op\n");
1266 return NULL;
1267 }
1268
1269 INIT_LIST_HEAD(&newop->list);
1270 if (op->key_type == BPF_MAP_KEY_RANGES) {
1271 size_t memsz = op->k.array.nr_ranges *
1272 sizeof(op->k.array.ranges[0]);
1273
1274 newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
1275 if (!newop->k.array.ranges) {
1276 pr_debug("Failed to alloc indices for map\n");
1277 free(newop);
1278 return NULL;
1279 }
1280 }
1281
1282 return newop;
1283 }
1284
1285 static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
1287 {
1288 struct bpf_map_priv *newpriv;
1289 struct bpf_map_op *pos, *newop;
1290
1291 newpriv = zalloc(sizeof(*newpriv));
1292 if (!newpriv) {
1293 pr_debug("Not enough memory to alloc map private\n");
1294 return NULL;
1295 }
1296 INIT_LIST_HEAD(&newpriv->ops_list);
1297
1298 list_for_each_entry(pos, &priv->ops_list, list) {
1299 newop = bpf_map_op__clone(pos);
1300 if (!newop) {
1301 bpf_map_priv__purge(newpriv);
1302 return NULL;
1303 }
1304 list_add_tail(&newop->list, &newpriv->ops_list);
1305 }
1306
1307 return newpriv;
1308 }
1309
1310 static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
1312 {
1313 const char *map_name = bpf_map__name(map);
1314 struct bpf_map_priv *priv = map_priv(map);
1315
1316 if (IS_ERR(priv)) {
1317 pr_debug("Failed to get private from map %s\n", map_name);
1318 return PTR_ERR(priv);
1319 }
1320
1321 if (!priv) {
1322 priv = zalloc(sizeof(*priv));
1323 if (!priv) {
1324 pr_debug("Not enough memory to alloc map private\n");
1325 return -ENOMEM;
1326 }
1327 INIT_LIST_HEAD(&priv->ops_list);
1328
1329 if (map_set_priv(map, priv)) {
1330 free(priv);
1331 return -BPF_LOADER_ERRNO__INTERNAL;
1332 }
1333 }
1334
1335 list_add_tail(&op->list, &priv->ops_list);
1336 return 0;
1337 }
1338
1339 static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
1341 {
1342 struct bpf_map_op *op;
1343 int err;
1344
1345 op = bpf_map_op__new(term);
1346 if (IS_ERR(op))
1347 return op;
1348
1349 err = bpf_map__add_op(map, op);
1350 if (err) {
1351 bpf_map_op__delete(op);
1352 return ERR_PTR(err);
1353 }
1354 return op;
1355 }
1356
1357 static int
__bpf_map__config_value(struct bpf_map *map,
1359 struct parse_events_term *term)
1360 {
1361 struct bpf_map_op *op;
1362 const char *map_name = bpf_map__name(map);
1363
1364 if (!map) {
1365 pr_debug("Map '%s' is invalid\n", map_name);
1366 return -BPF_LOADER_ERRNO__INTERNAL;
1367 }
1368
1369 if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
1370 pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
1371 map_name);
1372 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1373 }
1374 if (bpf_map__key_size(map) < sizeof(unsigned int)) {
1375 pr_debug("Map %s has incorrect key size\n", map_name);
1376 return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
1377 }
1378 switch (bpf_map__value_size(map)) {
1379 case 1:
1380 case 2:
1381 case 4:
1382 case 8:
1383 break;
1384 default:
1385 pr_debug("Map %s has incorrect value size\n", map_name);
1386 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1387 }
1388
1389 op = bpf_map__add_newop(map, term);
1390 if (IS_ERR(op))
1391 return PTR_ERR(op);
1392 op->op_type = BPF_MAP_OP_SET_VALUE;
1393 op->v.value = term->val.num;
1394 return 0;
1395 }
1396
1397 static int
bpf_map__config_value(struct bpf_map *map,
1399 struct parse_events_term *term,
1400 struct evlist *evlist __maybe_unused)
1401 {
1402 if (!term->err_val) {
1403 pr_debug("Config value not set\n");
1404 return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1405 }
1406
1407 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
1408 pr_debug("ERROR: wrong value type for 'value'\n");
1409 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1410 }
1411
1412 return __bpf_map__config_value(map, term);
1413 }
1414
1415 static int
__bpf_map__config_event(struct bpf_map *map,
1417 struct parse_events_term *term,
1418 struct evlist *evlist)
1419 {
1420 struct bpf_map_op *op;
1421 const char *map_name = bpf_map__name(map);
1422 struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);
1423
1424 if (!evsel) {
1425 pr_debug("Event (for '%s') '%s' doesn't exist\n",
1426 map_name, term->val.str);
1427 return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
1428 }
1429
1430 if (!map) {
1431 pr_debug("Map '%s' is invalid\n", map_name);
1432 return PTR_ERR(map);
1433 }
1434
1435 /*
1436 * No need to check key_size and value_size:
1437 * kernel has already checked them.
1438 */
1439 if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
1440 pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
1441 map_name);
1442 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1443 }
1444
1445 op = bpf_map__add_newop(map, term);
1446 if (IS_ERR(op))
1447 return PTR_ERR(op);
1448 op->op_type = BPF_MAP_OP_SET_EVSEL;
1449 op->v.evsel = evsel;
1450 return 0;
1451 }
1452
1453 static int
bpf_map__config_event(struct bpf_map *map,
1455 struct parse_events_term *term,
1456 struct evlist *evlist)
1457 {
1458 if (!term->err_val) {
1459 pr_debug("Config value not set\n");
1460 return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1461 }
1462
1463 if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
1464 pr_debug("ERROR: wrong value type for 'event'\n");
1465 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1466 }
1467
1468 return __bpf_map__config_event(map, term, evlist);
1469 }
1470
1471 struct bpf_obj_config__map_func {
1472 const char *config_opt;
1473 int (*config_func)(struct bpf_map *, struct parse_events_term *,
1474 struct evlist *);
1475 };
1476
1477 struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
1478 {"value", bpf_map__config_value},
1479 {"event", bpf_map__config_event},
1480 };
1481
1482 static int
config_map_indices_range_check(struct parse_events_term *term,
1484 struct bpf_map *map,
1485 const char *map_name)
1486 {
1487 struct parse_events_array *array = &term->array;
1488 unsigned int i;
1489
1490 if (!array->nr_ranges)
1491 return 0;
1492 if (!array->ranges) {
1493 pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
1494 map_name, (int)array->nr_ranges);
1495 return -BPF_LOADER_ERRNO__INTERNAL;
1496 }
1497
1498 if (!map) {
1499 pr_debug("Map '%s' is invalid\n", map_name);
1500 return -BPF_LOADER_ERRNO__INTERNAL;
1501 }
1502
1503 for (i = 0; i < array->nr_ranges; i++) {
1504 unsigned int start = array->ranges[i].start;
1505 size_t length = array->ranges[i].length;
1506 unsigned int idx = start + length - 1;
1507
1508 if (idx >= bpf_map__max_entries(map)) {
1509 pr_debug("ERROR: index %d too large\n", idx);
1510 return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
1511 }
1512 }
1513 return 0;
1514 }
1515
1516 static int
bpf__obj_config_map(struct bpf_object *obj,
1518 struct parse_events_term *term,
1519 struct evlist *evlist,
1520 int *key_scan_pos)
1521 {
1522 /* key is "map:<mapname>.<config opt>" */
1523 char *map_name = strdup(term->config + sizeof("map:") - 1);
1524 struct bpf_map *map;
1525 int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1526 char *map_opt;
1527 size_t i;
1528
1529 if (!map_name)
1530 return -ENOMEM;
1531
1532 map_opt = strchr(map_name, '.');
1533 if (!map_opt) {
1534 pr_debug("ERROR: Invalid map config: %s\n", map_name);
1535 goto out;
1536 }
1537
1538 *map_opt++ = '\0';
1539 if (*map_opt == '\0') {
1540 pr_debug("ERROR: Invalid map option: %s\n", term->config);
1541 goto out;
1542 }
1543
1544 map = bpf_object__find_map_by_name(obj, map_name);
1545 if (!map) {
1546 pr_debug("ERROR: Map %s doesn't exist\n", map_name);
1547 err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
1548 goto out;
1549 }
1550
1551 *key_scan_pos += strlen(map_opt);
1552 err = config_map_indices_range_check(term, map, map_name);
1553 if (err)
1554 goto out;
1555 *key_scan_pos -= strlen(map_opt);
1556
1557 for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
1558 struct bpf_obj_config__map_func *func =
1559 &bpf_obj_config__map_funcs[i];
1560
1561 if (strcmp(map_opt, func->config_opt) == 0) {
1562 err = func->config_func(map, term, evlist);
1563 goto out;
1564 }
1565 }
1566
1567 pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
1568 err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
1569 out:
1570 if (!err)
1571 *key_scan_pos += strlen(map_opt);
1572
1573 free(map_name);
1574 return err;
1575 }
1576
int bpf__config_obj(struct bpf_object *obj,
1578 struct parse_events_term *term,
1579 struct evlist *evlist,
1580 int *error_pos)
1581 {
1582 int key_scan_pos = 0;
1583 int err;
1584
1585 if (!obj || !term || !term->config)
1586 return -EINVAL;
1587
1588 if (strstarts(term->config, "map:")) {
1589 key_scan_pos = sizeof("map:") - 1;
1590 err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
1591 goto out;
1592 }
1593 err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1594 out:
1595 if (error_pos)
1596 *error_pos = key_scan_pos;
1597 return err;
1598
1599 }
1600
1601 typedef int (*map_config_func_t)(const char *name, int map_fd,
1602 const struct bpf_map *map,
1603 struct bpf_map_op *op,
1604 void *pkey, void *arg);
1605
1606 static int
foreach_key_array_all(map_config_func_t func,
1608 void *arg, const char *name,
1609 int map_fd, const struct bpf_map *map,
1610 struct bpf_map_op *op)
1611 {
1612 unsigned int i;
1613 int err;
1614
1615 for (i = 0; i < bpf_map__max_entries(map); i++) {
1616 err = func(name, map_fd, map, op, &i, arg);
1617 if (err) {
1618 pr_debug("ERROR: failed to insert value to %s[%u]\n",
1619 name, i);
1620 return err;
1621 }
1622 }
1623 return 0;
1624 }
1625
1626 static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
1628 const char *name, int map_fd,
1629 const struct bpf_map *map,
1630 struct bpf_map_op *op)
1631 {
1632 unsigned int i, j;
1633 int err;
1634
1635 for (i = 0; i < op->k.array.nr_ranges; i++) {
1636 unsigned int start = op->k.array.ranges[i].start;
1637 size_t length = op->k.array.ranges[i].length;
1638
1639 for (j = 0; j < length; j++) {
1640 unsigned int idx = start + j;
1641
1642 err = func(name, map_fd, map, op, &idx, arg);
1643 if (err) {
1644 pr_debug("ERROR: failed to insert value to %s[%u]\n",
1645 name, idx);
1646 return err;
1647 }
1648 }
1649 }
1650 return 0;
1651 }
1652
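/*
 * Apply every recorded op on this map, invoking 'func' once per key the
 * op covers (all indices of the array, or just the configured ranges).
 */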
1653 static int
bpf_map_config_foreach_key(struct bpf_map *map,
1655 map_config_func_t func,
1656 void *arg)
1657 {
1658 int err, map_fd, type;
1659 struct bpf_map_op *op;
1660 const char *name = bpf_map__name(map);
1661 struct bpf_map_priv *priv = map_priv(map);
1662
1663 if (IS_ERR(priv)) {
1664 pr_debug("ERROR: failed to get private from map %s\n", name);
1665 return -BPF_LOADER_ERRNO__INTERNAL;
1666 }
1667 if (!priv || list_empty(&priv->ops_list)) {
1668 pr_debug("INFO: nothing to config for map %s\n", name);
1669 return 0;
1670 }
1671
1672 if (!map) {
1673 pr_debug("Map '%s' is invalid\n", name);
1674 return -BPF_LOADER_ERRNO__INTERNAL;
1675 }
1676 map_fd = bpf_map__fd(map);
1677 if (map_fd < 0) {
1678 pr_debug("ERROR: failed to get fd from map %s\n", name);
1679 return map_fd;
1680 }
1681
1682 type = bpf_map__type(map);
1683 list_for_each_entry(op, &priv->ops_list, list) {
1684 switch (type) {
1685 case BPF_MAP_TYPE_ARRAY:
1686 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1687 switch (op->key_type) {
1688 case BPF_MAP_KEY_ALL:
1689 err = foreach_key_array_all(func, arg, name,
1690 map_fd, map, op);
1691 break;
1692 case BPF_MAP_KEY_RANGES:
1693 err = foreach_key_array_ranges(func, arg, name,
1694 map_fd, map, op);
1695 break;
1696 default:
1697 pr_debug("ERROR: keytype for map '%s' invalid\n",
1698 name);
1699 return -BPF_LOADER_ERRNO__INTERNAL;
1700 }
1701 if (err)
1702 return err;
1703 break;
1704 default:
1705 pr_debug("ERROR: type of '%s' incorrect\n", name);
1706 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1707 }
1708 }
1709
1710 return 0;
1711 }
1712
1713 static int
apply_config_value_for_key(int map_fd, void *pkey,
1715 size_t val_size, u64 val)
1716 {
1717 int err = 0;
1718
1719 switch (val_size) {
1720 case 1: {
1721 u8 _val = (u8)(val);
1722 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1723 break;
1724 }
1725 case 2: {
1726 u16 _val = (u16)(val);
1727 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1728 break;
1729 }
1730 case 4: {
1731 u32 _val = (u32)(val);
1732 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1733 break;
1734 }
1735 case 8: {
1736 err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
1737 break;
1738 }
1739 default:
1740 pr_debug("ERROR: invalid value size\n");
1741 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1742 }
1743 if (err && errno)
1744 err = -errno;
1745 return err;
1746 }
1747
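/*
 * Store an event fd into a BPF_MAP_TYPE_PERF_EVENT_ARRAY slot, after
 * sanity-checking that the evsel is usable from BPF (single dimension,
 * no inherit, raw/hardware/bpf-output type) and that the key is in range.
 */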
1748 static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
1750 struct evsel *evsel)
1751 {
1752 struct xyarray *xy = evsel->core.fd;
1753 struct perf_event_attr *attr;
1754 unsigned int key, events;
1755 bool check_pass = false;
1756 int *evt_fd;
1757 int err;
1758
1759 if (!xy) {
1760 pr_debug("ERROR: evsel not ready for map %s\n", name);
1761 return -BPF_LOADER_ERRNO__INTERNAL;
1762 }
1763
1764 if (xy->row_size / xy->entry_size != 1) {
1765 pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
1766 name);
1767 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
1768 }
1769
1770 attr = &evsel->core.attr;
1771 if (attr->inherit) {
1772 pr_debug("ERROR: Can't put inherit event into map %s\n", name);
1773 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
1774 }
1775
1776 if (evsel__is_bpf_output(evsel))
1777 check_pass = true;
1778 if (attr->type == PERF_TYPE_RAW)
1779 check_pass = true;
1780 if (attr->type == PERF_TYPE_HARDWARE)
1781 check_pass = true;
1782 if (!check_pass) {
1783 pr_debug("ERROR: Event type is wrong for map %s\n", name);
1784 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
1785 }
1786
1787 events = xy->entries / (xy->row_size / xy->entry_size);
1788 key = *((unsigned int *)pkey);
1789 if (key >= events) {
1790 pr_debug("ERROR: there is no event %d for map %s\n",
1791 key, name);
1792 return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
1793 }
1794 evt_fd = xyarray__entry(xy, key, 0);
1795 err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
1796 if (err && errno)
1797 err = -errno;
1798 return err;
1799 }
1800
1801 static int
apply_obj_config_map_for_key(const char *name, int map_fd,
1803 const struct bpf_map *map,
1804 struct bpf_map_op *op,
1805 void *pkey, void *arg __maybe_unused)
1806 {
1807 int err;
1808
1809 switch (op->op_type) {
1810 case BPF_MAP_OP_SET_VALUE:
1811 err = apply_config_value_for_key(map_fd, pkey,
1812 bpf_map__value_size(map),
1813 op->v.value);
1814 break;
1815 case BPF_MAP_OP_SET_EVSEL:
1816 err = apply_config_evsel_for_key(name, map_fd, pkey,
1817 op->v.evsel);
1818 break;
1819 default:
1820 pr_debug("ERROR: unknown value type for '%s'\n", name);
1821 err = -BPF_LOADER_ERRNO__INTERNAL;
1822 }
1823 return err;
1824 }
1825
1826 static int
apply_obj_config_map(struct bpf_map *map)
1828 {
1829 return bpf_map_config_foreach_key(map,
1830 apply_obj_config_map_for_key,
1831 NULL);
1832 }
1833
1834 static int
apply_obj_config_object(struct bpf_object *obj)
1836 {
1837 struct bpf_map *map;
1838 int err;
1839
1840 bpf_object__for_each_map(map, obj) {
1841 err = apply_obj_config_map(map);
1842 if (err)
1843 return err;
1844 }
1845 return 0;
1846 }
1847
int bpf__apply_obj_config(void)
1849 {
1850 struct bpf_perf_object *perf_obj, *tmp;
1851 int err;
1852
1853 bpf_perf_object__for_each(perf_obj, tmp) {
1854 err = apply_obj_config_object(perf_obj->obj);
1855 if (err)
1856 return err;
1857 }
1858
1859 return 0;
1860 }
1861
1862 #define bpf__perf_for_each_map(map, pobj, tmp) \
1863 bpf_perf_object__for_each(pobj, tmp) \
1864 bpf_object__for_each_map(map, pobj->obj)
1865
1866 #define bpf__perf_for_each_map_named(map, pobj, pobjtmp, name) \
1867 bpf__perf_for_each_map(map, pobj, pobjtmp) \
1868 if (bpf_map__name(map) && (strcmp(name, bpf_map__name(map)) == 0))
1869
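/*
 * Make sure every map named 'name' across all loaded objects has an evsel
 * to write to: reuse an existing configuration as a template if one map
 * already has it, otherwise create a "bpf-output" event and attach it to
 * the still unconfigured maps.
 */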
struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
1871 {
1872 struct bpf_map_priv *tmpl_priv = NULL;
1873 struct bpf_perf_object *perf_obj, *tmp;
1874 struct evsel *evsel = NULL;
1875 struct bpf_map *map;
1876 int err;
1877 bool need_init = false;
1878
1879 bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
1880 struct bpf_map_priv *priv = map_priv(map);
1881
1882 if (IS_ERR(priv))
1883 return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
1884
1885 /*
1886 * No need to check map type: type should have been
1887 * verified by kernel.
1888 */
1889 if (!need_init && !priv)
1890 need_init = !priv;
1891 if (!tmpl_priv && priv)
1892 tmpl_priv = priv;
1893 }
1894
1895 if (!need_init)
1896 return NULL;
1897
1898 if (!tmpl_priv) {
1899 char *event_definition = NULL;
1900
1901 if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
1902 return ERR_PTR(-ENOMEM);
1903
1904 err = parse_event(evlist, event_definition);
1905 free(event_definition);
1906
1907 if (err) {
1908 pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
1909 return ERR_PTR(-err);
1910 }
1911
1912 evsel = evlist__last(evlist);
1913 }
1914
1915 bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
1916 struct bpf_map_priv *priv = map_priv(map);
1917
1918 if (IS_ERR(priv))
1919 return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
1920 if (priv)
1921 continue;
1922
1923 if (tmpl_priv) {
1924 priv = bpf_map_priv__clone(tmpl_priv);
1925 if (!priv)
1926 return ERR_PTR(-ENOMEM);
1927
1928 err = map_set_priv(map, priv);
1929 if (err) {
1930 bpf_map_priv__clear(map, priv);
1931 return ERR_PTR(err);
1932 }
1933 } else if (evsel) {
1934 struct bpf_map_op *op;
1935
1936 op = bpf_map__add_newop(map, NULL);
1937 if (IS_ERR(op))
1938 return ERR_CAST(op);
1939 op->op_type = BPF_MAP_OP_SET_EVSEL;
1940 op->v.evsel = evsel;
1941 }
1942 }
1943
1944 return evsel;
1945 }
1946
int bpf__setup_stdout(struct evlist *evlist)
1948 {
1949 struct evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
1950 return PTR_ERR_OR_ZERO(evsel);
1951 }
1952
1953 #define ERRNO_OFFSET(e) ((e) - __BPF_LOADER_ERRNO__START)
1954 #define ERRCODE_OFFSET(c) ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
1955 #define NR_ERRNO (__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
1956
1957 static const char *bpf_loader_strerror_table[NR_ERRNO] = {
1958 [ERRCODE_OFFSET(CONFIG)] = "Invalid config string",
1959 [ERRCODE_OFFSET(GROUP)] = "Invalid group name",
1960 [ERRCODE_OFFSET(EVENTNAME)] = "No event name found in config string",
1961 [ERRCODE_OFFSET(INTERNAL)] = "BPF loader internal error",
1962 [ERRCODE_OFFSET(COMPILE)] = "Error when compiling BPF scriptlet",
1963 [ERRCODE_OFFSET(PROGCONF_TERM)] = "Invalid program config term in config string",
1964 [ERRCODE_OFFSET(PROLOGUE)] = "Failed to generate prologue",
1965 [ERRCODE_OFFSET(PROLOGUE2BIG)] = "Prologue too big for program",
1966 [ERRCODE_OFFSET(PROLOGUEOOB)] = "Offset out of bound for prologue",
1967 [ERRCODE_OFFSET(OBJCONF_OPT)] = "Invalid object config option",
1968 [ERRCODE_OFFSET(OBJCONF_CONF)] = "Config value not set (missing '=')",
1969 [ERRCODE_OFFSET(OBJCONF_MAP_OPT)] = "Invalid object map config option",
1970 [ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)] = "Target map doesn't exist",
1971 [ERRCODE_OFFSET(OBJCONF_MAP_VALUE)] = "Incorrect value type for map",
1972 [ERRCODE_OFFSET(OBJCONF_MAP_TYPE)] = "Incorrect map type",
1973 [ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)] = "Incorrect map key size",
1974 [ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)] = "Incorrect map value size",
1975 [ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)] = "Event not found for map setting",
1976 [ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)] = "Invalid map size for event setting",
1977 [ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)] = "Event dimension too large",
1978 [ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)] = "Doesn't support inherit event",
1979 [ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)] = "Wrong event type for map",
1980 [ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)] = "Index too large",
1981 };
1982
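/*
 * Translate a bpf-loader/libbpf error number into a human readable string,
 * falling back to libbpf_strerror() or str_error_r() for foreign errors.
 */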
1983 static int
bpf_loader_strerror(int err, char *buf, size_t size)
1985 {
1986 char sbuf[STRERR_BUFSIZE];
1987 const char *msg;
1988
1989 if (!buf || !size)
1990 return -1;
1991
1992 err = err > 0 ? err : -err;
1993
1994 if (err >= __LIBBPF_ERRNO__START)
1995 return libbpf_strerror(err, buf, size);
1996
1997 if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
1998 msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
1999 snprintf(buf, size, "%s", msg);
2000 buf[size - 1] = '\0';
2001 return 0;
2002 }
2003
2004 if (err >= __BPF_LOADER_ERRNO__END)
2005 snprintf(buf, size, "Unknown bpf loader error %d", err);
2006 else
2007 snprintf(buf, size, "%s",
2008 str_error_r(err, sbuf, sizeof(sbuf)));
2009
2010 buf[size - 1] = '\0';
2011 return -1;
2012 }
2013
2014 #define bpf__strerror_head(err, buf, size) \
2015 char sbuf[STRERR_BUFSIZE], *emsg;\
2016 if (!size)\
2017 return 0;\
2018 if (err < 0)\
2019 err = -err;\
2020 bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
2021 emsg = sbuf;\
2022 switch (err) {\
2023 default:\
2024 scnprintf(buf, size, "%s", emsg);\
2025 break;
2026
2027 #define bpf__strerror_entry(val, fmt...)\
2028 case val: {\
2029 scnprintf(buf, size, fmt);\
2030 break;\
2031 }
2032
2033 #define bpf__strerror_end(buf, size)\
2034 }\
2035 buf[size - 1] = '\0';
2036
int bpf__strerror_prepare_load(const char *filename, bool source,
2038 int err, char *buf, size_t size)
2039 {
2040 size_t n;
2041 int ret;
2042
2043 n = snprintf(buf, size, "Failed to load %s%s: ",
2044 filename, source ? " from source" : "");
2045 if (n >= size) {
2046 buf[size - 1] = '\0';
2047 return 0;
2048 }
2049 buf += n;
2050 size -= n;
2051
2052 ret = bpf_loader_strerror(err, buf, size);
2053 buf[size - 1] = '\0';
2054 return ret;
2055 }
2056
int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
2058 int err, char *buf, size_t size)
2059 {
2060 bpf__strerror_head(err, buf, size);
2061 case BPF_LOADER_ERRNO__PROGCONF_TERM: {
2062 scnprintf(buf, size, "%s (add -v to see detail)", emsg);
2063 break;
2064 }
2065 bpf__strerror_entry(EEXIST, "Probe point exist. Try 'perf probe -d \"*\"' and set 'force=yes'");
2066 bpf__strerror_entry(EACCES, "You need to be root");
2067 bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
2068 bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
2069 bpf__strerror_end(buf, size);
2070 return 0;
2071 }
2072
int bpf__strerror_load(struct bpf_object *obj,
2074 int err, char *buf, size_t size)
2075 {
2076 bpf__strerror_head(err, buf, size);
2077 case LIBBPF_ERRNO__KVER: {
2078 unsigned int obj_kver = bpf_object__kversion(obj);
2079 unsigned int real_kver;
2080
2081 if (fetch_kernel_version(&real_kver, NULL, 0)) {
2082 scnprintf(buf, size, "Unable to fetch kernel version");
2083 break;
2084 }
2085
2086 if (obj_kver != real_kver) {
2087 scnprintf(buf, size,
2088 "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
2089 KVER_PARAM(obj_kver),
2090 KVER_PARAM(real_kver));
2091 break;
2092 }
2093
2094 scnprintf(buf, size, "Failed to load program for unknown reason");
2095 break;
2096 }
2097 bpf__strerror_end(buf, size);
2098 return 0;
2099 }
2100
int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
2102 struct parse_events_term *term __maybe_unused,
2103 struct evlist *evlist __maybe_unused,
2104 int *error_pos __maybe_unused, int err,
2105 char *buf, size_t size)
2106 {
2107 bpf__strerror_head(err, buf, size);
2108 bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
2109 "Can't use this config term with this map type");
2110 bpf__strerror_end(buf, size);
2111 return 0;
2112 }
2113
int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
2115 {
2116 bpf__strerror_head(err, buf, size);
2117 bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
2118 "Cannot set event to BPF map in multi-thread tracing");
2119 bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
2120 "%s (Hint: use -i to turn off inherit)", emsg);
2121 bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
2122 "Can only put raw, hardware and BPF output event into a BPF map");
2123 bpf__strerror_end(buf, size);
2124 return 0;
2125 }
2126
int bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
2128 int err, char *buf, size_t size)
2129 {
2130 bpf__strerror_head(err, buf, size);
2131 bpf__strerror_end(buf, size);
2132 return 0;
2133 }
2134