1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2019 Facebook */
3
4 #ifndef _GNU_SOURCE
5 #define _GNU_SOURCE
6 #endif
7 #include <ctype.h>
8 #include <errno.h>
9 #include <fcntl.h>
10 #include <linux/err.h>
11 #include <stdbool.h>
12 #include <stdio.h>
13 #include <string.h>
14 #include <unistd.h>
15 #include <bpf/bpf.h>
16 #include <bpf/libbpf.h>
17 #include <bpf/libbpf_internal.h>
18 #include <sys/types.h>
19 #include <sys/stat.h>
20 #include <sys/mman.h>
21 #include <bpf/btf.h>
22
23 #include "json_writer.h"
24 #include "main.h"
25
26 #define MAX_OBJ_NAME_LEN 64
27
28 static void sanitize_identifier(char *name)
29 {
30 int i;
31
32 for (i = 0; name[i]; i++)
33 if (!isalnum(name[i]) && name[i] != '_')
34 name[i] = '_';
35 }
36
37 static bool str_has_prefix(const char *str, const char *prefix)
38 {
39 return strncmp(str, prefix, strlen(prefix)) == 0;
40 }
41
42 static bool str_has_suffix(const char *str, const char *suffix)
43 {
44 size_t i, n1 = strlen(str), n2 = strlen(suffix);
45
46 if (n1 < n2)
47 return false;
48
49 for (i = 0; i < n2; i++) {
50 if (str[n1 - i - 1] != suffix[n2 - i - 1])
51 return false;
52 }
53
54 return true;
55 }
56
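/* Derive a C identifier from the file's basename: e.g. (illustrative)
 * "/path/to/prog_name.o" becomes "prog_name"; any remaining character
 * that is not valid in a C identifier is replaced with '_'.
 */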
57 static void get_obj_name(char *name, const char *file)
58 {
59 /* Use the GNU version of basename(), which doesn't modify its argument. */
60 strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
61 name[MAX_OBJ_NAME_LEN - 1] = '\0';
62 if (str_has_suffix(name, ".o"))
63 name[strlen(name) - 2] = '\0';
64 sanitize_identifier(name);
65 }
66
67 static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
68 {
69 int i;
70
71 sprintf(guard, "__%s_%s__", obj_name, suffix);
72 for (i = 0; guard[i]; i++)
73 guard[i] = toupper(guard[i]);
74 }
75
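/* Pick the C identifier used for a map in the skeleton. Regular maps
 * use their own name; internal maps (named like "<obj>.rodata") keep
 * only the part after the dot, e.g. (illustrative) "test_p.rodata"
 * becomes "rodata".
 */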
76 static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
77 {
78 static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
79 const char *name = bpf_map__name(map);
80 int i, n;
81
82 if (!bpf_map__is_internal(map)) {
83 snprintf(buf, buf_sz, "%s", name);
84 return true;
85 }
86
87 for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
88 const char *sfx = sfxs[i], *p;
89
90 p = strstr(name, sfx);
91 if (p) {
92 snprintf(buf, buf_sz, "%s", p + 1);
93 sanitize_identifier(buf);
94 return true;
95 }
96 }
97
98 return false;
99 }
100
101 static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
102 {
103 static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
104 int i, n;
105
106 for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
107 const char *pfx = pfxs[i];
108
109 if (str_has_prefix(sec_name, pfx)) {
110 snprintf(buf, buf_sz, "%s", sec_name + 1);
111 sanitize_identifier(buf);
112 return true;
113 }
114 }
115
116 return false;
117 }
118
119 static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
120 {
121 vprintf(fmt, args);
122 }
123
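/* Emit a struct mirroring one DATASEC's memory layout; for an object
 * "test" with a .bss containing one int, the output would be roughly
 * (names illustrative):
 *
 *	struct test__bss {
 *		int my_counter;
 *	} *bss;
 */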
124 static int codegen_datasec_def(struct bpf_object *obj,
125 struct btf *btf,
126 struct btf_dump *d,
127 const struct btf_type *sec,
128 const char *obj_name)
129 {
130 const char *sec_name = btf__name_by_offset(btf, sec->name_off);
131 const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
132 int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
133 char var_ident[256], sec_ident[256];
134 bool strip_mods = false;
135
136 if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
137 return 0;
138
139 if (strcmp(sec_name, ".kconfig") != 0)
140 strip_mods = true;
141
142 printf(" struct %s__%s {\n", obj_name, sec_ident);
143 for (i = 0; i < vlen; i++, sec_var++) {
144 const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
145 const char *var_name = btf__name_by_offset(btf, var->name_off);
146 DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
147 .field_name = var_ident,
148 .indent_level = 2,
149 .strip_mods = strip_mods,
150 );
151 int need_off = sec_var->offset, align_off, align;
152 __u32 var_type_id = var->type;
153
154 /* static variables are not exposed through BPF skeleton */
155 if (btf_var(var)->linkage == BTF_VAR_STATIC)
156 continue;
157
158 if (off > need_off) {
159 p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
160 sec_name, i, need_off, off);
161 return -EINVAL;
162 }
163
164 align = btf__align_of(btf, var->type);
165 if (align <= 0) {
166 p_err("Failed to determine alignment of variable '%s': %d",
167 var_name, align);
168 return -EINVAL;
169 }
170 /* Assume 32-bit architectures when generating data section
171 * struct memory layout. Given bpftool can't know which target
172 * host architecture it's emitting skeleton for, we need to be
173 * conservative and assume 32-bit one to ensure enough padding
174 * bytes are generated for pointer and long types. This will
175 * still work correctly for 64-bit architectures, because in
176 * the worst case we'll generate unnecessary padding field,
177 * which on 64-bit architectures is not strictly necessary and
178 * would be handled by natural 8-byte alignment. But it still
179 * will be a correct memory layout, based on recorded offsets
180 * in BTF.
181 */
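		/* E.g. (hypothetical): an int at offset 0 followed by a
		 * pointer recorded at offset 8 gives off=4 vs. need_off=8,
		 * so a "char __pad0[4];" member is emitted below.
		 */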
182 if (align > 4)
183 align = 4;
184
185 align_off = (off + align - 1) / align * align;
186 if (align_off != need_off) {
187 printf("\t\tchar __pad%d[%d];\n",
188 pad_cnt, need_off - off);
189 pad_cnt++;
190 }
191
192 /* sanitize variable name, e.g., for static vars inside
193 * a function, its name is '<function name>.<variable name>',
194 * which we'll turn into '<function name>_<variable name>'
195 */
196 var_ident[0] = '\0';
197 strncat(var_ident, var_name, sizeof(var_ident) - 1);
198 sanitize_identifier(var_ident);
199
200 printf("\t\t");
201 err = btf_dump__emit_type_decl(d, var_type_id, &opts);
202 if (err)
203 return err;
204 printf(";\n");
205
206 off = sec_var->offset + sec_var->size;
207 }
208 printf(" } *%s;\n", sec_ident);
209 return 0;
210 }
211
212 static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
213 {
214 int n = btf__type_cnt(btf), i;
215 char sec_ident[256];
216
217 for (i = 1; i < n; i++) {
218 const struct btf_type *t = btf__type_by_id(btf, i);
219 const char *name;
220
221 if (!btf_is_datasec(t))
222 continue;
223
224 name = btf__str_by_offset(btf, t->name_off);
225 if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
226 continue;
227
228 if (strcmp(sec_ident, map_ident) == 0)
229 return t;
230 }
231 return NULL;
232 }
233
234 static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
235 {
236 if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
237 return false;
238
239 if (!get_map_ident(map, buf, sz))
240 return false;
241
242 return true;
243 }
244
245 static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
246 {
247 struct btf *btf = bpf_object__btf(obj);
248 struct btf_dump *d;
249 struct bpf_map *map;
250 const struct btf_type *sec;
251 char map_ident[256];
252 int err = 0;
253
254 d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
255 err = libbpf_get_error(d);
256 if (err)
257 return err;
258
259 bpf_object__for_each_map(map, obj) {
260 /* only generate definitions for memory-mapped internal maps */
261 if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
262 continue;
263
264 sec = find_type_for_map(btf, map_ident);
265
266 /* In some cases (e.g., sections like .rodata.cst16 containing
267 * compiler-allocated string constants only) there will be
268 * special internal maps with no corresponding DATASEC BTF
269 * type. In such cases, generate an empty struct for each such
270 * map. It will still be memory-mapped and its contents
271 * accessible from user-space through the BPF skeleton.
272 */
273 if (!sec) {
274 printf(" struct %s__%s {\n", obj_name, map_ident);
275 printf(" } *%s;\n", map_ident);
276 } else {
277 err = codegen_datasec_def(obj, btf, d, sec, obj_name);
278 if (err)
279 goto out;
280 }
281 }
282
283
284 out:
285 btf_dump__free(d);
286 return err;
287 }
288
289 static bool btf_is_ptr_to_func_proto(const struct btf *btf,
290 const struct btf_type *v)
291 {
292 return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
293 }
294
295 static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
296 {
297 struct btf *btf = bpf_object__btf(obj);
298 struct btf_dump *d;
299 struct bpf_map *map;
300 const struct btf_type *sec, *var;
301 const struct btf_var_secinfo *sec_var;
302 int i, err = 0, vlen;
303 char map_ident[256], sec_ident[256];
304 bool strip_mods = false, needs_typeof = false;
305 const char *sec_name, *var_name;
306 __u32 var_type_id;
307
308 d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
309 if (!d)
310 return -errno;
311
312 bpf_object__for_each_map(map, obj) {
313 /* only generate definitions for memory-mapped internal maps */
314 if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
315 continue;
316
317 sec = find_type_for_map(btf, map_ident);
318 if (!sec)
319 continue;
320
321 sec_name = btf__name_by_offset(btf, sec->name_off);
322 if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
323 continue;
324
325 strip_mods = strcmp(sec_name, ".kconfig") != 0;
326 printf(" struct %s__%s {\n", obj_name, sec_ident);
327
328 sec_var = btf_var_secinfos(sec);
329 vlen = btf_vlen(sec);
330 for (i = 0; i < vlen; i++, sec_var++) {
331 DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
332 .indent_level = 2,
333 .strip_mods = strip_mods,
334 /* we'll print the name separately */
335 .field_name = "",
336 );
337
338 var = btf__type_by_id(btf, sec_var->type);
339 var_name = btf__name_by_offset(btf, var->name_off);
340 var_type_id = var->type;
341
342 /* static variables are not exposed through BPF skeleton */
343 if (btf_var(var)->linkage == BTF_VAR_STATIC)
344 continue;
345
346 /* The datasec member has KIND_VAR but we want the
347 * underlying type of the variable (e.g. KIND_INT).
348 */
349 var = skip_mods_and_typedefs(btf, var->type, NULL);
350
351 printf("\t\t");
352 /* Func and array members require special handling.
353 * Instead of producing `typename *var`, they produce
354 * `typeof(typename) *var`. This allows us to keep a
355 * similar syntax where the identifier is just prefixed
356 * by *, allowing us to ignore C declaration minutiae.
357 */
358 needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
359 if (needs_typeof)
360 printf("typeof(");
361
362 err = btf_dump__emit_type_decl(d, var_type_id, &opts);
363 if (err)
364 goto out;
365
366 if (needs_typeof)
367 printf(")");
368
369 printf(" *%s;\n", var_name);
370 }
371 printf(" } %s;\n", sec_ident);
372 }
373
374 out:
375 btf_dump__free(d);
376 return err;
377 }
378
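/* printf-style code emitter. The template's first line must contain
 * only tabs and sets the baseline indentation, which is stripped from
 * every subsequent line (trailing whitespace is trimmed too), so
 * templates can be indented to match the surrounding C code. A minimal
 * sketch:
 *
 *	codegen("\
 *		\n\
 *		struct %1$s;	\n\
 *		", obj_name);
 *
 * emits "struct <obj_name>;" starting at column 0.
 */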
379 static void codegen(const char *template, ...)
380 {
381 const char *src, *end;
382 int skip_tabs = 0, n;
383 char *s, *dst;
384 va_list args;
385 char c;
386
387 n = strlen(template);
388 s = malloc(n + 1);
389 if (!s)
390 exit(-1);
391 src = template;
392 dst = s;
393
394 /* find out "baseline" indentation to skip */
395 while ((c = *src++)) {
396 if (c == '\t') {
397 skip_tabs++;
398 } else if (c == '\n') {
399 break;
400 } else {
401 p_err("unrecognized character at pos %td in template '%s': '%c'",
402 src - template - 1, template, c);
403 free(s);
404 exit(-1);
405 }
406 }
407
408 while (*src) {
409 /* skip baseline indentation tabs */
410 for (n = skip_tabs; n > 0; n--, src++) {
411 if (*src != '\t') {
412 p_err("not enough tabs at pos %td in template '%s'",
413 src - template - 1, template);
414 free(s);
415 exit(-1);
416 }
417 }
418 /* trim trailing whitespace */
419 end = strchrnul(src, '\n');
420 for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
421 ;
422 memcpy(dst, src, n);
423 dst += n;
424 if (*end)
425 *dst++ = '\n';
426 src = *end ? end + 1 : end;
427 }
428 *dst++ = '\0';
429
430 /* print out using adjusted template */
431 va_start(args, template);
432 n = vprintf(s, args);
433 va_end(args);
434
435 free(s);
436 }
437
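/* Emit data as C string-literal escapes: "\0" for zero bytes, "\xNN"
 * otherwise, splitting the literal with a backslash-newline roughly
 * every 78 columns.
 */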
438 static void print_hex(const char *data, int data_sz)
439 {
440 int i, len;
441
442 for (i = 0, len = 0; i < data_sz; i++) {
443 int w = data[i] ? 4 : 2;
444
445 len += w;
446 if (len > 78) {
447 printf("\\\n");
448 len = w;
449 }
450 if (!data[i])
451 printf("\\0");
452 else
453 printf("\\x%02x", (unsigned char)data[i]);
454 }
455 }
456
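/* Size of the region mmap()ed for an internal map: value size rounded
 * up to 8 bytes, multiplied by max_entries, rounded up to a full page
 * (assumed to mirror libbpf's own sizing of such maps).
 */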
457 static size_t bpf_map_mmap_sz(const struct bpf_map *map)
458 {
459 long page_sz = sysconf(_SC_PAGE_SIZE);
460 size_t map_sz;
461
462 map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
463 map_sz = roundup(map_sz, page_sz);
464 return map_sz;
465 }
466
467 /* Emit type size asserts for all top-level fields in memory-mapped internal maps. */
468 static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
469 {
470 struct btf *btf = bpf_object__btf(obj);
471 struct bpf_map *map;
472 struct btf_var_secinfo *sec_var;
473 int i, vlen;
474 const struct btf_type *sec;
475 char map_ident[256], var_ident[256];
476
477 if (!btf)
478 return;
479
480 codegen("\
481 \n\
482 __attribute__((unused)) static void \n\
483 %1$s__assert(struct %1$s *s __attribute__((unused))) \n\
484 { \n\
485 #ifdef __cplusplus \n\
486 #define _Static_assert static_assert \n\
487 #endif \n\
488 ", obj_name);
489
490 bpf_object__for_each_map(map, obj) {
491 if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
492 continue;
493
494 sec = find_type_for_map(btf, map_ident);
495 if (!sec) {
496 /* best effort, couldn't find the type for this map */
497 continue;
498 }
499
500 sec_var = btf_var_secinfos(sec);
501 vlen = btf_vlen(sec);
502
503 for (i = 0; i < vlen; i++, sec_var++) {
504 const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
505 const char *var_name = btf__name_by_offset(btf, var->name_off);
506 long var_size;
507
508 /* static variables are not exposed through BPF skeleton */
509 if (btf_var(var)->linkage == BTF_VAR_STATIC)
510 continue;
511
512 var_size = btf__resolve_size(btf, var->type);
513 if (var_size < 0)
514 continue;
515
516 var_ident[0] = '\0';
517 strncat(var_ident, var_name, sizeof(var_ident) - 1);
518 sanitize_identifier(var_ident);
519
520 printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
521 map_ident, var_ident, var_size, var_ident);
522 }
523 }
524 codegen("\
525 \n\
526 #ifdef __cplusplus \n\
527 #undef _Static_assert \n\
528 #endif \n\
529 } \n\
530 ");
531 }
532
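/* Emit per-program attach helpers for the light skeleton, roughly
 * (program and tracepoint names illustrative):
 *
 *	static inline int
 *	test__handler__attach(struct test *skel)
 *	{
 *		int prog_fd = skel->progs.handler.prog_fd;
 *		int fd = skel_raw_tracepoint_open("sys_enter", prog_fd);
 *
 *		if (fd > 0)
 *			skel->links.handler_fd = fd;
 *		return fd;
 *	}
 *
 * plus <obj>__attach()/<obj>__detach() wrappers covering all programs.
 */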
533 static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
534 {
535 struct bpf_program *prog;
536
537 bpf_object__for_each_program(prog, obj) {
538 const char *tp_name;
539
540 codegen("\
541 \n\
542 \n\
543 static inline int \n\
544 %1$s__%2$s__attach(struct %1$s *skel) \n\
545 { \n\
546 int prog_fd = skel->progs.%2$s.prog_fd; \n\
547 ", obj_name, bpf_program__name(prog));
548
549 switch (bpf_program__type(prog)) {
550 case BPF_PROG_TYPE_RAW_TRACEPOINT:
551 tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
552 printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
553 break;
554 case BPF_PROG_TYPE_TRACING:
555 case BPF_PROG_TYPE_LSM:
556 if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
557 printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
558 else
559 printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
560 break;
561 default:
562 printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
563 break;
564 }
565 codegen("\
566 \n\
567 \n\
568 if (fd > 0) \n\
569 skel->links.%1$s_fd = fd; \n\
570 return fd; \n\
571 } \n\
572 ", bpf_program__name(prog));
573 }
574
575 codegen("\
576 \n\
577 \n\
578 static inline int \n\
579 %1$s__attach(struct %1$s *skel) \n\
580 { \n\
581 int ret = 0; \n\
582 \n\
583 ", obj_name);
584
585 bpf_object__for_each_program(prog, obj) {
586 codegen("\
587 \n\
588 ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
589 ", obj_name, bpf_program__name(prog));
590 }
591
592 codegen("\
593 \n\
594 return ret < 0 ? ret : 0; \n\
595 } \n\
596 \n\
597 static inline void \n\
598 %1$s__detach(struct %1$s *skel) \n\
599 { \n\
600 ", obj_name);
601
602 bpf_object__for_each_program(prog, obj) {
603 codegen("\
604 \n\
605 skel_closenz(skel->links.%1$s_fd); \n\
606 ", bpf_program__name(prog));
607 }
608
609 codegen("\
610 \n\
611 } \n\
612 ");
613 }
614
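/* Emit <obj>__destroy() for the light skeleton: detach links, close
 * program and map FDs, free mmap()ed map data, then free the skeleton
 * itself.
 */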
615 static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
616 {
617 struct bpf_program *prog;
618 struct bpf_map *map;
619 char ident[256];
620
621 codegen("\
622 \n\
623 static void \n\
624 %1$s__destroy(struct %1$s *skel) \n\
625 { \n\
626 if (!skel) \n\
627 return; \n\
628 %1$s__detach(skel); \n\
629 ",
630 obj_name);
631
632 bpf_object__for_each_program(prog, obj) {
633 codegen("\
634 \n\
635 skel_closenz(skel->progs.%1$s.prog_fd); \n\
636 ", bpf_program__name(prog));
637 }
638
639 bpf_object__for_each_map(map, obj) {
640 if (!get_map_ident(map, ident, sizeof(ident)))
641 continue;
642 if (bpf_map__is_internal(map) &&
643 (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
644 printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
645 ident, bpf_map_mmap_sz(map));
646 codegen("\
647 \n\
648 skel_closenz(skel->maps.%1$s.map_fd); \n\
649 ", ident);
650 }
651 codegen("\
652 \n\
653 skel_free(skel); \n\
654 } \n\
655 ",
656 obj_name);
657 }
658
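/* Light-skeleton ("loader") mode: have libbpf's gen_loader record the
 * entire load sequence as a BPF program plus a metadata blob, then
 * embed both as hex literals so that the generated header can replay
 * them through bpf_load_and_run() without linking against libbpf.
 */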
659 static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
660 {
661 DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
662 struct bpf_map *map;
663 char ident[256];
664 int err = 0;
665
666 err = bpf_object__gen_loader(obj, &opts);
667 if (err)
668 return err;
669
670 err = bpf_object__load(obj);
671 if (err) {
672 p_err("failed to load object file");
673 goto out;
674 }
675 /* If there was no error during load then gen_loader_opts
676 * are populated with the loader program.
677 */
678
679 /* finish generating 'struct skel' */
680 codegen("\
681 \n\
682 }; \n\
683 ", obj_name);
684
685
686 codegen_attach_detach(obj, obj_name);
687
688 codegen_destroy(obj, obj_name);
689
690 codegen("\
691 \n\
692 static inline struct %1$s * \n\
693 %1$s__open(void) \n\
694 { \n\
695 struct %1$s *skel; \n\
696 \n\
697 skel = skel_alloc(sizeof(*skel)); \n\
698 if (!skel) \n\
699 goto cleanup; \n\
700 skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
701 ",
702 obj_name, opts.data_sz);
703 bpf_object__for_each_map(map, obj) {
704 const void *mmap_data = NULL;
705 size_t mmap_size = 0;
706
707 if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
708 continue;
709
710 codegen("\
711 \n\
712 skel->%1$s = skel_prep_map_data((void *)\"\\ \n\
713 ", ident);
714 mmap_data = bpf_map__initial_value(map, &mmap_size);
715 print_hex(mmap_data, mmap_size);
716 codegen("\
717 \n\
718 \", %1$zd, %2$zd); \n\
719 if (!skel->%3$s) \n\
720 goto cleanup; \n\
721 skel->maps.%3$s.initial_value = (__u64) (long) skel->%3$s;\n\
722 ", bpf_map_mmap_sz(map), mmap_size, ident);
723 }
724 codegen("\
725 \n\
726 return skel; \n\
727 cleanup: \n\
728 %1$s__destroy(skel); \n\
729 return NULL; \n\
730 } \n\
731 \n\
732 static inline int \n\
733 %1$s__load(struct %1$s *skel) \n\
734 { \n\
735 struct bpf_load_and_run_opts opts = {}; \n\
736 int err; \n\
737 \n\
738 opts.ctx = (struct bpf_loader_ctx *)skel; \n\
739 opts.data_sz = %2$d; \n\
740 opts.data = (void *)\"\\ \n\
741 ",
742 obj_name, opts.data_sz);
743 print_hex(opts.data, opts.data_sz);
744 codegen("\
745 \n\
746 \"; \n\
747 ");
748
749 codegen("\
750 \n\
751 opts.insns_sz = %d; \n\
752 opts.insns = (void *)\"\\ \n\
753 ",
754 opts.insns_sz);
755 print_hex(opts.insns, opts.insns_sz);
756 codegen("\
757 \n\
758 \"; \n\
759 err = bpf_load_and_run(&opts); \n\
760 if (err < 0) \n\
761 return err; \n\
762 ", obj_name);
763 bpf_object__for_each_map(map, obj) {
764 const char *mmap_flags;
765
766 if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
767 continue;
768
769 if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
770 mmap_flags = "PROT_READ";
771 else
772 mmap_flags = "PROT_READ | PROT_WRITE";
773
774 codegen("\
775 \n\
776 skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value, \n\
777 %2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
778 if (!skel->%1$s) \n\
779 return -ENOMEM; \n\
780 ",
781 ident, bpf_map_mmap_sz(map), mmap_flags);
782 }
783 codegen("\
784 \n\
785 return 0; \n\
786 } \n\
787 \n\
788 static inline struct %1$s * \n\
789 %1$s__open_and_load(void) \n\
790 { \n\
791 struct %1$s *skel; \n\
792 \n\
793 skel = %1$s__open(); \n\
794 if (!skel) \n\
795 return NULL; \n\
796 if (%1$s__load(skel)) { \n\
797 %1$s__destroy(skel); \n\
798 return NULL; \n\
799 } \n\
800 return skel; \n\
801 } \n\
802 \n\
803 ", obj_name);
804
805 codegen_asserts(obj, obj_name);
806
807 codegen("\
808 \n\
809 \n\
810 #endif /* %s */ \n\
811 ",
812 header_guard);
813 err = 0;
814 out:
815 return err;
816 }
817
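/* Emit code populating the skeleton's map array; for each map, roughly
 * (identifiers illustrative):
 *
 *	s->maps[i].name = "test_p.rodata";
 *	s->maps[i].map = &obj->maps.rodata;
 *	s->maps[i].mmaped = (void **)&obj->rodata;	// internal maps only
 */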
818 static void
819 codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
820 {
821 struct bpf_map *map;
822 char ident[256];
823 size_t i;
824
825 if (!map_cnt)
826 return;
827
828 codegen("\
829 \n\
830 \n\
831 /* maps */ \n\
832 s->map_cnt = %zu; \n\
833 s->map_skel_sz = sizeof(*s->maps); \n\
834 s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
835 if (!s->maps) { \n\
836 err = -ENOMEM; \n\
837 goto err; \n\
838 } \n\
839 ",
840 map_cnt
841 );
842 i = 0;
843 bpf_object__for_each_map(map, obj) {
844 if (!get_map_ident(map, ident, sizeof(ident)))
845 continue;
846
847 codegen("\
848 \n\
849 \n\
850 s->maps[%zu].name = \"%s\"; \n\
851 s->maps[%zu].map = &obj->maps.%s; \n\
852 ",
853 i, bpf_map__name(map), i, ident);
854 /* memory-mapped internal maps */
855 if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) {
856 printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
857 i, ident);
858 }
859 i++;
860 }
861 }
862
863 static void
864 codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
865 {
866 struct bpf_program *prog;
867 size_t i;
868
869 if (!prog_cnt)
870 return;
871
872 codegen("\
873 \n\
874 \n\
875 /* programs */ \n\
876 s->prog_cnt = %zu; \n\
877 s->prog_skel_sz = sizeof(*s->progs); \n\
878 s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
879 if (!s->progs) { \n\
880 err = -ENOMEM; \n\
881 goto err; \n\
882 } \n\
883 ",
884 prog_cnt
885 );
886 i = 0;
887 bpf_object__for_each_program(prog, obj) {
888 codegen("\
889 \n\
890 \n\
891 s->progs[%1$zu].name = \"%2$s\"; \n\
892 s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
893 ",
894 i, bpf_program__name(prog));
895
896 if (populate_links) {
897 codegen("\
898 \n\
899 s->progs[%1$zu].link = &obj->links.%2$s;\n\
900 ",
901 i, bpf_program__name(prog));
902 }
903 i++;
904 }
905 }
906
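/* `bpftool gen skeleton FILE` entry point. For test.bpf.o this emits a
 * header along the lines of (abridged, member names illustrative):
 *
 *	struct test_bpf {
 *		struct bpf_object_skeleton *skeleton;
 *		struct bpf_object *obj;
 *		struct { struct bpf_map *rodata; } maps;
 *		struct { struct bpf_program *handler; } progs;
 *		struct { struct bpf_link *handler; } links;
 *	};
 *
 * followed by open/load/attach/destroy helpers and the embedded ELF.
 */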
907 static int do_skeleton(int argc, char **argv)
908 {
909 char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
910 size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
911 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
912 char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
913 struct bpf_object *obj = NULL;
914 const char *file;
915 char ident[256];
916 struct bpf_program *prog;
917 int fd, err = -1;
918 struct bpf_map *map;
919 struct btf *btf;
920 struct stat st;
921
922 if (!REQ_ARGS(1)) {
923 usage();
924 return -1;
925 }
926 file = GET_ARG();
927
928 while (argc) {
929 if (!REQ_ARGS(2))
930 return -1;
931
932 if (is_prefix(*argv, "name")) {
933 NEXT_ARG();
934
935 if (obj_name[0] != '\0') {
936 p_err("object name already specified");
937 return -1;
938 }
939
940 strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
941 obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
942 } else {
943 p_err("unknown arg %s", *argv);
944 return -1;
945 }
946
947 NEXT_ARG();
948 }
949
950 if (argc) {
951 p_err("extra unknown arguments");
952 return -1;
953 }
954
955 if (stat(file, &st)) {
956 p_err("failed to stat() %s: %s", file, strerror(errno));
957 return -1;
958 }
959 file_sz = st.st_size;
960 mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
961 fd = open(file, O_RDONLY);
962 if (fd < 0) {
963 p_err("failed to open() %s: %s", file, strerror(errno));
964 return -1;
965 }
966 obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
967 if (obj_data == MAP_FAILED) {
968 obj_data = NULL;
969 p_err("failed to mmap() %s: %s", file, strerror(errno));
970 goto out;
971 }
972 if (obj_name[0] == '\0')
973 get_obj_name(obj_name, file);
974 opts.object_name = obj_name;
975 if (verifier_logs)
976 /* log_level1 + log_level2 + stats, but not stable UAPI */
977 opts.kernel_log_level = 1 + 2 + 4;
978 obj = bpf_object__open_mem(obj_data, file_sz, &opts);
979 err = libbpf_get_error(obj);
980 if (err) {
981 char err_buf[256];
982
983 libbpf_strerror(err, err_buf, sizeof(err_buf));
984 p_err("failed to open BPF object file: %s", err_buf);
985 obj = NULL;
986 goto out;
987 }
988
989 bpf_object__for_each_map(map, obj) {
990 if (!get_map_ident(map, ident, sizeof(ident))) {
991 p_err("ignoring unrecognized internal map '%s'...",
992 bpf_map__name(map));
993 continue;
994 }
995 map_cnt++;
996 }
997 bpf_object__for_each_program(prog, obj) {
998 prog_cnt++;
999 }
1000
1001 get_header_guard(header_guard, obj_name, "SKEL_H");
1002 if (use_loader) {
1003 codegen("\
1004 \n\
1005 /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
1006 /* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
1007 #ifndef %2$s \n\
1008 #define %2$s \n\
1009 \n\
1010 #include <bpf/skel_internal.h> \n\
1011 \n\
1012 struct %1$s { \n\
1013 struct bpf_loader_ctx ctx; \n\
1014 ",
1015 obj_name, header_guard
1016 );
1017 } else {
1018 codegen("\
1019 \n\
1020 /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
1021 \n\
1022 /* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
1023 #ifndef %2$s \n\
1024 #define %2$s \n\
1025 \n\
1026 #include <errno.h> \n\
1027 #include <stdlib.h> \n\
1028 #include <bpf/libbpf.h> \n\
1029 \n\
1030 struct %1$s { \n\
1031 struct bpf_object_skeleton *skeleton; \n\
1032 struct bpf_object *obj; \n\
1033 ",
1034 obj_name, header_guard
1035 );
1036 }
1037
1038 if (map_cnt) {
1039 printf("\tstruct {\n");
1040 bpf_object__for_each_map(map, obj) {
1041 if (!get_map_ident(map, ident, sizeof(ident)))
1042 continue;
1043 if (use_loader)
1044 printf("\t\tstruct bpf_map_desc %s;\n", ident);
1045 else
1046 printf("\t\tstruct bpf_map *%s;\n", ident);
1047 }
1048 printf("\t} maps;\n");
1049 }
1050
1051 if (prog_cnt) {
1052 printf("\tstruct {\n");
1053 bpf_object__for_each_program(prog, obj) {
1054 if (use_loader)
1055 printf("\t\tstruct bpf_prog_desc %s;\n",
1056 bpf_program__name(prog));
1057 else
1058 printf("\t\tstruct bpf_program *%s;\n",
1059 bpf_program__name(prog));
1060 }
1061 printf("\t} progs;\n");
1062 printf("\tstruct {\n");
1063 bpf_object__for_each_program(prog, obj) {
1064 if (use_loader)
1065 printf("\t\tint %s_fd;\n",
1066 bpf_program__name(prog));
1067 else
1068 printf("\t\tstruct bpf_link *%s;\n",
1069 bpf_program__name(prog));
1070 }
1071 printf("\t} links;\n");
1072 }
1073
1074 btf = bpf_object__btf(obj);
1075 if (btf) {
1076 err = codegen_datasecs(obj, obj_name);
1077 if (err)
1078 goto out;
1079 }
1080 if (use_loader) {
1081 err = gen_trace(obj, obj_name, header_guard);
1082 goto out;
1083 }
1084
1085 codegen("\
1086 \n\
1087 \n\
1088 #ifdef __cplusplus \n\
1089 static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
1090 static inline struct %1$s *open_and_load(); \n\
1091 static inline int load(struct %1$s *skel); \n\
1092 static inline int attach(struct %1$s *skel); \n\
1093 static inline void detach(struct %1$s *skel); \n\
1094 static inline void destroy(struct %1$s *skel); \n\
1095 static inline const void *elf_bytes(size_t *sz); \n\
1096 #endif /* __cplusplus */ \n\
1097 }; \n\
1098 \n\
1099 static void \n\
1100 %1$s__destroy(struct %1$s *obj) \n\
1101 { \n\
1102 if (!obj) \n\
1103 return; \n\
1104 if (obj->skeleton) \n\
1105 bpf_object__destroy_skeleton(obj->skeleton);\n\
1106 free(obj); \n\
1107 } \n\
1108 \n\
1109 static inline int \n\
1110 %1$s__create_skeleton(struct %1$s *obj); \n\
1111 \n\
1112 static inline struct %1$s * \n\
1113 %1$s__open_opts(const struct bpf_object_open_opts *opts) \n\
1114 { \n\
1115 struct %1$s *obj; \n\
1116 int err; \n\
1117 \n\
1118 obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
1119 if (!obj) { \n\
1120 errno = ENOMEM; \n\
1121 return NULL; \n\
1122 } \n\
1123 \n\
1124 err = %1$s__create_skeleton(obj); \n\
1125 if (err) \n\
1126 goto err_out; \n\
1127 \n\
1128 err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
1129 if (err) \n\
1130 goto err_out; \n\
1131 \n\
1132 return obj; \n\
1133 err_out: \n\
1134 %1$s__destroy(obj); \n\
1135 errno = -err; \n\
1136 return NULL; \n\
1137 } \n\
1138 \n\
1139 static inline struct %1$s * \n\
1140 %1$s__open(void) \n\
1141 { \n\
1142 return %1$s__open_opts(NULL); \n\
1143 } \n\
1144 \n\
1145 static inline int \n\
1146 %1$s__load(struct %1$s *obj) \n\
1147 { \n\
1148 return bpf_object__load_skeleton(obj->skeleton); \n\
1149 } \n\
1150 \n\
1151 static inline struct %1$s * \n\
1152 %1$s__open_and_load(void) \n\
1153 { \n\
1154 struct %1$s *obj; \n\
1155 int err; \n\
1156 \n\
1157 obj = %1$s__open(); \n\
1158 if (!obj) \n\
1159 return NULL; \n\
1160 err = %1$s__load(obj); \n\
1161 if (err) { \n\
1162 %1$s__destroy(obj); \n\
1163 errno = -err; \n\
1164 return NULL; \n\
1165 } \n\
1166 return obj; \n\
1167 } \n\
1168 \n\
1169 static inline int \n\
1170 %1$s__attach(struct %1$s *obj) \n\
1171 { \n\
1172 return bpf_object__attach_skeleton(obj->skeleton); \n\
1173 } \n\
1174 \n\
1175 static inline void \n\
1176 %1$s__detach(struct %1$s *obj) \n\
1177 { \n\
1178 bpf_object__detach_skeleton(obj->skeleton); \n\
1179 } \n\
1180 ",
1181 obj_name
1182 );
1183
1184 codegen("\
1185 \n\
1186 \n\
1187 static inline const void *%1$s__elf_bytes(size_t *sz); \n\
1188 \n\
1189 static inline int \n\
1190 %1$s__create_skeleton(struct %1$s *obj) \n\
1191 { \n\
1192 struct bpf_object_skeleton *s; \n\
1193 int err; \n\
1194 \n\
1195 s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
1196 if (!s) { \n\
1197 err = -ENOMEM; \n\
1198 goto err; \n\
1199 } \n\
1200 \n\
1201 s->sz = sizeof(*s); \n\
1202 s->name = \"%1$s\"; \n\
1203 s->obj = &obj->obj; \n\
1204 ",
1205 obj_name
1206 );
1207
1208 codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
1209 codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);
1210
1211 codegen("\
1212 \n\
1213 \n\
1214 s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\
1215 \n\
1216 obj->skeleton = s; \n\
1217 return 0; \n\
1218 err: \n\
1219 bpf_object__destroy_skeleton(s); \n\
1220 return err; \n\
1221 } \n\
1222 \n\
1223 static inline const void *%2$s__elf_bytes(size_t *sz) \n\
1224 { \n\
1225 *sz = %1$d; \n\
1226 return (const void *)\"\\ \n\
1227 "
1228 , file_sz, obj_name);
1229
1230 /* embed contents of BPF object file */
1231 print_hex(obj_data, file_sz);
1232
1233 codegen("\
1234 \n\
1235 \"; \n\
1236 } \n\
1237 \n\
1238 #ifdef __cplusplus \n\
1239 struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
1240 struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); } \n\
1241 int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); } \n\
1242 int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); } \n\
1243 void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); } \n\
1244 void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); } \n\
1245 const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
1246 #endif /* __cplusplus */ \n\
1247 \n\
1248 ",
1249 obj_name);
1250
1251 codegen_asserts(obj, obj_name);
1252
1253 codegen("\
1254 \n\
1255 \n\
1256 #endif /* %1$s */ \n\
1257 ",
1258 header_guard);
1259 err = 0;
1260 out:
1261 bpf_object__close(obj);
1262 if (obj_data)
1263 munmap(obj_data, mmap_sz);
1264 close(fd);
1265 return err;
1266 }
1267
1268 /* Subskeletons are like skeletons, except they don't own the bpf_object,
1269 * associated maps, links, etc. Instead, they know about the existence of
1270 * variables, maps, programs and are able to find their locations
1271 * _at runtime_ from an already loaded bpf_object.
1272 *
1273 * This allows for library-like BPF objects to have userspace counterparts
1274 * with access to their own items without having to know anything about the
1275 * final BPF object that the library was linked into.
1276 */
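/* Typical use (illustrative): the library's userspace half calls
 *
 *	struct liba *sub = liba__open(final_obj);
 *
 * and then reads or writes *sub->data.shared_counter directly.
 */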
1277 static int do_subskeleton(int argc, char **argv)
1278 {
1279 char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
1280 size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
1281 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
1282 char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
1283 struct bpf_object *obj = NULL;
1284 const char *file, *var_name;
1285 char ident[256];
1286 int fd, err = -1, map_type_id;
1287 const struct bpf_map *map;
1288 struct bpf_program *prog;
1289 struct btf *btf;
1290 const struct btf_type *map_type, *var_type;
1291 const struct btf_var_secinfo *var;
1292 struct stat st;
1293
1294 if (!REQ_ARGS(1)) {
1295 usage();
1296 return -1;
1297 }
1298 file = GET_ARG();
1299
1300 while (argc) {
1301 if (!REQ_ARGS(2))
1302 return -1;
1303
1304 if (is_prefix(*argv, "name")) {
1305 NEXT_ARG();
1306
1307 if (obj_name[0] != '\0') {
1308 p_err("object name already specified");
1309 return -1;
1310 }
1311
1312 strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
1313 obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
1314 } else {
1315 p_err("unknown arg %s", *argv);
1316 return -1;
1317 }
1318
1319 NEXT_ARG();
1320 }
1321
1322 if (argc) {
1323 p_err("extra unknown arguments");
1324 return -1;
1325 }
1326
1327 if (use_loader) {
1328 p_err("cannot use loader for subskeletons");
1329 return -1;
1330 }
1331
1332 if (stat(file, &st)) {
1333 p_err("failed to stat() %s: %s", file, strerror(errno));
1334 return -1;
1335 }
1336 file_sz = st.st_size;
1337 mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
1338 fd = open(file, O_RDONLY);
1339 if (fd < 0) {
1340 p_err("failed to open() %s: %s", file, strerror(errno));
1341 return -1;
1342 }
1343 obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
1344 if (obj_data == MAP_FAILED) {
1345 obj_data = NULL;
1346 p_err("failed to mmap() %s: %s", file, strerror(errno));
1347 goto out;
1348 }
1349 if (obj_name[0] == '\0')
1350 get_obj_name(obj_name, file);
1351
1352 /* The empty object name allows us to use bpf_map__name and produce
1353 * ELF section names out of it. (".data" instead of "obj.data")
1354 */
1355 opts.object_name = "";
1356 obj = bpf_object__open_mem(obj_data, file_sz, &opts);
1357 if (!obj) {
1358 char err_buf[256];
1359
1360 libbpf_strerror(errno, err_buf, sizeof(err_buf));
1361 p_err("failed to open BPF object file: %s", err_buf);
1362 obj = NULL;
1363 goto out;
1364 }
1365
1366 btf = bpf_object__btf(obj);
1367 if (!btf) {
1368 err = -1;
1369 p_err("need btf type information for %s", obj_name);
1370 goto out;
1371 }
1372
1373 bpf_object__for_each_program(prog, obj) {
1374 prog_cnt++;
1375 }
1376
1377 /* First, count how many variables we have to find.
1378 * We need this in advance so the subskel can allocate the right
1379 * amount of storage.
1380 */
1381 bpf_object__for_each_map(map, obj) {
1382 if (!get_map_ident(map, ident, sizeof(ident)))
1383 continue;
1384
1385 /* Also count all maps that have a name */
1386 map_cnt++;
1387
1388 if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
1389 continue;
1390
1391 map_type_id = bpf_map__btf_value_type_id(map);
1392 if (map_type_id <= 0) {
1393 err = map_type_id;
1394 goto out;
1395 }
1396 map_type = btf__type_by_id(btf, map_type_id);
1397
1398 var = btf_var_secinfos(map_type);
1399 len = btf_vlen(map_type);
1400 for (i = 0; i < len; i++, var++) {
1401 var_type = btf__type_by_id(btf, var->type);
1402
1403 if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
1404 continue;
1405
1406 var_cnt++;
1407 }
1408 }
1409
1410 get_header_guard(header_guard, obj_name, "SUBSKEL_H");
1411 codegen("\
1412 \n\
1413 /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
1414 \n\
1415 /* THIS FILE IS AUTOGENERATED! */ \n\
1416 #ifndef %2$s \n\
1417 #define %2$s \n\
1418 \n\
1419 #include <errno.h> \n\
1420 #include <stdlib.h> \n\
1421 #include <bpf/libbpf.h> \n\
1422 \n\
1423 struct %1$s { \n\
1424 struct bpf_object *obj; \n\
1425 struct bpf_object_subskeleton *subskel; \n\
1426 ", obj_name, header_guard);
1427
1428 if (map_cnt) {
1429 printf("\tstruct {\n");
1430 bpf_object__for_each_map(map, obj) {
1431 if (!get_map_ident(map, ident, sizeof(ident)))
1432 continue;
1433 printf("\t\tstruct bpf_map *%s;\n", ident);
1434 }
1435 printf("\t} maps;\n");
1436 }
1437
1438 if (prog_cnt) {
1439 printf("\tstruct {\n");
1440 bpf_object__for_each_program(prog, obj) {
1441 printf("\t\tstruct bpf_program *%s;\n",
1442 bpf_program__name(prog));
1443 }
1444 printf("\t} progs;\n");
1445 }
1446
1447 err = codegen_subskel_datasecs(obj, obj_name);
1448 if (err)
1449 goto out;
1450
1451 /* emit code that will allocate enough storage for all symbols */
1452 codegen("\
1453 \n\
1454 \n\
1455 #ifdef __cplusplus \n\
1456 static inline struct %1$s *open(const struct bpf_object *src);\n\
1457 static inline void destroy(struct %1$s *skel); \n\
1458 #endif /* __cplusplus */ \n\
1459 }; \n\
1460 \n\
1461 static inline void \n\
1462 %1$s__destroy(struct %1$s *skel) \n\
1463 { \n\
1464 if (!skel) \n\
1465 return; \n\
1466 if (skel->subskel) \n\
1467 bpf_object__destroy_subskeleton(skel->subskel);\n\
1468 free(skel); \n\
1469 } \n\
1470 \n\
1471 static inline struct %1$s * \n\
1472 %1$s__open(const struct bpf_object *src) \n\
1473 { \n\
1474 struct %1$s *obj; \n\
1475 struct bpf_object_subskeleton *s; \n\
1476 int err; \n\
1477 \n\
1478 obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
1479 if (!obj) { \n\
1480 err = -ENOMEM; \n\
1481 goto err; \n\
1482 } \n\
1483 s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
1484 if (!s) { \n\
1485 err = -ENOMEM; \n\
1486 goto err; \n\
1487 } \n\
1488 s->sz = sizeof(*s); \n\
1489 s->obj = src; \n\
1490 s->var_skel_sz = sizeof(*s->vars); \n\
1491 obj->subskel = s; \n\
1492 \n\
1493 /* vars */ \n\
1494 s->var_cnt = %2$d; \n\
1495 s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
1496 if (!s->vars) { \n\
1497 err = -ENOMEM; \n\
1498 goto err; \n\
1499 } \n\
1500 ",
1501 obj_name, var_cnt
1502 );
1503
1504 /* walk through each symbol and emit the runtime representation */
1505 bpf_object__for_each_map(map, obj) {
1506 if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
1507 continue;
1508
1509 map_type_id = bpf_map__btf_value_type_id(map);
1510 if (map_type_id <= 0)
1511 /* skip over internal maps with no type */
1512 continue;
1513
1514 map_type = btf__type_by_id(btf, map_type_id);
1515 var = btf_var_secinfos(map_type);
1516 len = btf_vlen(map_type);
1517 for (i = 0; i < len; i++, var++) {
1518 var_type = btf__type_by_id(btf, var->type);
1519 var_name = btf__name_by_offset(btf, var_type->name_off);
1520
1521 if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
1522 continue;
1523
1524 /* Note that we use the dot prefix in .data as the
1525 * field access operator, i.e., maps%s becomes maps.data
1526 */
1527 codegen("\
1528 \n\
1529 \n\
1530 s->vars[%3$d].name = \"%1$s\"; \n\
1531 s->vars[%3$d].map = &obj->maps.%2$s; \n\
1532 s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
1533 ", var_name, ident, var_idx);
1534
1535 var_idx++;
1536 }
1537 }
1538
1539 codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
1540 codegen_progs_skeleton(obj, prog_cnt, false /*links*/);
1541
1542 codegen("\
1543 \n\
1544 \n\
1545 err = bpf_object__open_subskeleton(s); \n\
1546 if (err) \n\
1547 goto err; \n\
1548 \n\
1549 return obj; \n\
1550 err: \n\
1551 %1$s__destroy(obj); \n\
1552 errno = -err; \n\
1553 return NULL; \n\
1554 } \n\
1555 \n\
1556 #ifdef __cplusplus \n\
1557 struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
1558 void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
1559 #endif /* __cplusplus */ \n\
1560 \n\
1561 #endif /* %2$s */ \n\
1562 ",
1563 obj_name, header_guard);
1564 err = 0;
1565 out:
1566 bpf_object__close(obj);
1567 if (obj_data)
1568 munmap(obj_data, mmap_sz);
1569 close(fd);
1570 return err;
1571 }
1572
1573 static int do_object(int argc, char **argv)
1574 {
1575 struct bpf_linker *linker;
1576 const char *output_file, *file;
1577 int err = 0;
1578
1579 if (!REQ_ARGS(2)) {
1580 usage();
1581 return -1;
1582 }
1583
1584 output_file = GET_ARG();
1585
1586 linker = bpf_linker__new(output_file, NULL);
1587 if (!linker) {
1588 p_err("failed to create BPF linker instance");
1589 return -1;
1590 }
1591
1592 while (argc) {
1593 file = GET_ARG();
1594
1595 err = bpf_linker__add_file(linker, file, NULL);
1596 if (err) {
1597 p_err("failed to link '%s': %s (%d)", file, strerror(errno), errno);
1598 goto out;
1599 }
1600 }
1601
1602 err = bpf_linker__finalize(linker);
1603 if (err) {
1604 p_err("failed to finalize ELF file: %s (%d)", strerror(errno), errno);
1605 goto out;
1606 }
1607
1608 err = 0;
1609 out:
1610 bpf_linker__free(linker);
1611 return err;
1612 }
1613
1614 static int do_help(int argc, char **argv)
1615 {
1616 if (json_output) {
1617 jsonw_null(json_wtr);
1618 return 0;
1619 }
1620
1621 fprintf(stderr,
1622 "Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
1623 " %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
1624 " %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
1625 " %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
1626 " %1$s %2$s help\n"
1627 "\n"
1628 " " HELP_SPEC_OPTIONS " |\n"
1629 " {-L|--use-loader} }\n"
1630 "",
1631 bin_name, "gen");
1632
1633 return 0;
1634 }
1635
1636 static int btf_save_raw(const struct btf *btf, const char *path)
1637 {
1638 const void *data;
1639 FILE *f = NULL;
1640 __u32 data_sz;
1641 int err = 0;
1642
1643 data = btf__raw_data(btf, &data_sz);
1644 if (!data)
1645 return -ENOMEM;
1646
1647 f = fopen(path, "wb");
1648 if (!f)
1649 return -errno;
1650
1651 if (fwrite(data, 1, data_sz, f) != data_sz)
1652 err = -errno;
1653
1654 fclose(f);
1655 return err;
1656 }
1657
1658 struct btfgen_info {
1659 struct btf *src_btf;
1660 struct btf *marked_btf; /* btf structure used to mark used types */
1661 };
1662
1663 static size_t btfgen_hash_fn(const void *key, void *ctx)
1664 {
1665 return (size_t)key;
1666 }
1667
1668 static bool btfgen_equal_fn(const void *k1, const void *k2, void *ctx)
1669 {
1670 return k1 == k2;
1671 }
1672
1673 static void *u32_as_hash_key(__u32 x)
1674 {
1675 return (void *)(uintptr_t)x;
1676 }
1677
1678 static void btfgen_free_info(struct btfgen_info *info)
1679 {
1680 if (!info)
1681 return;
1682
1683 btf__free(info->src_btf);
1684 btf__free(info->marked_btf);
1685
1686 free(info);
1687 }
1688
1689 static struct btfgen_info *
1690 btfgen_new_info(const char *targ_btf_path)
1691 {
1692 struct btfgen_info *info;
1693 int err;
1694
1695 info = calloc(1, sizeof(*info));
1696 if (!info)
1697 return NULL;
1698
1699 info->src_btf = btf__parse(targ_btf_path, NULL);
1700 if (!info->src_btf) {
1701 err = -errno;
1702 p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
1703 goto err_out;
1704 }
1705
1706 info->marked_btf = btf__parse(targ_btf_path, NULL);
1707 if (!info->marked_btf) {
1708 err = -errno;
1709 p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
1710 goto err_out;
1711 }
1712
1713 return info;
1714
1715 err_out:
1716 btfgen_free_info(info);
1717 errno = -err;
1718 return NULL;
1719 }
1720
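/* A type or member in the cloned BTF is flagged as used by overwriting
 * its name_off with this sentinel. The clone is only consulted for
 * these marks, never for name lookups, so clobbering string offsets is
 * safe.
 */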
1721 #define MARKED UINT32_MAX
1722
1723 static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
1724 {
1725 const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
1726 struct btf_member *m = btf_members(t) + idx;
1727
1728 m->name_off = MARKED;
1729 }
1730
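/* Recursively mark a type and everything it needs: pointee, element
 * and index types, func_proto return/parameter types, etc. With
 * follow_pointers == false, recursion stops at pointers, so a struct
 * reached only through a pointer doesn't drag its full definition into
 * the minimized BTF.
 */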
1731 static int
1732 btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
1733 {
1734 const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
1735 struct btf_type *cloned_type;
1736 struct btf_param *param;
1737 struct btf_array *array;
1738 int err, i;
1739
1740 if (type_id == 0)
1741 return 0;
1742
1743 /* mark type on cloned BTF as used */
1744 cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
1745 cloned_type->name_off = MARKED;
1746
1747 /* recursively mark other types needed by it */
1748 switch (btf_kind(btf_type)) {
1749 case BTF_KIND_UNKN:
1750 case BTF_KIND_INT:
1751 case BTF_KIND_FLOAT:
1752 case BTF_KIND_ENUM:
1753 case BTF_KIND_ENUM64:
1754 case BTF_KIND_STRUCT:
1755 case BTF_KIND_UNION:
1756 break;
1757 case BTF_KIND_PTR:
1758 if (follow_pointers) {
1759 err = btfgen_mark_type(info, btf_type->type, follow_pointers);
1760 if (err)
1761 return err;
1762 }
1763 break;
1764 case BTF_KIND_CONST:
1765 case BTF_KIND_RESTRICT:
1766 case BTF_KIND_VOLATILE:
1767 case BTF_KIND_TYPEDEF:
1768 err = btfgen_mark_type(info, btf_type->type, follow_pointers);
1769 if (err)
1770 return err;
1771 break;
1772 case BTF_KIND_ARRAY:
1773 array = btf_array(btf_type);
1774
1775 /* mark array type */
1776 err = btfgen_mark_type(info, array->type, follow_pointers);
1777 /* mark array's index type */
1778 err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
1779 if (err)
1780 return err;
1781 break;
1782 case BTF_KIND_FUNC_PROTO:
1783 /* mark ret type */
1784 err = btfgen_mark_type(info, btf_type->type, follow_pointers);
1785 if (err)
1786 return err;
1787
1788 /* mark parameter types */
1789 param = btf_params(btf_type);
1790 for (i = 0; i < btf_vlen(btf_type); i++) {
1791 err = btfgen_mark_type(info, param->type, follow_pointers);
1792 if (err)
1793 return err;
1794 param++;
1795 }
1796 break;
1797 /* tells if some other type needs to be handled */
1798 default:
1799 p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
1800 return -EINVAL;
1801 }
1802
1803 return 0;
1804 }
1805
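/* E.g. (hypothetical): for a field relocation against
 * task_struct->comm, this marks 'struct task_struct' itself, only its
 * 'comm' member, and that member's array type; all unrelated members
 * stay unmarked.
 */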
1806 static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
1807 {
1808 struct btf *btf = info->src_btf;
1809 const struct btf_type *btf_type;
1810 struct btf_member *btf_member;
1811 struct btf_array *array;
1812 unsigned int type_id = targ_spec->root_type_id;
1813 int idx, err;
1814
1815 /* mark root type */
1816 btf_type = btf__type_by_id(btf, type_id);
1817 err = btfgen_mark_type(info, type_id, false);
1818 if (err)
1819 return err;
1820
1821 /* mark types for complex types (arrays, unions, structures) */
1822 for (int i = 1; i < targ_spec->raw_len; i++) {
1823 /* skip typedefs and mods */
1824 while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
1825 type_id = btf_type->type;
1826 btf_type = btf__type_by_id(btf, type_id);
1827 }
1828
1829 switch (btf_kind(btf_type)) {
1830 case BTF_KIND_STRUCT:
1831 case BTF_KIND_UNION:
1832 idx = targ_spec->raw_spec[i];
1833 btf_member = btf_members(btf_type) + idx;
1834
1835 /* mark member */
1836 btfgen_mark_member(info, type_id, idx);
1837
1838 /* mark member's type */
1839 type_id = btf_member->type;
1840 btf_type = btf__type_by_id(btf, type_id);
1841 err = btfgen_mark_type(info, type_id, false);
1842 if (err)
1843 return err;
1844 break;
1845 case BTF_KIND_ARRAY:
1846 array = btf_array(btf_type);
1847 type_id = array->type;
1848 btf_type = btf__type_by_id(btf, type_id);
1849 break;
1850 default:
1851 p_err("unsupported kind: %s (%d)",
1852 btf_kind_str(btf_type), btf_type->type);
1853 return -EINVAL;
1854 }
1855 }
1856
1857 return 0;
1858 }
1859
1860 /* Mark types, members, and member types. Compared to btfgen_record_field_relo,
1861 * this function does not rely on the target spec for inferring members, but
1862 * uses the associated BTF.
1863 *
1864 * The `behind_ptr` argument is used to stop marking of composite types reached
1865 * through a pointer. This way, we can keep BTF size in check while providing
1866 * reasonable match semantics.
1867 */
1868 static int btfgen_mark_type_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
1869 {
1870 const struct btf_type *btf_type;
1871 struct btf *btf = info->src_btf;
1872 struct btf_type *cloned_type;
1873 int i, err;
1874
1875 if (type_id == 0)
1876 return 0;
1877
1878 btf_type = btf__type_by_id(btf, type_id);
1879 /* mark type on cloned BTF as used */
1880 cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
1881 cloned_type->name_off = MARKED;
1882
1883 switch (btf_kind(btf_type)) {
1884 case BTF_KIND_UNKN:
1885 case BTF_KIND_INT:
1886 case BTF_KIND_FLOAT:
1887 case BTF_KIND_ENUM:
1888 case BTF_KIND_ENUM64:
1889 break;
1890 case BTF_KIND_STRUCT:
1891 case BTF_KIND_UNION: {
1892 struct btf_member *m = btf_members(btf_type);
1893 __u16 vlen = btf_vlen(btf_type);
1894
1895 if (behind_ptr)
1896 break;
1897
1898 for (i = 0; i < vlen; i++, m++) {
1899 /* mark member */
1900 btfgen_mark_member(info, type_id, i);
1901
1902 /* mark member's type */
1903 err = btfgen_mark_type_match(info, m->type, false);
1904 if (err)
1905 return err;
1906 }
1907 break;
1908 }
1909 case BTF_KIND_CONST:
1910 case BTF_KIND_FWD:
1911 case BTF_KIND_RESTRICT:
1912 case BTF_KIND_TYPEDEF:
1913 case BTF_KIND_VOLATILE:
1914 return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
1915 case BTF_KIND_PTR:
1916 return btfgen_mark_type_match(info, btf_type->type, true);
1917 case BTF_KIND_ARRAY: {
1918 struct btf_array *array;
1919
1920 array = btf_array(btf_type);
1921 /* mark array type */
1922 err = btfgen_mark_type_match(info, array->type, false);
1923 /* mark array's index type */
1924 err = err ? : btfgen_mark_type_match(info, array->index_type, false);
1925 if (err)
1926 return err;
1927 break;
1928 }
1929 case BTF_KIND_FUNC_PROTO: {
1930 __u16 vlen = btf_vlen(btf_type);
1931 struct btf_param *param;
1932
1933 /* mark ret type */
1934 err = btfgen_mark_type_match(info, btf_type->type, false);
1935 if (err)
1936 return err;
1937
1938 /* mark parameter types */
1939 param = btf_params(btf_type);
1940 for (i = 0; i < vlen; i++) {
1941 err = btfgen_mark_type_match(info, param->type, false);
1942 if (err)
1943 return err;
1944 param++;
1945 }
1946 break;
1947 }
1948 /* tells if some other type needs to be handled */
1949 default:
1950 p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
1951 return -EINVAL;
1952 }
1953
1954 return 0;
1955 }
1956
1957 /* Mark types, members, and member types. Compared to btfgen_record_field_relo,
1958 * this function does not rely on the target spec for inferring members, but
1959 * uses the associated BTF.
1960 */
1961 static int btfgen_record_type_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
1962 {
1963 return btfgen_mark_type_match(info, targ_spec->root_type_id, false);
1964 }
1965
1966 static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
1967 {
1968 return btfgen_mark_type(info, targ_spec->root_type_id, true);
1969 }
1970
1971 static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
1972 {
1973 return btfgen_mark_type(info, targ_spec->root_type_id, false);
1974 }
1975
1976 static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
1977 {
1978 switch (res->relo_kind) {
1979 case BPF_CORE_FIELD_BYTE_OFFSET:
1980 case BPF_CORE_FIELD_BYTE_SIZE:
1981 case BPF_CORE_FIELD_EXISTS:
1982 case BPF_CORE_FIELD_SIGNED:
1983 case BPF_CORE_FIELD_LSHIFT_U64:
1984 case BPF_CORE_FIELD_RSHIFT_U64:
1985 return btfgen_record_field_relo(info, res);
1986 case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
1987 return 0;
1988 case BPF_CORE_TYPE_ID_TARGET:
1989 case BPF_CORE_TYPE_EXISTS:
1990 case BPF_CORE_TYPE_SIZE:
1991 return btfgen_record_type_relo(info, res);
1992 case BPF_CORE_TYPE_MATCHES:
1993 return btfgen_record_type_match_relo(info, res);
1994 case BPF_CORE_ENUMVAL_EXISTS:
1995 case BPF_CORE_ENUMVAL_VALUE:
1996 return btfgen_record_enumval_relo(info, res);
1997 default:
1998 return -EINVAL;
1999 }
2000 }
2001
2002 static struct bpf_core_cand_list *
2003 btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
2004 {
2005 const struct btf_type *local_type;
2006 struct bpf_core_cand_list *cands = NULL;
2007 struct bpf_core_cand local_cand = {};
2008 size_t local_essent_len;
2009 const char *local_name;
2010 int err;
2011
2012 local_cand.btf = local_btf;
2013 local_cand.id = local_id;
2014
2015 local_type = btf__type_by_id(local_btf, local_id);
2016 if (!local_type) {
2017 err = -EINVAL;
2018 goto err_out;
2019 }
2020
2021 local_name = btf__name_by_offset(local_btf, local_type->name_off);
2022 if (!local_name) {
2023 err = -EINVAL;
2024 goto err_out;
2025 }
2026 local_essent_len = bpf_core_essential_name_len(local_name);
2027
2028 cands = calloc(1, sizeof(*cands));
2029 if (!cands)
2030 return NULL;
2031
2032 err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
2033 if (err)
2034 goto err_out;
2035
2036 return cands;
2037
2038 err_out:
2039 bpf_core_free_cands(cands);
2040 errno = -err;
2041 return NULL;
2042 }
2043
2044 /* Record relocation information for a single BPF object */
2045 static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
2046 {
2047 const struct btf_ext_info_sec *sec;
2048 const struct bpf_core_relo *relo;
2049 const struct btf_ext_info *seg;
2050 struct hashmap_entry *entry;
2051 struct hashmap *cand_cache = NULL;
2052 struct btf_ext *btf_ext = NULL;
2053 unsigned int relo_idx;
2054 struct btf *btf = NULL;
2055 size_t i;
2056 int err;
2057
2058 btf = btf__parse(obj_path, &btf_ext);
2059 if (!btf) {
2060 err = -errno;
2061 p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));
2062 return err;
2063 }
2064
2065 if (!btf_ext) {
2066 p_err("failed to parse BPF object '%s': section %s not found",
2067 obj_path, BTF_EXT_ELF_SEC);
2068 err = -EINVAL;
2069 goto out;
2070 }
2071
2072 if (btf_ext->core_relo_info.len == 0) {
2073 err = 0;
2074 goto out;
2075 }
2076
	cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
	if (IS_ERR(cand_cache)) {
		err = PTR_ERR(cand_cache);
		goto out;
	}

	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
			struct bpf_core_spec specs_scratch[3] = {};
			struct bpf_core_relo_res targ_res = {};
			struct bpf_core_cand_list *cands = NULL;
			const void *type_key = u32_as_hash_key(relo->type_id);
			const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);

			if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
			    !hashmap__find(cand_cache, type_key, (void **)&cands)) {
				cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
				if (!cands) {
					err = -errno;
					goto out;
				}

				err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
				if (err)
					goto out;
			}

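			/* Have libbpf resolve the relocation against the
			 * cached candidates; on success, the matching target
			 * spec is recorded below.
			 */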
			err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
						      specs_scratch, &targ_res);
			if (err)
				goto out;

			/* specs_scratch[2] is the target spec */
			err = btfgen_record_reloc(info, &specs_scratch[2]);
			if (err)
				goto out;
		}
	}

out:
	btf__free(btf);
	btf_ext__free(btf_ext);

	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {
			bpf_core_free_cands(entry->value);
		}
		hashmap__free(cand_cache);
	}

	return err;
}

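/* btf_type_visit_type_ids() callback: rewrite a type ID in the new BTF
 * using the old-ID -> new-ID table built while cloning marked types.
 */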
static int btfgen_remap_id(__u32 *type_id, void *ctx)
{
	unsigned int *ids = ctx;

	*type_id = ids[*type_id];

	return 0;
}

/* Generate BTF from relocation information previously recorded */
static struct btf *btfgen_get_btf(struct btfgen_info *info)
{
	struct btf *btf_new = NULL;
	unsigned int *ids = NULL;
	unsigned int i, n = btf__type_cnt(info->marked_btf);
	int err = 0;

	btf_new = btf__new_empty();
	if (!btf_new) {
		err = -errno;
		goto err_out;
	}

	ids = calloc(n, sizeof(*ids));
	if (!ids) {
		err = -errno;
		goto err_out;
	}

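	/* ids[] maps old type IDs (indices into info->marked_btf) to IDs in
	 * btf_new. calloc() zeroes it, so any ID that is never cloned maps
	 * to 0 (void).
	 */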
	/* first pass: add all marked types to btf_new and add their new ids to the ids map */
	for (i = 1; i < n; i++) {
		const struct btf_type *cloned_type, *type;
		const char *name;
		int new_id;

		cloned_type = btf__type_by_id(info->marked_btf, i);

		if (cloned_type->name_off != MARKED)
			continue;

		type = btf__type_by_id(info->src_btf, i);

		/* add members for struct and union */
		if (btf_is_composite(type)) {
			struct btf_member *cloned_m, *m;
			unsigned short vlen;
			int idx_src;

			name = btf__str_by_offset(info->src_btf, type->name_off);

			if (btf_is_struct(type))
				err = btf__add_struct(btf_new, name, type->size);
			else
				err = btf__add_union(btf_new, name, type->size);

			if (err < 0)
				goto err_out;
			new_id = err;

			cloned_m = btf_members(cloned_type);
			m = btf_members(type);
			vlen = btf_vlen(cloned_type);
			for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
				/* add only members that are marked as used */
				if (cloned_m->name_off != MARKED)
					continue;

				name = btf__str_by_offset(info->src_btf, m->name_off);
				err = btf__add_field(btf_new, name, m->type,
						     btf_member_bit_offset(cloned_type, idx_src),
						     btf_member_bitfield_size(cloned_type, idx_src));
				if (err < 0)
					goto err_out;
			}
		} else {
			err = btf__add_type(btf_new, info->src_btf, type);
			if (err < 0)
				goto err_out;
			new_id = err;
		}

		/* add ID mapping */
		ids[i] = new_id;
	}

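	/* Example (hypothetical): if only src IDs 4 and 7 were marked, they
	 * are cloned as new IDs 1 and 2, so ids[4] == 1 and ids[7] == 2; a
	 * cloned member or pointer still referencing ID 7 is rewritten to 2
	 * below.
	 */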
	/* second pass: fix up type ids */
	for (i = 1; i < btf__type_cnt(btf_new); i++) {
		struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);

		err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
		if (err)
			goto err_out;
	}

	free(ids);
	return btf_new;

err_out:
	btf__free(btf_new);
	free(ids);
	errno = -err;
	return NULL;
}

/* Create minimized BTF file for a set of BPF objects.
 *
 * The BTFGen algorithm is divided into two main parts: (1) collect the
 * BTF types that are involved in relocations and (2) generate the BTF
 * object using the collected types.
 *
 * In order to collect the types involved in the relocations, we parse
 * the BTF and BTF.ext sections of the BPF objects and use
 * bpf_core_calc_relo_insn() to get the target specification, which
 * indicates how the types and fields are used in a relocation.
 *
 * Types are recorded in different ways according to the kind of the
 * relocation. For field-based relocations, only the members that are
 * actually used are saved in order to reduce the size of the generated
 * BTF file. For type-based relocations, empty structs/unions are
 * generated, and for enum-based relocations the whole type is saved.
 *
 * The second part of the algorithm generates the BTF object. It creates
 * an empty BTF object and fills it with the types recorded in the
 * previous step. This function takes care of only adding the struct
 * and union members that were marked as used, and it also fixes up the
 * type IDs on the generated BTF object.
 */
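/* A typical invocation (hypothetical file names) is:
 *
 *   bpftool gen min_core_btf 5.4.0.btf 5.4.0.min.btf prog1.bpf.o prog2.bpf.o
 *
 * which reads the full kernel BTF from 5.4.0.btf and writes a stripped-down
 * copy containing only the types prog1.bpf.o and prog2.bpf.o relocate
 * against.
 */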
static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
{
	struct btfgen_info *info;
	struct btf *btf_new = NULL;
	int err, i;

	info = btfgen_new_info(src_btf);
	if (!info) {
		err = -errno;
		p_err("failed to allocate info structure: %s", strerror(errno));
		goto out;
	}

	for (i = 0; objspaths[i] != NULL; i++) {
		err = btfgen_record_obj(info, objspaths[i]);
		if (err) {
			p_err("error recording relocations for %s: %s", objspaths[i],
			      strerror(errno));
			goto out;
		}
	}

	btf_new = btfgen_get_btf(info);
	if (!btf_new) {
		err = -errno;
		p_err("error generating BTF: %s", strerror(errno));
		goto out;
	}

	err = btf_save_raw(btf_new, dst_btf);
	if (err) {
		p_err("error saving BTF file: %s", strerror(errno));
		goto out;
	}

out:
	btf__free(btf_new);
	btfgen_free_info(info);

	return err;
}

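/* Parse "min_core_btf INPUT OUTPUT OBJECT [OBJECT...]": the first two
 * positional arguments name the source and destination BTF files, and every
 * remaining argument is a BPF object file to scan for CO-RE relocations.
 */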
static int do_min_core_btf(int argc, char **argv)
{
	const char *input, *output, **objs;
	int i, err;

	if (!REQ_ARGS(3)) {
		usage();
		return -1;
	}

	input = GET_ARG();
	output = GET_ARG();

	objs = (const char **) calloc(argc + 1, sizeof(*objs));
	if (!objs) {
		p_err("failed to allocate array for object names");
		return -ENOMEM;
	}

	i = 0;
	while (argc)
		objs[i++] = GET_ARG();

	err = minimize_btf(input, output, objs);
	free(objs);
	return err;
}

static const struct cmd cmds[] = {
	{ "object", do_object },
	{ "skeleton", do_skeleton },
	{ "subskeleton", do_subskeleton },
	{ "min_core_btf", do_min_core_btf },
	{ "help", do_help },
	{ 0 }
};

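/* Entry point for "bpftool gen": dispatch to one of the subcommands above,
 * falling back to do_help() for unknown or missing subcommands.
 */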
int do_gen(int argc, char **argv)
{
	return cmd_select(cmds, argc, argv, do_help);
}