// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Facebook */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_internal.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <bpf/btf.h>

#include "json_writer.h"
#include "main.h"

#define MAX_OBJ_NAME_LEN 64

static void sanitize_identifier(char *name)
{
	int i;

	for (i = 0; name[i]; i++)
		if (!isalnum(name[i]) && name[i] != '_')
			name[i] = '_';
}

static bool str_has_prefix(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

static bool str_has_suffix(const char *str, const char *suffix)
{
	size_t i, n1 = strlen(str), n2 = strlen(suffix);

	if (n1 < n2)
		return false;

	for (i = 0; i < n2; i++) {
		if (str[n1 - i - 1] != suffix[n2 - i - 1])
			return false;
	}

	return true;
}

static void get_obj_name(char *name, const char *file)
{
	/* Using basename() GNU version which doesn't modify arg. */
	strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
	name[MAX_OBJ_NAME_LEN - 1] = '\0';
	if (str_has_suffix(name, ".o"))
		name[strlen(name) - 2] = '\0';
	sanitize_identifier(name);
}

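/* Build an include-guard macro from the object name. As an illustration of
 * the sprintf() + toupper() below: obj_name "myobj" with suffix "SKEL_H"
 * yields "__MYOBJ_SKEL_H__".
 */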
static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
{
	int i;

	sprintf(guard, "__%s_%s__", obj_name, suffix);
	for (i = 0; guard[i]; i++)
		guard[i] = toupper(guard[i]);
}

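/* Derive a C identifier for a map. Non-internal maps keep their own name;
 * internal maps are reduced to the part after the known section dot, so an
 * internal map named "myobj.rodata" (name is illustrative) becomes "rodata".
 */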
static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
{
	static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	const char *name = bpf_map__name(map);
	int i, n;

	if (!bpf_map__is_internal(map)) {
		snprintf(buf, buf_sz, "%s", name);
		return true;
	}

	for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
		const char *sfx = sfxs[i], *p;

		p = strstr(name, sfx);
		if (p) {
			snprintf(buf, buf_sz, "%s", p + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
{
	static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	int i, n;

	for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
		const char *pfx = pfxs[i];

		if (str_has_prefix(sec_name, pfx)) {
			snprintf(buf, buf_sz, "%s", sec_name + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}

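/* Emit a typed struct describing one data section of the object. As a
 * rough illustration (names are hypothetical), a .bss section of object
 * "myobj" containing 'int my_cnt;' produces:
 *
 *	struct myobj__bss {
 *		int my_cnt;
 *	} *bss;
 */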
static int codegen_datasec_def(struct bpf_object *obj,
			       struct btf *btf,
			       struct btf_dump *d,
			       const struct btf_type *sec,
			       const char *obj_name)
{
	const char *sec_name = btf__name_by_offset(btf, sec->name_off);
	const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
	char var_ident[256], sec_ident[256];
	bool strip_mods = false;

	if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
		return 0;

	if (strcmp(sec_name, ".kconfig") != 0)
		strip_mods = true;

	printf("\tstruct %s__%s {\n", obj_name, sec_ident);
	for (i = 0; i < vlen; i++, sec_var++) {
		const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
		const char *var_name = btf__name_by_offset(btf, var->name_off);
		DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
			.field_name = var_ident,
			.indent_level = 2,
			.strip_mods = strip_mods,
		);
		int need_off = sec_var->offset, align_off, align;
		__u32 var_type_id = var->type;

		/* static variables are not exposed through BPF skeleton */
		if (btf_var(var)->linkage == BTF_VAR_STATIC)
			continue;

		if (off > need_off) {
			p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
			      sec_name, i, need_off, off);
			return -EINVAL;
		}

		align = btf__align_of(btf, var->type);
		if (align <= 0) {
			p_err("Failed to determine alignment of variable '%s': %d",
			      var_name, align);
			return -EINVAL;
		}
		/* Assume 32-bit architectures when generating data section
		 * struct memory layout. Given bpftool can't know which target
		 * host architecture it's emitting skeleton for, we need to be
		 * conservative and assume 32-bit one to ensure enough padding
		 * bytes are generated for pointer and long types. This will
		 * still work correctly for 64-bit architectures, because in
		 * the worst case we'll generate an unnecessary padding field,
		 * which on 64-bit architectures is not strictly necessary and
		 * would be handled by natural 8-byte alignment. But it still
		 * will be a correct memory layout, based on recorded offsets
		 * in BTF.
		 */
		if (align > 4)
			align = 4;
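		/* Illustrative numbers (not from any particular object): if
		 * the previous variable ended at off = 4 and the next one
		 * needs need_off = 16 with align = 4, align_off below rounds
		 * 4 up to 4, which differs from 16, so a 12-byte
		 * (need_off - off) __pad field is emitted.
		 */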
		align_off = (off + align - 1) / align * align;
		if (align_off != need_off) {
			printf("\t\tchar __pad%d[%d];\n",
			       pad_cnt, need_off - off);
			pad_cnt++;
		}

		/* sanitize variable name, e.g., for static vars inside
		 * a function, its name is '<function name>.<variable name>',
		 * which we'll turn into '<function name>_<variable name>'
		 */
		var_ident[0] = '\0';
		strncat(var_ident, var_name, sizeof(var_ident) - 1);
		sanitize_identifier(var_ident);

		printf("\t\t");
		err = btf_dump__emit_type_decl(d, var_type_id, &opts);
		if (err)
			return err;
		printf(";\n");

		off = sec_var->offset + sec_var->size;
	}
	printf("\t} *%s;\n", sec_ident);
	return 0;
}

static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
{
	int n = btf__type_cnt(btf), i;
	char sec_ident[256];

	for (i = 1; i < n; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const char *name;

		if (!btf_is_datasec(t))
			continue;

		name = btf__str_by_offset(btf, t->name_off);
		if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
			continue;

		if (strcmp(sec_ident, map_ident) == 0)
			return t;
	}
	return NULL;
}

static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
{
	if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
		return false;

	if (!get_map_ident(map, buf, sz))
		return false;

	return true;
}

static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec;
	char map_ident[256];
	int err = 0;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	err = libbpf_get_error(d);
	if (err)
		return err;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);

		/* In some cases (e.g., sections like .rodata.cst16 containing
		 * compiler allocated string constants only) there will be
		 * special internal maps with no corresponding DATASEC BTF
		 * type. In such case, generate empty structs for each such
		 * map. It will still be memory-mapped and its contents
		 * accessible from user-space through BPF skeleton.
		 */
		if (!sec) {
			printf("\tstruct %s__%s {\n", obj_name, map_ident);
			printf("\t} *%s;\n", map_ident);
		} else {
			err = codegen_datasec_def(obj, btf, d, sec, obj_name);
			if (err)
				goto out;
		}
	}

out:
	btf_dump__free(d);
	return err;
}

static bool btf_is_ptr_to_func_proto(const struct btf *btf,
				     const struct btf_type *v)
{
	return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
}

static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec, *var;
	const struct btf_var_secinfo *sec_var;
	int i, err = 0, vlen;
	char map_ident[256], sec_ident[256];
	bool strip_mods = false, needs_typeof = false;
	const char *sec_name, *var_name;
	__u32 var_type_id;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec)
			continue;

		sec_name = btf__name_by_offset(btf, sec->name_off);
		if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
			continue;

		strip_mods = strcmp(sec_name, ".kconfig") != 0;
		printf("\tstruct %s__%s {\n", obj_name, sec_ident);

		sec_var = btf_var_secinfos(sec);
		vlen = btf_vlen(sec);
		for (i = 0; i < vlen; i++, sec_var++) {
			DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
				.indent_level = 2,
				.strip_mods = strip_mods,
				/* we'll print the name separately */
				.field_name = "",
			);

			var = btf__type_by_id(btf, sec_var->type);
			var_name = btf__name_by_offset(btf, var->name_off);
			var_type_id = var->type;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			/* The datasec member has KIND_VAR but we want the
			 * underlying type of the variable (e.g. KIND_INT).
			 */
			var = skip_mods_and_typedefs(btf, var->type, NULL);

			printf("\t\t");
			/* Func and array members require special handling.
			 * Instead of producing `typename *var`, they produce
			 * `typeof(typename) *var`. This allows us to keep a
			 * similar syntax where the identifier is just prefixed
			 * by *, allowing us to ignore C declaration minutiae.
			 */
			needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
			if (needs_typeof)
				printf("typeof(");

			err = btf_dump__emit_type_decl(d, var_type_id, &opts);
			if (err)
				goto out;

			if (needs_typeof)
				printf(")");

			printf(" *%s;\n", var_name);
		}
		printf("\t} %s;\n", sec_ident);
	}

out:
	btf_dump__free(d);
	return err;
}

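/* codegen() takes a multi-line template, strips the common leading tabs
 * established by the template's first line, trims trailing whitespace, and
 * vprintf()s the result. A minimal illustration of the calling convention
 * (not a real call site from this file):
 *
 *	codegen("\
 *		\n\
 *		int x; \n\
 *		", ...);
 *
 * emits "int x;\n" with the two baseline tabs removed.
 */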
static void codegen(const char *template, ...)
{
	const char *src, *end;
	int skip_tabs = 0, n;
	char *s, *dst;
	va_list args;
	char c;

	n = strlen(template);
	s = malloc(n + 1);
	if (!s)
		exit(-1);
	src = template;
	dst = s;

	/* find out "baseline" indentation to skip */
	while ((c = *src++)) {
		if (c == '\t') {
			skip_tabs++;
		} else if (c == '\n') {
			break;
		} else {
			p_err("unrecognized character at pos %td in template '%s': '%c'",
			      src - template - 1, template, c);
			free(s);
			exit(-1);
		}
	}

	while (*src) {
		/* skip baseline indentation tabs */
		for (n = skip_tabs; n > 0; n--, src++) {
			if (*src != '\t') {
				p_err("not enough tabs at pos %td in template '%s'",
				      src - template - 1, template);
				free(s);
				exit(-1);
			}
		}
		/* trim trailing whitespace */
		end = strchrnul(src, '\n');
		for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
			;
		memcpy(dst, src, n);
		dst += n;
		if (*end)
			*dst++ = '\n';
		src = *end ? end + 1 : end;
	}
	*dst++ = '\0';

	/* print out using adjusted template */
	va_start(args, template);
	n = vprintf(s, args);
	va_end(args);

	free(s);
}

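/* Emit data as C string-literal escapes; e.g. (illustrative) the bytes
 * { 0x41, 0x00 } become "\x41\0", with a backslash-newline break emitted
 * once a line approaches 78 columns.
 */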
static void print_hex(const char *data, int data_sz)
{
	int i, len;

	for (i = 0, len = 0; i < data_sz; i++) {
		int w = data[i] ? 4 : 2;

		len += w;
		if (len > 78) {
			printf("\\\n");
			len = w;
		}
		if (!data[i])
			printf("\\0");
		else
			printf("\\x%02x", (unsigned char)data[i]);
	}
}

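/* Size of the mmap()ed region for a map: value size rounded up to 8 bytes,
 * times max_entries, rounded up to the page size. Illustrative numbers: a
 * single-entry 7-byte value rounds to 8 bytes and then up to one 4096-byte
 * page (assuming 4 KiB pages).
 */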
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

/* Emit type size asserts for all top-level fields in memory-mapped internal maps. */
static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct bpf_map *map;
	struct btf_var_secinfo *sec_var;
	int i, vlen;
	const struct btf_type *sec;
	char map_ident[256], var_ident[256];

	codegen("\
		\n\
		__attribute__((unused)) static void \n\
		%1$s__assert(struct %1$s *s __attribute__((unused))) \n\
		{ \n\
		#ifdef __cplusplus \n\
		#define _Static_assert static_assert \n\
		#endif \n\
		", obj_name);

	bpf_object__for_each_map(map, obj) {
		if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec) {
			/* best effort, couldn't find the type for this map */
			continue;
		}

		sec_var = btf_var_secinfos(sec);
		vlen = btf_vlen(sec);

		for (i = 0; i < vlen; i++, sec_var++) {
			const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
			const char *var_name = btf__name_by_offset(btf, var->name_off);
			long var_size;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			var_size = btf__resolve_size(btf, var->type);
			if (var_size < 0)
				continue;

			var_ident[0] = '\0';
			strncat(var_ident, var_name, sizeof(var_ident) - 1);
			sanitize_identifier(var_ident);

			printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
			       map_ident, var_ident, var_size, var_ident);
		}
	}
	codegen("\
		\n\
		#ifdef __cplusplus \n\
		#undef _Static_assert \n\
		#endif \n\
		} \n\
		");
}

static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		const char *tp_name;

		codegen("\
			\n\
			\n\
			static inline int \n\
			%1$s__%2$s__attach(struct %1$s *skel) \n\
			{ \n\
				int prog_fd = skel->progs.%2$s.prog_fd; \n\
			", obj_name, bpf_program__name(prog));

		switch (bpf_program__type(prog)) {
		case BPF_PROG_TYPE_RAW_TRACEPOINT:
			tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
			printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
			break;
		case BPF_PROG_TYPE_TRACING:
		case BPF_PROG_TYPE_LSM:
			if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
				printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
			else
				printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
			break;
		default:
			printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
			break;
		}
		codegen("\
			\n\
			\n\
				if (fd > 0) \n\
					skel->links.%1$s_fd = fd; \n\
				return fd; \n\
			} \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		\n\
		static inline int \n\
		%1$s__attach(struct %1$s *skel) \n\
		{ \n\
			int ret = 0; \n\
			\n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
			", obj_name, bpf_program__name(prog));
	}

	codegen("\
		\n\
			return ret < 0 ? ret : 0; \n\
		} \n\
		\n\
		static inline void \n\
		%1$s__detach(struct %1$s *skel) \n\
		{ \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->links.%1$s_fd); \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		} \n\
		");
}

static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	char ident[256];

	codegen("\
		\n\
		static void \n\
		%1$s__destroy(struct %1$s *skel) \n\
		{ \n\
			if (!skel) \n\
				return; \n\
			%1$s__detach(skel); \n\
		",
		obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->progs.%1$s.prog_fd); \n\
			", bpf_program__name(prog));
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		if (bpf_map__is_internal(map) &&
		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
			       ident, bpf_map_mmap_sz(map));
		codegen("\
			\n\
				skel_closenz(skel->maps.%1$s.map_fd); \n\
			", ident);
	}
	codegen("\
		\n\
			skel_free(skel); \n\
		} \n\
		",
		obj_name);
}

static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
	struct bpf_map *map;
	char ident[256];
	int err = 0;

	err = bpf_object__gen_loader(obj, &opts);
	if (err)
		return err;

	err = bpf_object__load(obj);
	if (err) {
		p_err("failed to load object file");
		goto out;
	}
	/* If there was no error during load then gen_loader_opts
	 * are populated with the loader program.
	 */

	/* finish generating 'struct skel' */
	codegen("\
		\n\
		}; \n\
		", obj_name);

	codegen_attach_detach(obj, obj_name);

	codegen_destroy(obj, obj_name);

	codegen("\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(void) \n\
		{ \n\
			struct %1$s *skel; \n\
			\n\
			skel = skel_alloc(sizeof(*skel)); \n\
			if (!skel) \n\
				goto cleanup; \n\
			skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
		",
		obj_name, opts.data_sz);
	bpf_object__for_each_map(map, obj) {
		const void *mmap_data = NULL;
		size_t mmap_size = 0;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		codegen("\
			\n\
				skel->%1$s = skel_prep_map_data((void *)\"\\ \n\
			", ident);
		mmap_data = bpf_map__initial_value(map, &mmap_size);
		print_hex(mmap_data, mmap_size);
		codegen("\
			\n\
			\", %1$zd, %2$zd); \n\
				if (!skel->%3$s) \n\
					goto cleanup; \n\
				skel->maps.%3$s.initial_value = (__u64) (long) skel->%3$s; \n\
			", bpf_map_mmap_sz(map), mmap_size, ident);
	}
	codegen("\
		\n\
			return skel; \n\
		cleanup: \n\
			%1$s__destroy(skel); \n\
			return NULL; \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__load(struct %1$s *skel) \n\
		{ \n\
			struct bpf_load_and_run_opts opts = {}; \n\
			int err; \n\
			\n\
			opts.ctx = (struct bpf_loader_ctx *)skel; \n\
			opts.data_sz = %2$d; \n\
			opts.data = (void *)\"\\ \n\
		",
		obj_name, opts.data_sz);
	print_hex(opts.data, opts.data_sz);
	codegen("\
		\n\
		\"; \n\
		");

	codegen("\
		\n\
			opts.insns_sz = %d; \n\
			opts.insns = (void *)\"\\ \n\
		",
		opts.insns_sz);
	print_hex(opts.insns, opts.insns_sz);
	codegen("\
		\n\
		\"; \n\
			err = bpf_load_and_run(&opts); \n\
			if (err < 0) \n\
				return err; \n\
		", obj_name);
	bpf_object__for_each_map(map, obj) {
		const char *mmap_flags;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
			mmap_flags = "PROT_READ";
		else
			mmap_flags = "PROT_READ | PROT_WRITE";

		codegen("\
			\n\
				skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value, \n\
								    %2$zd, %3$s, skel->maps.%1$s.map_fd); \n\
				if (!skel->%1$s) \n\
					return -ENOMEM; \n\
			",
			ident, bpf_map_mmap_sz(map), mmap_flags);
	}
	codegen("\
		\n\
			return 0; \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_and_load(void) \n\
		{ \n\
			struct %1$s *skel; \n\
			\n\
			skel = %1$s__open(); \n\
			if (!skel) \n\
				return NULL; \n\
			if (%1$s__load(skel)) { \n\
				%1$s__destroy(skel); \n\
				return NULL; \n\
			} \n\
			return skel; \n\
		} \n\
		\n\
		", obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
		\n\
		#endif /* %s */ \n\
		",
		header_guard);
	err = 0;
out:
	return err;
}

static void
codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
{
	struct bpf_map *map;
	char ident[256];
	size_t i;

	if (!map_cnt)
		return;

	codegen("\
		\n\
		\n\
			/* maps */ \n\
			s->map_cnt = %zu; \n\
			s->map_skel_sz = sizeof(*s->maps); \n\
			s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz); \n\
			if (!s->maps) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
		",
		map_cnt
	);
	i = 0;
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		codegen("\
			\n\
			\n\
				s->maps[%zu].name = \"%s\"; \n\
				s->maps[%zu].map = &obj->maps.%s; \n\
			",
			i, bpf_map__name(map), i, ident);
		/* memory-mapped internal maps */
		if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) {
			printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
			       i, ident);
		}
		i++;
	}
}

static void
codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
{
	struct bpf_program *prog;
	size_t i;

	if (!prog_cnt)
		return;

	codegen("\
		\n\
		\n\
			/* programs */ \n\
			s->prog_cnt = %zu; \n\
			s->prog_skel_sz = sizeof(*s->progs); \n\
			s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz); \n\
			if (!s->progs) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
		",
		prog_cnt
	);
	i = 0;
	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
			\n\
				s->progs[%1$zu].name = \"%2$s\"; \n\
				s->progs[%1$zu].prog = &obj->progs.%2$s; \n\
			",
			i, bpf_program__name(prog));

		if (populate_links) {
			codegen("\
				\n\
					s->progs[%1$zu].link = &obj->links.%2$s; \n\
				",
				i, bpf_program__name(prog));
		}
		i++;
	}
}

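/* Generate a standalone skeleton header for a BPF object file and print it
 * to stdout. Typical invocation (file names are illustrative):
 *
 *	bpftool gen skeleton myprog.bpf.o > myprog.skel.h
 */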
static int do_skeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
	size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file;
	char ident[256];
	struct bpf_program *prog;
	int fd, err = -1;
	struct bpf_map *map;
	struct btf *btf;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);
	opts.object_name = obj_name;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		opts.kernel_log_level = 1 + 2 + 4;
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	err = libbpf_get_error(obj);
	if (err) {
		char err_buf[256];

		libbpf_strerror(err, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident))) {
			p_err("ignoring unrecognized internal map '%s'...",
			      bpf_map__name(map));
			continue;
		}
		map_cnt++;
	}
	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	get_header_guard(header_guard, obj_name, "SKEL_H");
	if (use_loader) {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
		/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
		#ifndef %2$s \n\
		#define %2$s \n\
		\n\
		#include <bpf/skel_internal.h> \n\
		\n\
		struct %1$s { \n\
			struct bpf_loader_ctx ctx; \n\
		",
		obj_name, header_guard
		);
	} else {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
		\n\
		/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
		#ifndef %2$s \n\
		#define %2$s \n\
		\n\
		#include <errno.h> \n\
		#include <stdlib.h> \n\
		#include <bpf/libbpf.h> \n\
		\n\
		struct %1$s { \n\
			struct bpf_object_skeleton *skeleton; \n\
			struct bpf_object *obj; \n\
		",
		obj_name, header_guard
		);
	}

	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			if (use_loader)
				printf("\t\tstruct bpf_map_desc %s;\n", ident);
			else
				printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tstruct bpf_prog_desc %s;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_program *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tint %s_fd;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_link *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} links;\n");
	}

	btf = bpf_object__btf(obj);
	if (btf) {
		err = codegen_datasecs(obj, obj_name);
		if (err)
			goto out;
	}
	if (use_loader) {
		err = gen_trace(obj, obj_name, header_guard);
		goto out;
	}

	codegen("\
		\n\
		\n\
		#ifdef __cplusplus \n\
			static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr); \n\
			static inline struct %1$s *open_and_load(); \n\
			static inline int load(struct %1$s *skel); \n\
			static inline int attach(struct %1$s *skel); \n\
			static inline void detach(struct %1$s *skel); \n\
			static inline void destroy(struct %1$s *skel); \n\
			static inline const void *elf_bytes(size_t *sz); \n\
		#endif /* __cplusplus */ \n\
		}; \n\
		\n\
		static void \n\
		%1$s__destroy(struct %1$s *obj) \n\
		{ \n\
			if (!obj) \n\
				return; \n\
			if (obj->skeleton) \n\
				bpf_object__destroy_skeleton(obj->skeleton); \n\
			free(obj); \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__create_skeleton(struct %1$s *obj); \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_opts(const struct bpf_object_open_opts *opts) \n\
		{ \n\
			struct %1$s *obj; \n\
			int err; \n\
			\n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
			if (!obj) { \n\
				errno = ENOMEM; \n\
				return NULL; \n\
			} \n\
			\n\
			err = %1$s__create_skeleton(obj); \n\
			if (err) \n\
				goto err_out; \n\
			\n\
			err = bpf_object__open_skeleton(obj->skeleton, opts); \n\
			if (err) \n\
				goto err_out; \n\
			\n\
			return obj; \n\
		err_out: \n\
			%1$s__destroy(obj); \n\
			errno = -err; \n\
			return NULL; \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(void) \n\
		{ \n\
			return %1$s__open_opts(NULL); \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__load(struct %1$s *obj) \n\
		{ \n\
			return bpf_object__load_skeleton(obj->skeleton); \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_and_load(void) \n\
		{ \n\
			struct %1$s *obj; \n\
			int err; \n\
			\n\
			obj = %1$s__open(); \n\
			if (!obj) \n\
				return NULL; \n\
			err = %1$s__load(obj); \n\
			if (err) { \n\
				%1$s__destroy(obj); \n\
				errno = -err; \n\
				return NULL; \n\
			} \n\
			return obj; \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__attach(struct %1$s *obj) \n\
		{ \n\
			return bpf_object__attach_skeleton(obj->skeleton); \n\
		} \n\
		\n\
		static inline void \n\
		%1$s__detach(struct %1$s *obj) \n\
		{ \n\
			return bpf_object__detach_skeleton(obj->skeleton); \n\
		} \n\
		",
		obj_name
	);

	codegen("\
		\n\
		\n\
		static inline const void *%1$s__elf_bytes(size_t *sz); \n\
		\n\
		static inline int \n\
		%1$s__create_skeleton(struct %1$s *obj) \n\
		{ \n\
			struct bpf_object_skeleton *s; \n\
			int err; \n\
			\n\
			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s)); \n\
			if (!s) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
			\n\
			s->sz = sizeof(*s); \n\
			s->name = \"%1$s\"; \n\
			s->obj = &obj->obj; \n\
		",
		obj_name
	);

	codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
	codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);

	codegen("\
		\n\
		\n\
			s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\
			\n\
			obj->skeleton = s; \n\
			return 0; \n\
		err: \n\
			bpf_object__destroy_skeleton(s); \n\
			return err; \n\
		} \n\
		\n\
		static inline const void *%2$s__elf_bytes(size_t *sz) \n\
		{ \n\
			*sz = %1$d; \n\
			return (const void *)\"\\ \n\
		"
		, file_sz, obj_name);

	/* embed contents of BPF object file */
	print_hex(obj_data, file_sz);

	codegen("\
		\n\
		\"; \n\
		} \n\
		\n\
		#ifdef __cplusplus \n\
		struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); } \n\
		struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); } \n\
		int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); } \n\
		int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); } \n\
		void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); } \n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); } \n\
		const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
		#endif /* __cplusplus */ \n\
		\n\
		",
		obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
		\n\
		#endif /* %1$s */ \n\
		",
		header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}

/* Subskeletons are like skeletons, except they don't own the bpf_object,
 * associated maps, links, etc. Instead, they know about the existence of
 * variables, maps, programs and are able to find their locations
 * _at runtime_ from an already loaded bpf_object.
 *
 * This allows for library-like BPF objects to have userspace counterparts
 * with access to their own items without having to know anything about the
 * final BPF object that the library was linked into.
 */
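/* Illustrative usage from the final application, assuming a library object
 * named "mylib" that was linked into the app's BPF object (all names below
 * are hypothetical):
 *
 *	struct mylib *lib = mylib__open(app_skel->obj);
 *	... read/write *lib->data.some_var ...
 *	mylib__destroy(lib);
 */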
static int do_subskeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
	size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file, *var_name;
	char ident[256];
	int fd, err = -1, map_type_id;
	const struct bpf_map *map;
	struct bpf_program *prog;
	struct btf *btf;
	const struct btf_type *map_type, *var_type;
	const struct btf_var_secinfo *var;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (use_loader) {
		p_err("cannot use loader for subskeletons");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);

	/* The empty object name allows us to use bpf_map__name and produce
	 * ELF section names out of it. (".data" instead of "obj.data")
	 */
	opts.object_name = "";
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	if (!obj) {
		char err_buf[256];

		libbpf_strerror(errno, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	btf = bpf_object__btf(obj);
	if (!btf) {
		err = -1;
		p_err("need btf type information for %s", obj_name);
		goto out;
	}

	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	/* First, count how many variables we have to find.
	 * We need this in advance so the subskel can allocate the right
	 * amount of storage.
	 */
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		/* Also count all maps that have a name */
		map_cnt++;

		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0) {
			err = map_type_id;
			goto out;
		}
		map_type = btf__type_by_id(btf, map_type_id);

		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			var_cnt++;
		}
	}

	get_header_guard(header_guard, obj_name, "SUBSKEL_H");
	codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
		\n\
		/* THIS FILE IS AUTOGENERATED! */ \n\
		#ifndef %2$s \n\
		#define %2$s \n\
		\n\
		#include <errno.h> \n\
		#include <stdlib.h> \n\
		#include <bpf/libbpf.h> \n\
		\n\
		struct %1$s { \n\
			struct bpf_object *obj; \n\
			struct bpf_object_subskeleton *subskel; \n\
		", obj_name, header_guard);

	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			printf("\t\tstruct bpf_program *%s;\n",
			       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
	}

	err = codegen_subskel_datasecs(obj, obj_name);
	if (err)
		goto out;

	/* emit code that will allocate enough storage for all symbols */
	codegen("\
		\n\
		\n\
		#ifdef __cplusplus \n\
			static inline struct %1$s *open(const struct bpf_object *src); \n\
			static inline void destroy(struct %1$s *skel); \n\
		#endif /* __cplusplus */ \n\
		}; \n\
		\n\
		static inline void \n\
		%1$s__destroy(struct %1$s *skel) \n\
		{ \n\
			if (!skel) \n\
				return; \n\
			if (skel->subskel) \n\
				bpf_object__destroy_subskeleton(skel->subskel); \n\
			free(skel); \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(const struct bpf_object *src) \n\
		{ \n\
			struct %1$s *obj; \n\
			struct bpf_object_subskeleton *s; \n\
			int err; \n\
			\n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
			if (!obj) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
			s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s)); \n\
			if (!s) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
			s->sz = sizeof(*s); \n\
			s->obj = src; \n\
			s->var_skel_sz = sizeof(*s->vars); \n\
			obj->subskel = s; \n\
			\n\
			/* vars */ \n\
			s->var_cnt = %2$d; \n\
			s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars)); \n\
			if (!s->vars) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
		",
		obj_name, var_cnt
	);

	/* walk through each symbol and emit the runtime representation */
	bpf_object__for_each_map(map, obj) {
		if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0)
			/* skip over internal maps with no type */
			continue;

		map_type = btf__type_by_id(btf, map_type_id);
		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);
			var_name = btf__name_by_offset(btf, var_type->name_off);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			/* Note that we use the dot prefix in .data as the
			 * field access operator, i.e. maps.%s becomes
			 * maps.data
			 */
			codegen("\
				\n\
				\n\
					s->vars[%3$d].name = \"%1$s\"; \n\
					s->vars[%3$d].map = &obj->maps.%2$s; \n\
					s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s; \n\
				", var_name, ident, var_idx);

			var_idx++;
		}
	}

	codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
	codegen_progs_skeleton(obj, prog_cnt, false /*links*/);

	codegen("\
		\n\
		\n\
			err = bpf_object__open_subskeleton(s); \n\
			if (err) \n\
				goto err; \n\
			\n\
			return obj; \n\
		err: \n\
			%1$s__destroy(obj); \n\
			errno = -err; \n\
			return NULL; \n\
		} \n\
		\n\
		#ifdef __cplusplus \n\
		struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); } \n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); } \n\
		#endif /* __cplusplus */ \n\
		\n\
		#endif /* %2$s */ \n\
		",
		obj_name, header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}

static int do_object(int argc, char **argv)
{
	struct bpf_linker *linker;
	const char *output_file, *file;
	int err = 0;

	if (!REQ_ARGS(2)) {
		usage();
		return -1;
	}

	output_file = GET_ARG();

	linker = bpf_linker__new(output_file, NULL);
	if (!linker) {
		p_err("failed to create BPF linker instance");
		return -1;
	}

	while (argc) {
		file = GET_ARG();

		err = bpf_linker__add_file(linker, file, NULL);
		if (err) {
			p_err("failed to link '%s': %s (%d)", file, strerror(err), err);
			goto out;
		}
	}

	err = bpf_linker__finalize(linker);
	if (err) {
		p_err("failed to finalize ELF file: %s (%d)", strerror(err), err);
		goto out;
	}

	err = 0;
out:
	bpf_linker__free(linker);
	return err;
}

static int do_help(int argc, char **argv)
{
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
		"       %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
		"       %1$s %2$s help\n"
		"\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-L|--use-loader} }\n"
		"",
		bin_name, "gen");

	return 0;
}

static int btf_save_raw(const struct btf *btf, const char *path)
{
	const void *data;
	FILE *f = NULL;
	__u32 data_sz;
	int err = 0;

	data = btf__raw_data(btf, &data_sz);
	if (!data)
		return -ENOMEM;

	f = fopen(path, "wb");
	if (!f)
		return -errno;

	if (fwrite(data, 1, data_sz, f) != data_sz)
		err = -errno;

	fclose(f);
	return err;
}

struct btfgen_info {
	struct btf *src_btf;
	struct btf *marked_btf; /* btf structure used to mark used types */
};

static size_t btfgen_hash_fn(const void *key, void *ctx)
{
	return (size_t)key;
}

static bool btfgen_equal_fn(const void *k1, const void *k2, void *ctx)
{
	return k1 == k2;
}

static void *u32_as_hash_key(__u32 x)
{
	return (void *)(uintptr_t)x;
}

static void btfgen_free_info(struct btfgen_info *info)
{
	if (!info)
		return;

	btf__free(info->src_btf);
	btf__free(info->marked_btf);

	free(info);
}

static struct btfgen_info *
btfgen_new_info(const char *targ_btf_path)
{
	struct btfgen_info *info;
	int err;

	info = calloc(1, sizeof(*info));
	if (!info)
		return NULL;

	info->src_btf = btf__parse(targ_btf_path, NULL);
	if (!info->src_btf) {
		err = -errno;
		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
		goto err_out;
	}

	info->marked_btf = btf__parse(targ_btf_path, NULL);
	if (!info->marked_btf) {
		err = -errno;
		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
		goto err_out;
	}

	return info;

err_out:
	btfgen_free_info(info);
	errno = -err;
	return NULL;
}

#define MARKED UINT32_MAX

static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
{
	const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
	struct btf_member *m = btf_members(t) + idx;

	m->name_off = MARKED;
}

static int
btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
{
	const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
	struct btf_type *cloned_type;
	struct btf_param *param;
	struct btf_array *array;
	int err, i;

	if (type_id == 0)
		return 0;

	/* mark type on cloned BTF as used */
	cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
	cloned_type->name_off = MARKED;

	/* recursively mark other types needed by it */
	switch (btf_kind(btf_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		break;
	case BTF_KIND_PTR:
		if (follow_pointers) {
			err = btfgen_mark_type(info, btf_type->type, follow_pointers);
			if (err)
				return err;
		}
		break;
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_TYPEDEF:
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY:
		array = btf_array(btf_type);

		/* mark array type */
		err = btfgen_mark_type(info, array->type, follow_pointers);
		/* mark array's index type */
		err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_FUNC_PROTO:
		/* mark ret type */
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;

		/* mark parameters types */
		param = btf_params(btf_type);
		for (i = 0; i < btf_vlen(btf_type); i++) {
			err = btfgen_mark_type(info, param->type, follow_pointers);
			if (err)
				return err;
			param++;
		}
		break;
	/* tells if some other type needs to be handled */
	default:
		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
		return -EINVAL;
	}

	return 0;
}

static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	struct btf *btf = info->src_btf;
	const struct btf_type *btf_type;
	struct btf_member *btf_member;
	struct btf_array *array;
	unsigned int type_id = targ_spec->root_type_id;
	int idx, err;

	/* mark root type */
	btf_type = btf__type_by_id(btf, type_id);
	err = btfgen_mark_type(info, type_id, false);
	if (err)
		return err;

	/* mark types for complex types (arrays, unions, structures) */
	for (int i = 1; i < targ_spec->raw_len; i++) {
		/* skip typedefs and mods */
		while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
			type_id = btf_type->type;
			btf_type = btf__type_by_id(btf, type_id);
		}

		switch (btf_kind(btf_type)) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			idx = targ_spec->raw_spec[i];
			btf_member = btf_members(btf_type) + idx;

			/* mark member */
			btfgen_mark_member(info, type_id, idx);

			/* mark member's type */
			type_id = btf_member->type;
			btf_type = btf__type_by_id(btf, type_id);
			err = btfgen_mark_type(info, type_id, false);
			if (err)
				return err;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(btf_type);
			type_id = array->type;
			btf_type = btf__type_by_id(btf, type_id);
			break;
		default:
			p_err("unsupported kind: %s (%d)",
			      btf_kind_str(btf_type), btf_type->type);
			return -EINVAL;
		}
	}

	return 0;
}

static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type(info, targ_spec->root_type_id, true);
}

static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type(info, targ_spec->root_type_id, false);
}

static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
{
	switch (res->relo_kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
	case BPF_CORE_FIELD_BYTE_SIZE:
	case BPF_CORE_FIELD_EXISTS:
	case BPF_CORE_FIELD_SIGNED:
	case BPF_CORE_FIELD_LSHIFT_U64:
	case BPF_CORE_FIELD_RSHIFT_U64:
		return btfgen_record_field_relo(info, res);
	case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
		return 0;
	case BPF_CORE_TYPE_ID_TARGET:
	case BPF_CORE_TYPE_EXISTS:
	case BPF_CORE_TYPE_SIZE:
		return btfgen_record_type_relo(info, res);
	case BPF_CORE_ENUMVAL_EXISTS:
	case BPF_CORE_ENUMVAL_VALUE:
		return btfgen_record_enumval_relo(info, res);
	default:
		return -EINVAL;
	}
}

static struct bpf_core_cand_list *
btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
{
	const struct btf_type *local_type;
	struct bpf_core_cand_list *cands = NULL;
	struct bpf_core_cand local_cand = {};
	size_t local_essent_len;
	const char *local_name;
	int err;

	local_cand.btf = local_btf;
	local_cand.id = local_id;

	local_type = btf__type_by_id(local_btf, local_id);
	if (!local_type) {
		err = -EINVAL;
		goto err_out;
	}

	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name) {
		err = -EINVAL;
		goto err_out;
	}
	local_essent_len = bpf_core_essential_name_len(local_name);

	cands = calloc(1, sizeof(*cands));
	if (!cands)
		return NULL;

	err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
	if (err)
		goto err_out;

	return cands;

err_out:
	bpf_core_free_cands(cands);
	errno = -err;
	return NULL;
}

/* Record relocation information for a single BPF object */
static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
{
	const struct btf_ext_info_sec *sec;
	const struct bpf_core_relo *relo;
	const struct btf_ext_info *seg;
	struct hashmap_entry *entry;
	struct hashmap *cand_cache = NULL;
	struct btf_ext *btf_ext = NULL;
	unsigned int relo_idx;
	struct btf *btf = NULL;
	size_t i;
	int err;

	btf = btf__parse(obj_path, &btf_ext);
	if (!btf) {
		err = -errno;
		p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));
		return err;
	}

	if (!btf_ext) {
		p_err("failed to parse BPF object '%s': section %s not found",
		      obj_path, BTF_EXT_ELF_SEC);
		err = -EINVAL;
		goto out;
	}

	if (btf_ext->core_relo_info.len == 0) {
		err = 0;
		goto out;
	}

	cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
	if (IS_ERR(cand_cache)) {
		err = PTR_ERR(cand_cache);
		goto out;
	}

	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
			struct bpf_core_spec specs_scratch[3] = {};
			struct bpf_core_relo_res targ_res = {};
			struct bpf_core_cand_list *cands = NULL;
			const void *type_key = u32_as_hash_key(relo->type_id);
			const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);

			if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
			    !hashmap__find(cand_cache, type_key, (void **)&cands)) {
				cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
				if (!cands) {
					err = -errno;
					goto out;
				}

				err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
				if (err)
					goto out;
			}

			err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
						      specs_scratch, &targ_res);
			if (err)
				goto out;

			/* specs_scratch[2] is the target spec */
			err = btfgen_record_reloc(info, &specs_scratch[2]);
			if (err)
				goto out;
		}
	}

out:
	btf__free(btf);
	btf_ext__free(btf_ext);

	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {
			bpf_core_free_cands(entry->value);
		}
		hashmap__free(cand_cache);
	}

	return err;
}

static int btfgen_remap_id(__u32 *type_id, void *ctx)
{
	unsigned int *ids = ctx;

	*type_id = ids[*type_id];

	return 0;
}

/* Generate BTF from relocation information previously recorded */
static struct btf *btfgen_get_btf(struct btfgen_info *info)
{
	struct btf *btf_new = NULL;
	unsigned int *ids = NULL;
	unsigned int i, n = btf__type_cnt(info->marked_btf);
	int err = 0;

	btf_new = btf__new_empty();
	if (!btf_new) {
		err = -errno;
		goto err_out;
	}

	ids = calloc(n, sizeof(*ids));
	if (!ids) {
		err = -errno;
		goto err_out;
	}

	/* first pass: add all marked types to btf_new and add their new ids to the ids map */
	for (i = 1; i < n; i++) {
		const struct btf_type *cloned_type, *type;
		const char *name;
		int new_id;

		cloned_type = btf__type_by_id(info->marked_btf, i);

		if (cloned_type->name_off != MARKED)
			continue;

		type = btf__type_by_id(info->src_btf, i);

		/* add members for struct and union */
		if (btf_is_composite(type)) {
			struct btf_member *cloned_m, *m;
			unsigned short vlen;
			int idx_src;

			name = btf__str_by_offset(info->src_btf, type->name_off);

			if (btf_is_struct(type))
				err = btf__add_struct(btf_new, name, type->size);
			else
				err = btf__add_union(btf_new, name, type->size);

			if (err < 0)
				goto err_out;
			new_id = err;

			cloned_m = btf_members(cloned_type);
			m = btf_members(type);
			vlen = btf_vlen(cloned_type);
			for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
				/* add only members that are marked as used */
				if (cloned_m->name_off != MARKED)
					continue;

				name = btf__str_by_offset(info->src_btf, m->name_off);
				err = btf__add_field(btf_new, name, m->type,
						     btf_member_bit_offset(cloned_type, idx_src),
						     btf_member_bitfield_size(cloned_type, idx_src));
				if (err < 0)
					goto err_out;
			}
		} else {
			err = btf__add_type(btf_new, info->src_btf, type);
			if (err < 0)
				goto err_out;
			new_id = err;
		}

		/* add ID mapping */
		ids[i] = new_id;
	}

	/* second pass: fix up type ids */
	for (i = 1; i < btf__type_cnt(btf_new); i++) {
		struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);

		err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
		if (err)
			goto err_out;
	}

	free(ids);
	return btf_new;

err_out:
	btf__free(btf_new);
	free(ids);
	errno = -err;
	return NULL;
}

/* Create minimized BTF file for a set of BPF objects.
 *
 * The BTFGen algorithm is divided in two main parts: (1) collect the
 * BTF types that are involved in relocations and (2) generate the BTF
 * object using the collected types.
 *
 * In order to collect the types involved in the relocations, we parse
 * the BTF and BTF.ext sections of the BPF objects and use
 * bpf_core_calc_relo_insn() to get the target specification, which
 * indicates how the types and fields are used in a relocation.
 *
 * Types are recorded in different ways according to the kind of the
 * relocation. For field-based relocations only the members that are
 * actually used are saved in order to reduce the size of the generated
 * BTF file. For type-based relocations empty struct / unions are
 * generated and for enum-based relocations the whole type is saved.
 *
 * The second part of the algorithm generates the BTF object. It creates
 * an empty BTF object and fills it with the types recorded in the
 * previous step. This function takes care of only adding the structure
 * and union members that were marked as used and it also fixes up the
 * type IDs on the generated BTF object.
 */
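/* Illustrative invocation, matching the "min_core_btf INPUT OUTPUT OBJECT"
 * form from do_help() (paths below are hypothetical):
 *
 *	bpftool gen min_core_btf /sys/kernel/btf/vmlinux vmlinux.min.btf prog.bpf.o
 */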
static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
{
	struct btfgen_info *info;
	struct btf *btf_new = NULL;
	int err, i;

	info = btfgen_new_info(src_btf);
	if (!info) {
		err = -errno;
		p_err("failed to allocate info structure: %s", strerror(errno));
		goto out;
	}

	for (i = 0; objspaths[i] != NULL; i++) {
		err = btfgen_record_obj(info, objspaths[i]);
		if (err) {
			p_err("error recording relocations for %s: %s", objspaths[i],
			      strerror(errno));
			goto out;
		}
	}

	btf_new = btfgen_get_btf(info);
	if (!btf_new) {
		err = -errno;
		p_err("error generating BTF: %s", strerror(errno));
		goto out;
	}

	err = btf_save_raw(btf_new, dst_btf);
	if (err) {
		p_err("error saving btf file: %s", strerror(errno));
		goto out;
	}

out:
	btf__free(btf_new);
	btfgen_free_info(info);

	return err;
}

static int do_min_core_btf(int argc, char **argv)
{
	const char *input, *output, **objs;
	int i, err;

	if (!REQ_ARGS(3)) {
		usage();
		return -1;
	}

	input = GET_ARG();
	output = GET_ARG();

	objs = (const char **) calloc(argc + 1, sizeof(*objs));
	if (!objs) {
		p_err("failed to allocate array for object names");
		return -ENOMEM;
	}

	i = 0;
	while (argc)
		objs[i++] = GET_ARG();

	err = minimize_btf(input, output, objs);
	free(objs);
	return err;
}

static const struct cmd cmds[] = {
	{ "object",		do_object },
	{ "skeleton",		do_skeleton },
	{ "subskeleton",	do_subskeleton },
	{ "min_core_btf",	do_min_core_btf },
	{ "help",		do_help },
	{ 0 }
};

int do_gen(int argc, char **argv)
{
	return cmd_select(cmds, argc, argv, do_help);
}