1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018 Facebook */
3
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/bpf.h>
6 #include <uapi/linux/bpf_perf_event.h>
7 #include <uapi/linux/types.h>
8 #include <linux/seq_file.h>
9 #include <linux/compiler.h>
10 #include <linux/ctype.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/anon_inodes.h>
14 #include <linux/file.h>
15 #include <linux/uaccess.h>
16 #include <linux/kernel.h>
17 #include <linux/idr.h>
18 #include <linux/sort.h>
19 #include <linux/bpf_verifier.h>
20 #include <linux/btf.h>
21 #include <linux/btf_ids.h>
22 #include <linux/skmsg.h>
23 #include <linux/perf_event.h>
24 #include <linux/bsearch.h>
25 #include <linux/kobject.h>
26 #include <linux/sysfs.h>
27 #include <net/sock.h>
28 #include "../tools/lib/bpf/relo_core.h"
29
30 /* BTF (BPF Type Format) is the metadata format which describes
31 * the data types of BPF programs/maps. Hence, it basically focuses
32 * on the C programming language, which modern BPF programs are
33 * primarily written in.
34 *
35 * ELF Section:
36 * ~~~~~~~~~~~
37 * The BTF data is stored under the ".BTF" ELF section
38 *
39 * struct btf_type:
40 * ~~~~~~~~~~~~~~~
41 * Each 'struct btf_type' object describes a C data type.
42 * Depending on the type it is describing, a 'struct btf_type'
43 * object may be followed by more data. For example,
44 * to describe an array, 'struct btf_type' is followed by
45 * 'struct btf_array'.
46 *
47 * 'struct btf_type' and any extra data following it are
48 * 4 bytes aligned.
49 *
50 * Type section:
51 * ~~~~~~~~~~~~~
52 * The BTF type section contains a list of 'struct btf_type' objects.
53 * Each one describes a C type. Recall from the above section
54 * that a 'struct btf_type' object could be immediately followed by extra
55 * data in order to describe some particular C types.
56 *
57 * type_id:
58 * ~~~~~~~
59 * Each btf_type object is identified by a type_id. The type_id
60 * is implied by the location of the btf_type object in
61 * the BTF type section. The first one has type_id 1. The second
62 * one has type_id 2...etc. Hence, an earlier btf_type has
63 * a smaller type_id.
64 *
65 * A btf_type object may refer to another btf_type object by using
66 * type_id (i.e. the "type" in the "struct btf_type").
67 *
68 * NOTE that we cannot assume any reference-order.
69 * A btf_type object can refer to an earlier btf_type object
70 * but it can also refer to a later btf_type object.
71 *
72 * For example, to describe "const void *", a btf_type
73 * object describing "const" may refer to another btf_type
74 * object describing "void *". This type-reference is done
75 * by specifying type_id:
76 *
77 * [1] CONST (anon) type_id=2
78 * [2] PTR (anon) type_id=0
79 *
80 * The above is the btf_verifier debug log:
81 * - Each line starting with "[?]" is a btf_type object
82 * - [?] is the type_id of the btf_type object.
83 * - CONST/PTR is the BTF_KIND_XXX
84 * - "(anon)" is the name of the type. It just
85 * happens that CONST and PTR have no name.
86 * - type_id=XXX is the 'u32 type' in btf_type
87 *
88 * NOTE: "void" has type_id 0
89 *
90 * String section:
91 * ~~~~~~~~~~~~~~
92 * The BTF string section contains the names used by the type section.
93 * Each string is referred by an "offset" from the beginning of the
94 * string section.
95 *
96 * Each string is '\0' terminated.
97 *
98 * The first character in the string section must be '\0'
99 * which is used to mean 'anonymous'. Some btf_type may not
100 * have a name.
101 */
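/* For example, the C declaration "int a[4]" could show up in the same
 * debug-log style as:
 *
 *	[1] INT int size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
 *	[2] ARRAY (anon) type_id=1 index_type_id=1 nr_elems=4
 *
 * Here the ARRAY record is a 'struct btf_type' immediately followed by
 * a 'struct btf_array' holding type, index_type and nelems, and the
 * name "int" lives in the string section.
 */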
102
103 /* BTF verification:
104 *
105 * To verify BTF data, two passes are needed.
106 *
107 * Pass #1
108 * ~~~~~~~
109 * The first pass is to collect all btf_type objects to
110 * an array: "btf->types".
111 *
112 * Depending on the C type that a btf_type is describing,
113 * a btf_type may be followed by extra data. We don't know
114 * how many btf_types there are, and more importantly we don't
115 * know where each btf_type is located in the type section.
116 *
117 * Without knowing the location of each type_id, most verifications
118 * cannot be done. e.g. an earlier btf_type may refer to a later
119 * btf_type (recall the "const void *" above), so we cannot
120 * check this type-reference in the first pass.
121 *
122 * The first pass still does some verification (e.g.
123 * checking that the name is a valid offset into the string section).
124 *
125 * Pass #2
126 * ~~~~~~~
127 * The main focus is to resolve a btf_type that is referring
128 * to another type.
129 *
130 * We have to ensure the referring type:
131 * 1) does exist in the BTF (i.e. in btf->types[])
132 * 2) does not cause a loop:
133 * struct A {
134 * struct B b;
135 * };
136 *
137 * struct B {
138 * struct A a;
139 * };
140 *
141 * btf_type_needs_resolve() decides if a btf_type needs
142 * to be resolved.
143 *
144 * The needs_resolve type implements the "resolve()" ops which
145 * essentially does a DFS and detects backedges.
146 *
147 * During resolve (or DFS), different C types have different
148 * "RESOLVED" conditions.
149 *
150 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
151 * members because a member is always referring to another
152 * type. A struct's member can be treated as "RESOLVED" if
153 * it is referring to a BTF_KIND_PTR. Otherwise, the
154 * following valid C struct would be rejected:
155 *
156 * struct A {
157 * int m;
158 * struct A *a;
159 * };
160 *
161 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
162 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
163 * detect a pointer loop, e.g.:
164 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
165 * ^ |
166 * +-----------------------------------------+
167 *
168 */
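/* Roughly, walking the pointer loop above starts at one type, pushes each
 * referenced type on the resolve stack and marks it VISITED; when the
 * chain comes back around to a type that is VISITED but not yet RESOLVED,
 * env_stack_push() refuses to push it again and the reference is reported
 * as a loop.
 */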
169
170 #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
171 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
172 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
173 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
174 #define BITS_ROUNDUP_BYTES(bits) \
175 (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
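/* E.g. for a 12-bit value:
 *	BITS_ROUNDDOWN_BYTES(12) == 1	(whole bytes covered)
 *	BITS_PER_BYTE_MASKED(12) == 4	(remaining bits)
 *	BITS_ROUNDUP_BYTES(12)   == 2	(bytes needed to hold 12 bits)
 */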
176
177 #define BTF_INFO_MASK 0x9f00ffff
178 #define BTF_INT_MASK 0x0fffffff
179 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
180 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
181
182 /* 16MB for 64k structs and each has 16 members and
183 * a few MB spaces for the string section.
184 * The hard limit is S32_MAX.
185 */
186 #define BTF_MAX_SIZE (16 * 1024 * 1024)
187
188 #define for_each_member_from(i, from, struct_type, member) \
189 for (i = from, member = btf_type_member(struct_type) + from; \
190 i < btf_type_vlen(struct_type); \
191 i++, member++)
192
193 #define for_each_vsi_from(i, from, struct_type, member) \
194 for (i = from, member = btf_type_var_secinfo(struct_type) + from; \
195 i < btf_type_vlen(struct_type); \
196 i++, member++)
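/* Typical use (rough sketch): resume struct member iteration from where a
 * previous resolve pass stopped:
 *
 *	u16 i;
 *	const struct btf_member *member;
 *
 *	for_each_member_from(i, v->next_member, struct_type, member) {
 *		... check member->type against the resolve state ...
 *	}
 */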
197
198 DEFINE_IDR(btf_idr);
199 DEFINE_SPINLOCK(btf_idr_lock);
200
201 enum btf_kfunc_hook {
202 BTF_KFUNC_HOOK_XDP,
203 BTF_KFUNC_HOOK_TC,
204 BTF_KFUNC_HOOK_STRUCT_OPS,
205 BTF_KFUNC_HOOK_TRACING,
206 BTF_KFUNC_HOOK_SYSCALL,
207 BTF_KFUNC_HOOK_MAX,
208 };
209
210 enum {
211 BTF_KFUNC_SET_MAX_CNT = 256,
212 BTF_DTOR_KFUNC_MAX_CNT = 256,
213 };
214
215 struct btf_kfunc_set_tab {
216 struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX];
217 };
218
219 struct btf_id_dtor_kfunc_tab {
220 u32 cnt;
221 struct btf_id_dtor_kfunc dtors[];
222 };
223
224 struct btf {
225 void *data;
226 struct btf_type **types;
227 u32 *resolved_ids;
228 u32 *resolved_sizes;
229 const char *strings;
230 void *nohdr_data;
231 struct btf_header hdr;
232 u32 nr_types; /* includes VOID for base BTF */
233 u32 types_size;
234 u32 data_size;
235 refcount_t refcnt;
236 u32 id;
237 struct rcu_head rcu;
238 struct btf_kfunc_set_tab *kfunc_set_tab;
239 struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;
240
241 /* split BTF support */
242 struct btf *base_btf;
243 u32 start_id; /* first type ID in this BTF (0 for base BTF) */
244 u32 start_str_off; /* first string offset (0 for base BTF) */
245 char name[MODULE_NAME_LEN];
246 bool kernel_btf;
247 };
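/* For split BTF, type IDs and string offsets continue the numbering of the
 * base BTF. E.g. if vmlinux BTF carries N types (including VOID with id 0),
 * a module's split BTF gets start_id == N, and a lookup of any id < N is
 * simply redirected to base_btf (see btf_type_by_id() below).
 */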
248
249 enum verifier_phase {
250 CHECK_META,
251 CHECK_TYPE,
252 };
253
254 struct resolve_vertex {
255 const struct btf_type *t;
256 u32 type_id;
257 u16 next_member;
258 };
259
260 enum visit_state {
261 NOT_VISITED,
262 VISITED,
263 RESOLVED,
264 };
265
266 enum resolve_mode {
267 RESOLVE_TBD, /* To Be Determined */
268 RESOLVE_PTR, /* Resolving for Pointer */
269 RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union
270 * or array
271 */
272 };
273
274 #define MAX_RESOLVE_DEPTH 32
275
276 struct btf_sec_info {
277 u32 off;
278 u32 len;
279 };
280
281 struct btf_verifier_env {
282 struct btf *btf;
283 u8 *visit_states;
284 struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
285 struct bpf_verifier_log log;
286 u32 log_type_id;
287 u32 top_stack;
288 enum verifier_phase phase;
289 enum resolve_mode resolve_mode;
290 };
291
292 static const char * const btf_kind_str[NR_BTF_KINDS] = {
293 [BTF_KIND_UNKN] = "UNKNOWN",
294 [BTF_KIND_INT] = "INT",
295 [BTF_KIND_PTR] = "PTR",
296 [BTF_KIND_ARRAY] = "ARRAY",
297 [BTF_KIND_STRUCT] = "STRUCT",
298 [BTF_KIND_UNION] = "UNION",
299 [BTF_KIND_ENUM] = "ENUM",
300 [BTF_KIND_FWD] = "FWD",
301 [BTF_KIND_TYPEDEF] = "TYPEDEF",
302 [BTF_KIND_VOLATILE] = "VOLATILE",
303 [BTF_KIND_CONST] = "CONST",
304 [BTF_KIND_RESTRICT] = "RESTRICT",
305 [BTF_KIND_FUNC] = "FUNC",
306 [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
307 [BTF_KIND_VAR] = "VAR",
308 [BTF_KIND_DATASEC] = "DATASEC",
309 [BTF_KIND_FLOAT] = "FLOAT",
310 [BTF_KIND_DECL_TAG] = "DECL_TAG",
311 [BTF_KIND_TYPE_TAG] = "TYPE_TAG",
312 [BTF_KIND_ENUM64] = "ENUM64",
313 };
314
315 const char *btf_type_str(const struct btf_type *t)
316 {
317 return btf_kind_str[BTF_INFO_KIND(t->info)];
318 }
319
320 /* Chunk size we use in safe copy of data to be shown. */
321 #define BTF_SHOW_OBJ_SAFE_SIZE 32
322
323 /*
324 * This is the maximum size of a base type value (equivalent to a
325 * 128-bit int); if we are at the end of our safe buffer and have
326 * less than 16 bytes space we can't be assured of being able
327 * to copy the next type safely, so in such cases we will initiate
328 * a new copy.
329 */
330 #define BTF_SHOW_OBJ_BASE_TYPE_SIZE 16
331
332 /* Type name size */
333 #define BTF_SHOW_NAME_SIZE 80
334
335 /*
336 * Common data to all BTF show operations. Private show functions can add
337 * their own data to a structure containing a struct btf_show and consult it
338 * in the show callback. See btf_type_show() below.
339 *
340 * One challenge with showing nested data is we want to skip 0-valued
341 * data, but in order to figure out whether a nested object is all zeros
342 * we need to walk through it. As a result, we need to make two passes
343 * when handling structs, unions and arrays; the first pass simply looks
344 * for nonzero data, while the second actually does the display. The first
345 * pass is signalled by show->state.depth_check being set, and if we
346 * encounter a non-zero value we set show->state.depth_to_show to
347 * the depth at which we encountered it. When we have completed the
348 * first pass, we will know if anything needs to be displayed if
349 * depth_to_show > depth. See btf_[struct,array]_show() for the
350 * implementation of this.
351 *
352 * Another problem is we want to ensure the data for display is safe to
353 * access. To support this, the anonymous "struct {} obj" tracks the data
354 * object and our safe copy of it. We copy portions of the data needed
355 * to the object "copy" buffer, but because its size is limited to
356 * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
357 * traverse larger objects for display.
358 *
359 * The various data type show functions all start with a call to
360 * btf_show_start_type() which returns a pointer to the safe copy
361 * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
362 * raw data itself). btf_show_obj_safe() is responsible for
363 * using copy_from_kernel_nofault() to update the safe data if necessary
364 * as we traverse the object's data. skbuff-like semantics are
365 * used:
366 *
367 * - obj.head points to the start of the toplevel object for display
368 * - obj.size is the size of the toplevel object
369 * - obj.data points to the current point in the original data at
370 * which our safe data starts. obj.data will advance as we copy
371 * portions of the data.
372 *
373 * In most cases a single copy will suffice, but larger data structures
374 * such as "struct task_struct" will require many copies. The logic in
375 * btf_show_obj_safe() determines if a new
376 * copy_from_kernel_nofault() is needed.
377 */
378 struct btf_show {
379 u64 flags;
380 void *target; /* target of show operation (seq file, buffer) */
381 void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
382 const struct btf *btf;
383 /* below are used during iteration */
384 struct {
385 u8 depth;
386 u8 depth_to_show;
387 u8 depth_check;
388 u8 array_member:1,
389 array_terminated:1;
390 u16 array_encoding;
391 u32 type_id;
392 int status; /* non-zero for error */
393 const struct btf_type *type;
394 const struct btf_member *member;
395 char name[BTF_SHOW_NAME_SIZE]; /* space for member name/type */
396 } state;
397 struct {
398 u32 size;
399 void *head;
400 void *data;
401 u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
402 } obj;
403 };
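/* A private show function typically wraps struct btf_show with its own
 * state, e.g. (simplified sketch of an snprintf-style target):
 *
 *	struct btf_show_snprintf {
 *		struct btf_show show;
 *		int len_left;
 *		int len;
 *	};
 *
 * and its showfn() callback recovers the wrapper with container_of().
 */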
404
405 struct btf_kind_operations {
406 s32 (*check_meta)(struct btf_verifier_env *env,
407 const struct btf_type *t,
408 u32 meta_left);
409 int (*resolve)(struct btf_verifier_env *env,
410 const struct resolve_vertex *v);
411 int (*check_member)(struct btf_verifier_env *env,
412 const struct btf_type *struct_type,
413 const struct btf_member *member,
414 const struct btf_type *member_type);
415 int (*check_kflag_member)(struct btf_verifier_env *env,
416 const struct btf_type *struct_type,
417 const struct btf_member *member,
418 const struct btf_type *member_type);
419 void (*log_details)(struct btf_verifier_env *env,
420 const struct btf_type *t);
421 void (*show)(const struct btf *btf, const struct btf_type *t,
422 u32 type_id, void *data, u8 bits_offsets,
423 struct btf_show *show);
424 };
425
426 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
427 static struct btf_type btf_void;
428
429 static int btf_resolve(struct btf_verifier_env *env,
430 const struct btf_type *t, u32 type_id);
431
432 static int btf_func_check(struct btf_verifier_env *env,
433 const struct btf_type *t);
434
435 static bool btf_type_is_modifier(const struct btf_type *t)
436 {
437 /* Some of them are not strictly C modifiers,
438 * but they are grouped into the same bucket
439 * as far as BTF is concerned:
440 * A type (t) that refers to another
441 * type through t->type AND its size cannot
442 * be determined without following the t->type.
443 *
444 * ptr does not fall into this bucket
445 * because its size is always sizeof(void *).
446 */
447 switch (BTF_INFO_KIND(t->info)) {
448 case BTF_KIND_TYPEDEF:
449 case BTF_KIND_VOLATILE:
450 case BTF_KIND_CONST:
451 case BTF_KIND_RESTRICT:
452 case BTF_KIND_TYPE_TAG:
453 return true;
454 }
455
456 return false;
457 }
458
459 bool btf_type_is_void(const struct btf_type *t)
460 {
461 return t == &btf_void;
462 }
463
464 static bool btf_type_is_fwd(const struct btf_type *t)
465 {
466 return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
467 }
468
469 static bool btf_type_nosize(const struct btf_type *t)
470 {
471 return btf_type_is_void(t) || btf_type_is_fwd(t) ||
472 btf_type_is_func(t) || btf_type_is_func_proto(t);
473 }
474
475 static bool btf_type_nosize_or_null(const struct btf_type *t)
476 {
477 return !t || btf_type_nosize(t);
478 }
479
480 static bool __btf_type_is_struct(const struct btf_type *t)
481 {
482 return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
483 }
484
485 static bool btf_type_is_array(const struct btf_type *t)
486 {
487 return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
488 }
489
490 static bool btf_type_is_datasec(const struct btf_type *t)
491 {
492 return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
493 }
494
495 static bool btf_type_is_decl_tag(const struct btf_type *t)
496 {
497 return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
498 }
499
500 static bool btf_type_is_decl_tag_target(const struct btf_type *t)
501 {
502 return btf_type_is_func(t) || btf_type_is_struct(t) ||
503 btf_type_is_var(t) || btf_type_is_typedef(t);
504 }
505
506 u32 btf_nr_types(const struct btf *btf)
507 {
508 u32 total = 0;
509
510 while (btf) {
511 total += btf->nr_types;
512 btf = btf->base_btf;
513 }
514
515 return total;
516 }
517
518 s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
519 {
520 const struct btf_type *t;
521 const char *tname;
522 u32 i, total;
523
524 total = btf_nr_types(btf);
525 for (i = 1; i < total; i++) {
526 t = btf_type_by_id(btf, i);
527 if (BTF_INFO_KIND(t->info) != kind)
528 continue;
529
530 tname = btf_name_by_offset(btf, t->name_off);
531 if (!strcmp(tname, name))
532 return i;
533 }
534
535 return -ENOENT;
536 }
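/* E.g. bpf_find_btf_id() below uses this to look up a named kernel type:
 *
 *	id = btf_find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
 *
 * which returns the type_id on success or -ENOENT if no such type exists.
 */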
537
538 static s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
539 {
540 struct btf *btf;
541 s32 ret;
542 int id;
543
544 btf = bpf_get_btf_vmlinux();
545 if (IS_ERR(btf))
546 return PTR_ERR(btf);
547 if (!btf)
548 return -EINVAL;
549
550 ret = btf_find_by_name_kind(btf, name, kind);
551 /* ret is never zero, since btf_find_by_name_kind returns
552 * positive btf_id or negative error.
553 */
554 if (ret > 0) {
555 btf_get(btf);
556 *btf_p = btf;
557 return ret;
558 }
559
560 /* If name is not found in vmlinux's BTF then search in module's BTFs */
561 spin_lock_bh(&btf_idr_lock);
562 idr_for_each_entry(&btf_idr, btf, id) {
563 if (!btf_is_module(btf))
564 continue;
565 /* linear search could be slow hence unlock/lock
566 * the IDR to avoid holding it for too long
567 */
568 btf_get(btf);
569 spin_unlock_bh(&btf_idr_lock);
570 ret = btf_find_by_name_kind(btf, name, kind);
571 if (ret > 0) {
572 *btf_p = btf;
573 return ret;
574 }
575 spin_lock_bh(&btf_idr_lock);
576 btf_put(btf);
577 }
578 spin_unlock_bh(&btf_idr_lock);
579 return ret;
580 }
581
582 const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
583 u32 id, u32 *res_id)
584 {
585 const struct btf_type *t = btf_type_by_id(btf, id);
586
587 while (btf_type_is_modifier(t)) {
588 id = t->type;
589 t = btf_type_by_id(btf, t->type);
590 }
591
592 if (res_id)
593 *res_id = id;
594
595 return t;
596 }
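/* E.g. for "const volatile int x", starting from the CONST type id this
 * walks CONST -> VOLATILE -> INT and returns the INT type, with *res_id
 * (if non-NULL) set to the INT's type_id.
 */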
597
598 const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
599 u32 id, u32 *res_id)
600 {
601 const struct btf_type *t;
602
603 t = btf_type_skip_modifiers(btf, id, NULL);
604 if (!btf_type_is_ptr(t))
605 return NULL;
606
607 return btf_type_skip_modifiers(btf, t->type, res_id);
608 }
609
610 const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
611 u32 id, u32 *res_id)
612 {
613 const struct btf_type *ptype;
614
615 ptype = btf_type_resolve_ptr(btf, id, res_id);
616 if (ptype && btf_type_is_func_proto(ptype))
617 return ptype;
618
619 return NULL;
620 }
621
622 /* Types that act only as a source, not sink or intermediate
623 * type when resolving.
624 */
625 static bool btf_type_is_resolve_source_only(const struct btf_type *t)
626 {
627 return btf_type_is_var(t) ||
628 btf_type_is_decl_tag(t) ||
629 btf_type_is_datasec(t);
630 }
631
632 /* What types need to be resolved?
633 *
634 * btf_type_is_modifier() is an obvious one.
635 *
636 * btf_type_is_struct() because its member refers to
637 * another type (through member->type).
638 *
639 * btf_type_is_var() because the variable refers to
640 * another type. btf_type_is_datasec() holds multiple
641 * btf_type_is_var() types that need resolving.
642 *
643 * btf_type_is_array() because its element (array->type)
644 * refers to another type. An array can be thought of as a
645 * special case of a struct where the same member type is
646 * repeated array->nelems times.
647 */
648 static bool btf_type_needs_resolve(const struct btf_type *t)
649 {
650 return btf_type_is_modifier(t) ||
651 btf_type_is_ptr(t) ||
652 btf_type_is_struct(t) ||
653 btf_type_is_array(t) ||
654 btf_type_is_var(t) ||
655 btf_type_is_func(t) ||
656 btf_type_is_decl_tag(t) ||
657 btf_type_is_datasec(t);
658 }
659
660 /* t->size can be used */
661 static bool btf_type_has_size(const struct btf_type *t)
662 {
663 switch (BTF_INFO_KIND(t->info)) {
664 case BTF_KIND_INT:
665 case BTF_KIND_STRUCT:
666 case BTF_KIND_UNION:
667 case BTF_KIND_ENUM:
668 case BTF_KIND_DATASEC:
669 case BTF_KIND_FLOAT:
670 case BTF_KIND_ENUM64:
671 return true;
672 }
673
674 return false;
675 }
676
677 static const char *btf_int_encoding_str(u8 encoding)
678 {
679 if (encoding == 0)
680 return "(none)";
681 else if (encoding == BTF_INT_SIGNED)
682 return "SIGNED";
683 else if (encoding == BTF_INT_CHAR)
684 return "CHAR";
685 else if (encoding == BTF_INT_BOOL)
686 return "BOOL";
687 else
688 return "UNKN";
689 }
690
691 static u32 btf_type_int(const struct btf_type *t)
692 {
693 return *(u32 *)(t + 1);
694 }
695
696 static const struct btf_array *btf_type_array(const struct btf_type *t)
697 {
698 return (const struct btf_array *)(t + 1);
699 }
700
701 static const struct btf_enum *btf_type_enum(const struct btf_type *t)
702 {
703 return (const struct btf_enum *)(t + 1);
704 }
705
706 static const struct btf_var *btf_type_var(const struct btf_type *t)
707 {
708 return (const struct btf_var *)(t + 1);
709 }
710
711 static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
712 {
713 return (const struct btf_decl_tag *)(t + 1);
714 }
715
716 static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
717 {
718 return (const struct btf_enum64 *)(t + 1);
719 }
720
721 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
722 {
723 return kind_ops[BTF_INFO_KIND(t->info)];
724 }
725
726 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
727 {
728 if (!BTF_STR_OFFSET_VALID(offset))
729 return false;
730
731 while (offset < btf->start_str_off)
732 btf = btf->base_btf;
733
734 offset -= btf->start_str_off;
735 return offset < btf->hdr.str_len;
736 }
737
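/* A name character is acceptable if it is alphabetic (alphanumeric for
 * non-leading characters), an underscore, or, when dot_ok is true, a '.'
 * (used by section names such as ".data" or ".bss").
 */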
738 static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
739 {
740 if ((first ? !isalpha(c) :
741 !isalnum(c)) &&
742 c != '_' &&
743 ((c == '.' && !dot_ok) ||
744 c != '.'))
745 return false;
746 return true;
747 }
748
749 static const char *btf_str_by_offset(const struct btf *btf, u32 offset)
750 {
751 while (offset < btf->start_str_off)
752 btf = btf->base_btf;
753
754 offset -= btf->start_str_off;
755 if (offset < btf->hdr.str_len)
756 return &btf->strings[offset];
757
758 return NULL;
759 }
760
761 static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
762 {
763 /* offset must be valid */
764 const char *src = btf_str_by_offset(btf, offset);
765 const char *src_limit;
766
767 if (!__btf_name_char_ok(*src, true, dot_ok))
768 return false;
769
770 /* set a limit on identifier length */
771 src_limit = src + KSYM_NAME_LEN;
772 src++;
773 while (*src && src < src_limit) {
774 if (!__btf_name_char_ok(*src, false, dot_ok))
775 return false;
776 src++;
777 }
778
779 return !*src;
780 }
781
782 /* Only C-style identifiers are permitted. This can be relaxed if
783 * necessary.
784 */
785 static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
786 {
787 return __btf_name_valid(btf, offset, false);
788 }
789
790 static bool btf_name_valid_section(const struct btf *btf, u32 offset)
791 {
792 return __btf_name_valid(btf, offset, true);
793 }
794
795 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
796 {
797 const char *name;
798
799 if (!offset)
800 return "(anon)";
801
802 name = btf_str_by_offset(btf, offset);
803 return name ?: "(invalid-name-offset)";
804 }
805
806 const char *btf_name_by_offset(const struct btf *btf, u32 offset)
807 {
808 return btf_str_by_offset(btf, offset);
809 }
810
811 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
812 {
813 while (type_id < btf->start_id)
814 btf = btf->base_btf;
815
816 type_id -= btf->start_id;
817 if (type_id >= btf->nr_types)
818 return NULL;
819 return btf->types[type_id];
820 }
821 EXPORT_SYMBOL_GPL(btf_type_by_id);
822
823 /*
824 * Regular int is not a bit field and it must be either
825 * u8/u16/u32/u64 or __int128.
826 */
827 static bool btf_type_int_is_regular(const struct btf_type *t)
828 {
829 u8 nr_bits, nr_bytes;
830 u32 int_data;
831
832 int_data = btf_type_int(t);
833 nr_bits = BTF_INT_BITS(int_data);
834 nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
835 if (BITS_PER_BYTE_MASKED(nr_bits) ||
836 BTF_INT_OFFSET(int_data) ||
837 (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
838 nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
839 nr_bytes != (2 * sizeof(u64)))) {
840 return false;
841 }
842
843 return true;
844 }
845
846 /*
847 * Check that given struct member is a regular int with expected
848 * offset and size.
849 */
850 bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
851 const struct btf_member *m,
852 u32 expected_offset, u32 expected_size)
853 {
854 const struct btf_type *t;
855 u32 id, int_data;
856 u8 nr_bits;
857
858 id = m->type;
859 t = btf_type_id_size(btf, &id, NULL);
860 if (!t || !btf_type_is_int(t))
861 return false;
862
863 int_data = btf_type_int(t);
864 nr_bits = BTF_INT_BITS(int_data);
865 if (btf_type_kflag(s)) {
866 u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
867 u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
868
869 /* if kflag set, int should be a regular int and
870 * bit offset should be at byte boundary.
871 */
872 return !bitfield_size &&
873 BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
874 BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
875 }
876
877 if (BTF_INT_OFFSET(int_data) ||
878 BITS_PER_BYTE_MASKED(m->offset) ||
879 BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
880 BITS_PER_BYTE_MASKED(nr_bits) ||
881 BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
882 return false;
883
884 return true;
885 }
886
887 /* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
888 static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
889 u32 id)
890 {
891 const struct btf_type *t = btf_type_by_id(btf, id);
892
893 while (btf_type_is_modifier(t) &&
894 BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
895 t = btf_type_by_id(btf, t->type);
896 }
897
898 return t;
899 }
900
901 #define BTF_SHOW_MAX_ITER 10
902
903 #define BTF_KIND_BIT(kind) (1ULL << kind)
904
905 /*
906 * Populate show->state.name with type name information.
907 * Format of type name is
908 *
909 * [.member_name = ] (type_name)
910 */
911 static const char *btf_show_name(struct btf_show *show)
912 {
913 /* BTF_SHOW_MAX_ITER array suffixes "[]" */
914 const char *array_suffixes = "[][][][][][][][][][]";
915 const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
916 /* BTF_SHOW_MAX_ITER pointer suffixes "*" */
917 const char *ptr_suffixes = "**********";
918 const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
919 const char *name = NULL, *prefix = "", *parens = "";
920 const struct btf_member *m = show->state.member;
921 const struct btf_type *t;
922 const struct btf_array *array;
923 u32 id = show->state.type_id;
924 const char *member = NULL;
925 bool show_member = false;
926 u64 kinds = 0;
927 int i;
928
929 show->state.name[0] = '\0';
930
931 /*
932 * Don't show type name if we're showing an array member;
933 * in that case we show the array type so don't need to repeat
934 * ourselves for each member.
935 */
936 if (show->state.array_member)
937 return "";
938
939 /* Retrieve member name, if any. */
940 if (m) {
941 member = btf_name_by_offset(show->btf, m->name_off);
942 show_member = strlen(member) > 0;
943 id = m->type;
944 }
945
946 /*
947 * Start with type_id, as we have resolved the struct btf_type *
948 * via btf_modifier_show() past the parent typedef to the child
949 * struct, int etc it is defined as. In such cases, the type_id
950 * still represents the starting type while the struct btf_type *
951 * in our show->state points at the resolved type of the typedef.
952 */
953 t = btf_type_by_id(show->btf, id);
954 if (!t)
955 return "";
956
957 /*
958 * The goal here is to build up the right number of pointer and
959 * array suffixes while ensuring the type name for a typedef
960 * is represented. Along the way we accumulate a list of
961 * BTF kinds we have encountered, since these will inform later
962 * display; for example, pointer types will not require an
963 * opening "{" for struct, we will just display the pointer value.
964 *
965 * We also want to accumulate the right number of pointer or array
966 * indices in the format string while iterating until we get to
967 * the typedef/pointee/array member target type.
968 *
969 * We start by pointing at the end of pointer and array suffix
970 * strings; as we accumulate pointers and arrays we move the pointer
971 * or array string backwards so it will show the expected number of
972 * '*' or '[]' for the type. BTF_SHOW_MAX_ITER of nesting of pointers
973 * and/or arrays and typedefs are supported as a precaution.
974 *
975 * We also want to get typedef name while proceeding to resolve
976 * type it points to so that we can add parentheses if it is a
977 * "typedef struct" etc.
978 */
979 for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {
980
981 switch (BTF_INFO_KIND(t->info)) {
982 case BTF_KIND_TYPEDEF:
983 if (!name)
984 name = btf_name_by_offset(show->btf,
985 t->name_off);
986 kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
987 id = t->type;
988 break;
989 case BTF_KIND_ARRAY:
990 kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
991 parens = "[";
992 if (!t)
993 return "";
994 array = btf_type_array(t);
995 if (array_suffix > array_suffixes)
996 array_suffix -= 2;
997 id = array->type;
998 break;
999 case BTF_KIND_PTR:
1000 kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
1001 if (ptr_suffix > ptr_suffixes)
1002 ptr_suffix -= 1;
1003 id = t->type;
1004 break;
1005 default:
1006 id = 0;
1007 break;
1008 }
1009 if (!id)
1010 break;
1011 t = btf_type_skip_qualifiers(show->btf, id);
1012 }
1013 /* We may not be able to represent this type; bail to be safe */
1014 if (i == BTF_SHOW_MAX_ITER)
1015 return "";
1016
1017 if (!name)
1018 name = btf_name_by_offset(show->btf, t->name_off);
1019
1020 switch (BTF_INFO_KIND(t->info)) {
1021 case BTF_KIND_STRUCT:
1022 case BTF_KIND_UNION:
1023 prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
1024 "struct" : "union";
1025 /* if it's an array of struct/union, parens is already set */
1026 if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
1027 parens = "{";
1028 break;
1029 case BTF_KIND_ENUM:
1030 case BTF_KIND_ENUM64:
1031 prefix = "enum";
1032 break;
1033 default:
1034 break;
1035 }
1036
1037 /* pointer does not require parens */
1038 if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
1039 parens = "";
1040 /* typedef does not require struct/union/enum prefix */
1041 if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
1042 prefix = "";
1043
1044 if (!name)
1045 name = "";
1046
1047 /* Even if we don't want type name info, we want parentheses etc */
1048 if (show->flags & BTF_SHOW_NONAME)
1049 snprintf(show->state.name, sizeof(show->state.name), "%s",
1050 parens);
1051 else
1052 snprintf(show->state.name, sizeof(show->state.name),
1053 "%s%s%s(%s%s%s%s%s%s)%s",
1054 /* first 3 strings comprise ".member = " */
1055 show_member ? "." : "",
1056 show_member ? member : "",
1057 show_member ? " = " : "",
1058 /* ...next is our prefix (struct, enum, etc) */
1059 prefix,
1060 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
1061 /* ...this is the type name itself */
1062 name,
1063 /* ...suffixed by the appropriate '*', '[]' suffixes */
1064 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
1065 array_suffix, parens);
1066
1067 return show->state.name;
1068 }
1069
1070 static const char *__btf_show_indent(struct btf_show *show)
1071 {
1072 const char *indents = " ";
1073 const char *indent = &indents[strlen(indents)];
1074
1075 if ((indent - show->state.depth) >= indents)
1076 return indent - show->state.depth;
1077 return indents;
1078 }
1079
1080 static const char *btf_show_indent(struct btf_show *show)
1081 {
1082 return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
1083 }
1084
1085 static const char *btf_show_newline(struct btf_show *show)
1086 {
1087 return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
1088 }
1089
1090 static const char *btf_show_delim(struct btf_show *show)
1091 {
1092 if (show->state.depth == 0)
1093 return "";
1094
1095 if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
1096 BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
1097 return "|";
1098
1099 return ",";
1100 }
1101
1102 __printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
1103 {
1104 va_list args;
1105
1106 if (!show->state.depth_check) {
1107 va_start(args, fmt);
1108 show->showfn(show, fmt, args);
1109 va_end(args);
1110 }
1111 }
1112
1113 /* Macros are used here as btf_show_type_value[s]() prepends and appends
1114 * format specifiers to the format specifier passed in; these do the work of
1115 * adding indentation, delimiters etc while the caller simply has to specify
1116 * the type value(s) in the format specifier + value(s).
1117 */
1118 #define btf_show_type_value(show, fmt, value) \
1119 do { \
1120 if ((value) != (__typeof__(value))0 || \
1121 (show->flags & BTF_SHOW_ZERO) || \
1122 show->state.depth == 0) { \
1123 btf_show(show, "%s%s" fmt "%s%s", \
1124 btf_show_indent(show), \
1125 btf_show_name(show), \
1126 value, btf_show_delim(show), \
1127 btf_show_newline(show)); \
1128 if (show->state.depth > show->state.depth_to_show) \
1129 show->state.depth_to_show = show->state.depth; \
1130 } \
1131 } while (0)
1132
1133 #define btf_show_type_values(show, fmt, ...) \
1134 do { \
1135 btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show), \
1136 btf_show_name(show), \
1137 __VA_ARGS__, btf_show_delim(show), \
1138 btf_show_newline(show)); \
1139 if (show->state.depth > show->state.depth_to_show) \
1140 show->state.depth_to_show = show->state.depth; \
1141 } while (0)
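/* E.g. for a non-compact dump of an int struct member "counter" holding 42,
 * the expansion above ends up emitting something like:
 *
 *	"  .counter = (int)42,\n"
 *
 * with the indent, member/type name, delimiter and newline all supplied by
 * the helpers rather than by the caller's format string.
 */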
1142
1143 /* How much is left to copy to safe buffer after @data? */
1144 static int btf_show_obj_size_left(struct btf_show *show, void *data)
1145 {
1146 return show->obj.head + show->obj.size - data;
1147 }
1148
1149 /* Is object pointed to by @data of @size already copied to our safe buffer? */
1150 static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
1151 {
1152 return data >= show->obj.data &&
1153 (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
1154 }
1155
1156 /*
1157 * If object pointed to by @data of @size falls within our safe buffer, return
1158 * the equivalent pointer to the same safe data. Assumes
1159 * copy_from_kernel_nofault() has already happened and our safe buffer is
1160 * populated.
1161 */
1162 static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
1163 {
1164 if (btf_show_obj_is_safe(show, data, size))
1165 return show->obj.safe + (data - show->obj.data);
1166 return NULL;
1167 }
1168
1169 /*
1170 * Return a safe-to-access version of data pointed to by @data.
1171 * We do this by copying the relevant amount of information
1172 * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
1173 *
1174 * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
1175 * safe copy is needed.
1176 *
1177 * Otherwise we need to determine if we have the required amount
1178 * of data (determined by the @data pointer and the size of the
1179 * largest base type we can encounter (represented by
1180 * BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
1181 * that we will be able to print some of the current object,
1182 * and if more is needed a copy will be triggered.
1183 * Some objects such as structs will not fit into the buffer;
1184 * in such cases additional copies when we iterate over their
1185 * members may be needed.
1186 *
1187 * btf_show_obj_safe() is used to return a safe buffer for
1188 * btf_show_start_type(); this ensures that as we recurse into
1189 * nested types we always have safe data for the given type.
1190 * This approach is somewhat wasteful; it's possible for example
1191 * that when iterating over a large union we'll end up copying the
1192 * same data repeatedly, but the goal is safety not performance.
1193 * We use stack data as opposed to per-CPU buffers because the
1194 * iteration over a type can take some time, and preemption handling
1195 * would greatly complicate use of the safe buffer.
1196 */
1197 static void *btf_show_obj_safe(struct btf_show *show,
1198 const struct btf_type *t,
1199 void *data)
1200 {
1201 const struct btf_type *rt;
1202 int size_left, size;
1203 void *safe = NULL;
1204
1205 if (show->flags & BTF_SHOW_UNSAFE)
1206 return data;
1207
1208 rt = btf_resolve_size(show->btf, t, &size);
1209 if (IS_ERR(rt)) {
1210 show->state.status = PTR_ERR(rt);
1211 return NULL;
1212 }
1213
1214 /*
1215 * Is this toplevel object? If so, set total object size and
1216 * initialize pointers. Otherwise check if we still fall within
1217 * our safe object data.
1218 */
1219 if (show->state.depth == 0) {
1220 show->obj.size = size;
1221 show->obj.head = data;
1222 } else {
1223 /*
1224 * If the size of the current object is > our remaining
1225 * safe buffer we _may_ need to do a new copy. However
1226 * consider the case of a nested struct; its size pushes
1227 * us over the safe buffer limit, but showing any individual
1228 * struct members does not. In such cases, we don't need
1229 * to initiate a fresh copy yet; however we definitely need
1230 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
1231 * in our buffer, regardless of the current object size.
1232 * The logic here is that as we resolve types we will
1233 * hit a base type at some point, and we need to be sure
1234 * the next chunk of data is safely available to display
1235 * that type info safely. We cannot rely on the size of
1236 * the current object here because it may be much larger
1237 * than our current buffer (e.g. task_struct is 8k).
1238 * All we want to do here is ensure that we can print the
1239 * next basic type, which we can if either
1240 * - the current type size is within the safe buffer; or
1241 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
1242 * the safe buffer.
1243 */
1244 safe = __btf_show_obj_safe(show, data,
1245 min(size,
1246 BTF_SHOW_OBJ_BASE_TYPE_SIZE));
1247 }
1248
1249 /*
1250 * We need a new copy to our safe object, either because we haven't
1251 * yet copied and are initializing safe data, or because the data
1252 * we want falls outside the boundaries of the safe object.
1253 */
1254 if (!safe) {
1255 size_left = btf_show_obj_size_left(show, data);
1256 if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
1257 size_left = BTF_SHOW_OBJ_SAFE_SIZE;
1258 show->state.status = copy_from_kernel_nofault(show->obj.safe,
1259 data, size_left);
1260 if (!show->state.status) {
1261 show->obj.data = data;
1262 safe = show->obj.safe;
1263 }
1264 }
1265
1266 return safe;
1267 }
1268
1269 /*
1270 * Set the type we are starting to show and return a safe data pointer
1271 * to be used for showing the associated data.
1272 */
1273 static void *btf_show_start_type(struct btf_show *show,
1274 const struct btf_type *t,
1275 u32 type_id, void *data)
1276 {
1277 show->state.type = t;
1278 show->state.type_id = type_id;
1279 show->state.name[0] = '\0';
1280
1281 return btf_show_obj_safe(show, t, data);
1282 }
1283
1284 static void btf_show_end_type(struct btf_show *show)
1285 {
1286 show->state.type = NULL;
1287 show->state.type_id = 0;
1288 show->state.name[0] = '\0';
1289 }
1290
1291 static void *btf_show_start_aggr_type(struct btf_show *show,
1292 const struct btf_type *t,
1293 u32 type_id, void *data)
1294 {
1295 void *safe_data = btf_show_start_type(show, t, type_id, data);
1296
1297 if (!safe_data)
1298 return safe_data;
1299
1300 btf_show(show, "%s%s%s", btf_show_indent(show),
1301 btf_show_name(show),
1302 btf_show_newline(show));
1303 show->state.depth++;
1304 return safe_data;
1305 }
1306
1307 static void btf_show_end_aggr_type(struct btf_show *show,
1308 const char *suffix)
1309 {
1310 show->state.depth--;
1311 btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
1312 btf_show_delim(show), btf_show_newline(show));
1313 btf_show_end_type(show);
1314 }
1315
1316 static void btf_show_start_member(struct btf_show *show,
1317 const struct btf_member *m)
1318 {
1319 show->state.member = m;
1320 }
1321
1322 static void btf_show_start_array_member(struct btf_show *show)
1323 {
1324 show->state.array_member = 1;
1325 btf_show_start_member(show, NULL);
1326 }
1327
1328 static void btf_show_end_member(struct btf_show *show)
1329 {
1330 show->state.member = NULL;
1331 }
1332
1333 static void btf_show_end_array_member(struct btf_show *show)
1334 {
1335 show->state.array_member = 0;
1336 btf_show_end_member(show);
1337 }
1338
1339 static void *btf_show_start_array_type(struct btf_show *show,
1340 const struct btf_type *t,
1341 u32 type_id,
1342 u16 array_encoding,
1343 void *data)
1344 {
1345 show->state.array_encoding = array_encoding;
1346 show->state.array_terminated = 0;
1347 return btf_show_start_aggr_type(show, t, type_id, data);
1348 }
1349
1350 static void btf_show_end_array_type(struct btf_show *show)
1351 {
1352 show->state.array_encoding = 0;
1353 show->state.array_terminated = 0;
1354 btf_show_end_aggr_type(show, "]");
1355 }
1356
1357 static void *btf_show_start_struct_type(struct btf_show *show,
1358 const struct btf_type *t,
1359 u32 type_id,
1360 void *data)
1361 {
1362 return btf_show_start_aggr_type(show, t, type_id, data);
1363 }
1364
1365 static void btf_show_end_struct_type(struct btf_show *show)
1366 {
1367 btf_show_end_aggr_type(show, "}");
1368 }
1369
1370 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
1371 const char *fmt, ...)
1372 {
1373 va_list args;
1374
1375 va_start(args, fmt);
1376 bpf_verifier_vlog(log, fmt, args);
1377 va_end(args);
1378 }
1379
1380 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
1381 const char *fmt, ...)
1382 {
1383 struct bpf_verifier_log *log = &env->log;
1384 va_list args;
1385
1386 if (!bpf_verifier_log_needed(log))
1387 return;
1388
1389 va_start(args, fmt);
1390 bpf_verifier_vlog(log, fmt, args);
1391 va_end(args);
1392 }
1393
1394 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
1395 const struct btf_type *t,
1396 bool log_details,
1397 const char *fmt, ...)
1398 {
1399 struct bpf_verifier_log *log = &env->log;
1400 struct btf *btf = env->btf;
1401 va_list args;
1402
1403 if (!bpf_verifier_log_needed(log))
1404 return;
1405
1406 /* btf verifier prints all types it is processing via
1407 * btf_verifier_log_type(..., fmt = NULL).
1408 * Skip those prints for in-kernel BTF verification.
1409 */
1410 if (log->level == BPF_LOG_KERNEL && !fmt)
1411 return;
1412
1413 __btf_verifier_log(log, "[%u] %s %s%s",
1414 env->log_type_id,
1415 btf_type_str(t),
1416 __btf_name_by_offset(btf, t->name_off),
1417 log_details ? " " : "");
1418
1419 if (log_details)
1420 btf_type_ops(t)->log_details(env, t);
1421
1422 if (fmt && *fmt) {
1423 __btf_verifier_log(log, " ");
1424 va_start(args, fmt);
1425 bpf_verifier_vlog(log, fmt, args);
1426 va_end(args);
1427 }
1428
1429 __btf_verifier_log(log, "\n");
1430 }
1431
1432 #define btf_verifier_log_type(env, t, ...) \
1433 __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
1434 #define btf_verifier_log_basic(env, t, ...) \
1435 __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
1436
1437 __printf(4, 5)
1438 static void btf_verifier_log_member(struct btf_verifier_env *env,
1439 const struct btf_type *struct_type,
1440 const struct btf_member *member,
1441 const char *fmt, ...)
1442 {
1443 struct bpf_verifier_log *log = &env->log;
1444 struct btf *btf = env->btf;
1445 va_list args;
1446
1447 if (!bpf_verifier_log_needed(log))
1448 return;
1449
1450 if (log->level == BPF_LOG_KERNEL && !fmt)
1451 return;
1452 /* The CHECK_META phase already did a btf dump.
1453 *
1454 * If a member is logged again, it must have hit an error while
1455 * parsing this member. It is useful to print out which
1456 * struct this member belongs to.
1457 */
1458 if (env->phase != CHECK_META)
1459 btf_verifier_log_type(env, struct_type, NULL);
1460
1461 if (btf_type_kflag(struct_type))
1462 __btf_verifier_log(log,
1463 "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
1464 __btf_name_by_offset(btf, member->name_off),
1465 member->type,
1466 BTF_MEMBER_BITFIELD_SIZE(member->offset),
1467 BTF_MEMBER_BIT_OFFSET(member->offset));
1468 else
1469 __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
1470 __btf_name_by_offset(btf, member->name_off),
1471 member->type, member->offset);
1472
1473 if (fmt && *fmt) {
1474 __btf_verifier_log(log, " ");
1475 va_start(args, fmt);
1476 bpf_verifier_vlog(log, fmt, args);
1477 va_end(args);
1478 }
1479
1480 __btf_verifier_log(log, "\n");
1481 }
1482
1483 __printf(4, 5)
1484 static void btf_verifier_log_vsi(struct btf_verifier_env *env,
1485 const struct btf_type *datasec_type,
1486 const struct btf_var_secinfo *vsi,
1487 const char *fmt, ...)
1488 {
1489 struct bpf_verifier_log *log = &env->log;
1490 va_list args;
1491
1492 if (!bpf_verifier_log_needed(log))
1493 return;
1494 if (log->level == BPF_LOG_KERNEL && !fmt)
1495 return;
1496 if (env->phase != CHECK_META)
1497 btf_verifier_log_type(env, datasec_type, NULL);
1498
1499 __btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
1500 vsi->type, vsi->offset, vsi->size);
1501 if (fmt && *fmt) {
1502 __btf_verifier_log(log, " ");
1503 va_start(args, fmt);
1504 bpf_verifier_vlog(log, fmt, args);
1505 va_end(args);
1506 }
1507
1508 __btf_verifier_log(log, "\n");
1509 }
1510
1511 static void btf_verifier_log_hdr(struct btf_verifier_env *env,
1512 u32 btf_data_size)
1513 {
1514 struct bpf_verifier_log *log = &env->log;
1515 const struct btf *btf = env->btf;
1516 const struct btf_header *hdr;
1517
1518 if (!bpf_verifier_log_needed(log))
1519 return;
1520
1521 if (log->level == BPF_LOG_KERNEL)
1522 return;
1523 hdr = &btf->hdr;
1524 __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
1525 __btf_verifier_log(log, "version: %u\n", hdr->version);
1526 __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
1527 __btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
1528 __btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
1529 __btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
1530 __btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
1531 __btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
1532 __btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
1533 }
1534
1535 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
1536 {
1537 struct btf *btf = env->btf;
1538
1539 if (btf->types_size == btf->nr_types) {
1540 /* Expand 'types' array */
1541
1542 struct btf_type **new_types;
1543 u32 expand_by, new_size;
1544
1545 if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
1546 btf_verifier_log(env, "Exceeded max num of types");
1547 return -E2BIG;
1548 }
1549
1550 expand_by = max_t(u32, btf->types_size >> 2, 16);
1551 new_size = min_t(u32, BTF_MAX_TYPE,
1552 btf->types_size + expand_by);
1553
1554 new_types = kvcalloc(new_size, sizeof(*new_types),
1555 GFP_KERNEL | __GFP_NOWARN);
1556 if (!new_types)
1557 return -ENOMEM;
1558
1559 if (btf->nr_types == 0) {
1560 if (!btf->base_btf) {
1561 /* lazily init VOID type */
1562 new_types[0] = &btf_void;
1563 btf->nr_types++;
1564 }
1565 } else {
1566 memcpy(new_types, btf->types,
1567 sizeof(*btf->types) * btf->nr_types);
1568 }
1569
1570 kvfree(btf->types);
1571 btf->types = new_types;
1572 btf->types_size = new_size;
1573 }
1574
1575 btf->types[btf->nr_types++] = t;
1576
1577 return 0;
1578 }
1579
1580 static int btf_alloc_id(struct btf *btf)
1581 {
1582 int id;
1583
1584 idr_preload(GFP_KERNEL);
1585 spin_lock_bh(&btf_idr_lock);
1586 id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
1587 if (id > 0)
1588 btf->id = id;
1589 spin_unlock_bh(&btf_idr_lock);
1590 idr_preload_end();
1591
1592 if (WARN_ON_ONCE(!id))
1593 return -ENOSPC;
1594
1595 return id > 0 ? 0 : id;
1596 }
1597
1598 static void btf_free_id(struct btf *btf)
1599 {
1600 unsigned long flags;
1601
1602 /*
1603 * In map-in-map, calling map_delete_elem() on outer
1604 * map will call bpf_map_put on the inner map.
1605 * It will then eventually call btf_free_id()
1606 * on the inner map. Some map_delete_elem()
1607 * implementations may run with IRQs disabled, so
1608 * we need to use the _irqsave() version instead
1609 * of the _bh() version.
1610 */
1611 spin_lock_irqsave(&btf_idr_lock, flags);
1612 idr_remove(&btf_idr, btf->id);
1613 spin_unlock_irqrestore(&btf_idr_lock, flags);
1614 }
1615
1616 static void btf_free_kfunc_set_tab(struct btf *btf)
1617 {
1618 struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
1619 int hook;
1620
1621 if (!tab)
1622 return;
1623 /* For module BTF, we directly assign the sets being registered, so
1624 * there is nothing to free except kfunc_set_tab.
1625 */
1626 if (btf_is_module(btf))
1627 goto free_tab;
1628 for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++)
1629 kfree(tab->sets[hook]);
1630 free_tab:
1631 kfree(tab);
1632 btf->kfunc_set_tab = NULL;
1633 }
1634
1635 static void btf_free_dtor_kfunc_tab(struct btf *btf)
1636 {
1637 struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
1638
1639 if (!tab)
1640 return;
1641 kfree(tab);
1642 btf->dtor_kfunc_tab = NULL;
1643 }
1644
1645 static void btf_free(struct btf *btf)
1646 {
1647 btf_free_dtor_kfunc_tab(btf);
1648 btf_free_kfunc_set_tab(btf);
1649 kvfree(btf->types);
1650 kvfree(btf->resolved_sizes);
1651 kvfree(btf->resolved_ids);
1652 kvfree(btf->data);
1653 kfree(btf);
1654 }
1655
1656 static void btf_free_rcu(struct rcu_head *rcu)
1657 {
1658 struct btf *btf = container_of(rcu, struct btf, rcu);
1659
1660 btf_free(btf);
1661 }
1662
1663 void btf_get(struct btf *btf)
1664 {
1665 refcount_inc(&btf->refcnt);
1666 }
1667
1668 void btf_put(struct btf *btf)
1669 {
1670 if (btf && refcount_dec_and_test(&btf->refcnt)) {
1671 btf_free_id(btf);
1672 call_rcu(&btf->rcu, btf_free_rcu);
1673 }
1674 }
1675
1676 static int env_resolve_init(struct btf_verifier_env *env)
1677 {
1678 struct btf *btf = env->btf;
1679 u32 nr_types = btf->nr_types;
1680 u32 *resolved_sizes = NULL;
1681 u32 *resolved_ids = NULL;
1682 u8 *visit_states = NULL;
1683
1684 resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
1685 GFP_KERNEL | __GFP_NOWARN);
1686 if (!resolved_sizes)
1687 goto nomem;
1688
1689 resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
1690 GFP_KERNEL | __GFP_NOWARN);
1691 if (!resolved_ids)
1692 goto nomem;
1693
1694 visit_states = kvcalloc(nr_types, sizeof(*visit_states),
1695 GFP_KERNEL | __GFP_NOWARN);
1696 if (!visit_states)
1697 goto nomem;
1698
1699 btf->resolved_sizes = resolved_sizes;
1700 btf->resolved_ids = resolved_ids;
1701 env->visit_states = visit_states;
1702
1703 return 0;
1704
1705 nomem:
1706 kvfree(resolved_sizes);
1707 kvfree(resolved_ids);
1708 kvfree(visit_states);
1709 return -ENOMEM;
1710 }
1711
1712 static void btf_verifier_env_free(struct btf_verifier_env *env)
1713 {
1714 kvfree(env->visit_states);
1715 kfree(env);
1716 }
1717
1718 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
1719 const struct btf_type *next_type)
1720 {
1721 switch (env->resolve_mode) {
1722 case RESOLVE_TBD:
1723 /* int, enum or void is a sink */
1724 return !btf_type_needs_resolve(next_type);
1725 case RESOLVE_PTR:
1726 /* int, enum, void, struct, array, func or func_proto is a sink
1727 * for ptr
1728 */
1729 return !btf_type_is_modifier(next_type) &&
1730 !btf_type_is_ptr(next_type);
1731 case RESOLVE_STRUCT_OR_ARRAY:
1732 /* int, enum, void, ptr, func or func_proto is a sink
1733 * for struct and array
1734 */
1735 return !btf_type_is_modifier(next_type) &&
1736 !btf_type_is_array(next_type) &&
1737 !btf_type_is_struct(next_type);
1738 default:
1739 BUG();
1740 }
1741 }
1742
1743 static bool env_type_is_resolved(const struct btf_verifier_env *env,
1744 u32 type_id)
1745 {
1746 /* base BTF types should be resolved by now */
1747 if (type_id < env->btf->start_id)
1748 return true;
1749
1750 return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
1751 }
1752
1753 static int env_stack_push(struct btf_verifier_env *env,
1754 const struct btf_type *t, u32 type_id)
1755 {
1756 const struct btf *btf = env->btf;
1757 struct resolve_vertex *v;
1758
1759 if (env->top_stack == MAX_RESOLVE_DEPTH)
1760 return -E2BIG;
1761
1762 if (type_id < btf->start_id
1763 || env->visit_states[type_id - btf->start_id] != NOT_VISITED)
1764 return -EEXIST;
1765
1766 env->visit_states[type_id - btf->start_id] = VISITED;
1767
1768 v = &env->stack[env->top_stack++];
1769 v->t = t;
1770 v->type_id = type_id;
1771 v->next_member = 0;
1772
1773 if (env->resolve_mode == RESOLVE_TBD) {
1774 if (btf_type_is_ptr(t))
1775 env->resolve_mode = RESOLVE_PTR;
1776 else if (btf_type_is_struct(t) || btf_type_is_array(t))
1777 env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1778 }
1779
1780 return 0;
1781 }
1782
1783 static void env_stack_set_next_member(struct btf_verifier_env *env,
1784 u16 next_member)
1785 {
1786 env->stack[env->top_stack - 1].next_member = next_member;
1787 }
1788
1789 static void env_stack_pop_resolved(struct btf_verifier_env *env,
1790 u32 resolved_type_id,
1791 u32 resolved_size)
1792 {
1793 u32 type_id = env->stack[--(env->top_stack)].type_id;
1794 struct btf *btf = env->btf;
1795
1796 type_id -= btf->start_id; /* adjust to local type id */
1797 btf->resolved_sizes[type_id] = resolved_size;
1798 btf->resolved_ids[type_id] = resolved_type_id;
1799 env->visit_states[type_id] = RESOLVED;
1800 }
1801
1802 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
1803 {
1804 return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
1805 }
1806
1807 /* Resolve the size of a passed-in "type"
1808 *
1809 * type: is an array (e.g. u32 array[x][y])
1810 * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
1811 * *type_size: (x * y * sizeof(u32)). Hence, *type_size always
1812 * corresponds to the return type.
1813 * *elem_type: u32
1814 * *elem_id: id of u32
1815 * *total_nelems: (x * y). Hence, individual elem size is
1816 * (*type_size / *total_nelems)
1817 * *type_id: id of type if it's changed within the function, 0 if not
1818 *
1819 * type: is not an array (e.g. const struct X)
1820 * return type: type "struct X"
1821 * *type_size: sizeof(struct X)
1822 * *elem_type: same as return type ("struct X")
1823 * *elem_id: 0
1824 * *total_nelems: 1
1825 * *type_id: id of type if it's changed within the function, 0 if not
1826 */
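
/* A concrete (illustrative) instance of the array case above: for
 * "u32 arr[2][3]" the function returns the outermost BTF_KIND_ARRAY type,
 * with *type_size = 2 * 3 * sizeof(u32) = 24, *total_nelems = 6 and
 * *elem_type pointing to the "u32" type, so each element is 24 / 6 = 4 bytes.
 */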
1827 static const struct btf_type *
1828 __btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1829 u32 *type_size, const struct btf_type **elem_type,
1830 u32 *elem_id, u32 *total_nelems, u32 *type_id)
1831 {
1832 const struct btf_type *array_type = NULL;
1833 const struct btf_array *array = NULL;
1834 u32 i, size, nelems = 1, id = 0;
1835
1836 for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
1837 switch (BTF_INFO_KIND(type->info)) {
1838 /* type->size can be used */
1839 case BTF_KIND_INT:
1840 case BTF_KIND_STRUCT:
1841 case BTF_KIND_UNION:
1842 case BTF_KIND_ENUM:
1843 case BTF_KIND_FLOAT:
1844 case BTF_KIND_ENUM64:
1845 size = type->size;
1846 goto resolved;
1847
1848 case BTF_KIND_PTR:
1849 size = sizeof(void *);
1850 goto resolved;
1851
1852 /* Modifiers */
1853 case BTF_KIND_TYPEDEF:
1854 case BTF_KIND_VOLATILE:
1855 case BTF_KIND_CONST:
1856 case BTF_KIND_RESTRICT:
1857 case BTF_KIND_TYPE_TAG:
1858 id = type->type;
1859 type = btf_type_by_id(btf, type->type);
1860 break;
1861
1862 case BTF_KIND_ARRAY:
1863 if (!array_type)
1864 array_type = type;
1865 array = btf_type_array(type);
1866 if (nelems && array->nelems > U32_MAX / nelems)
1867 return ERR_PTR(-EINVAL);
1868 nelems *= array->nelems;
1869 type = btf_type_by_id(btf, array->type);
1870 break;
1871
1872 /* type without size */
1873 default:
1874 return ERR_PTR(-EINVAL);
1875 }
1876 }
1877
1878 return ERR_PTR(-EINVAL);
1879
1880 resolved:
1881 if (nelems && size > U32_MAX / nelems)
1882 return ERR_PTR(-EINVAL);
1883
1884 *type_size = nelems * size;
1885 if (total_nelems)
1886 *total_nelems = nelems;
1887 if (elem_type)
1888 *elem_type = type;
1889 if (elem_id)
1890 *elem_id = array ? array->type : 0;
1891 if (type_id && id)
1892 *type_id = id;
1893
1894 return array_type ? : type;
1895 }
1896
1897 const struct btf_type *
1898 btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1899 u32 *type_size)
1900 {
1901 return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
1902 }
1903
1904 static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
1905 {
1906 while (type_id < btf->start_id)
1907 btf = btf->base_btf;
1908
1909 return btf->resolved_ids[type_id - btf->start_id];
1910 }
1911
1912 /* The input param "type_id" must point to a needs_resolve type */
1913 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
1914 u32 *type_id)
1915 {
1916 *type_id = btf_resolved_type_id(btf, *type_id);
1917 return btf_type_by_id(btf, *type_id);
1918 }
1919
1920 static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
1921 {
1922 while (type_id < btf->start_id)
1923 btf = btf->base_btf;
1924
1925 return btf->resolved_sizes[type_id - btf->start_id];
1926 }
1927
1928 const struct btf_type *btf_type_id_size(const struct btf *btf,
1929 u32 *type_id, u32 *ret_size)
1930 {
1931 const struct btf_type *size_type;
1932 u32 size_type_id = *type_id;
1933 u32 size = 0;
1934
1935 size_type = btf_type_by_id(btf, size_type_id);
1936 if (btf_type_nosize_or_null(size_type))
1937 return NULL;
1938
1939 if (btf_type_has_size(size_type)) {
1940 size = size_type->size;
1941 } else if (btf_type_is_array(size_type)) {
1942 size = btf_resolved_type_size(btf, size_type_id);
1943 } else if (btf_type_is_ptr(size_type)) {
1944 size = sizeof(void *);
1945 } else {
1946 if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
1947 !btf_type_is_var(size_type)))
1948 return NULL;
1949
1950 size_type_id = btf_resolved_type_id(btf, size_type_id);
1951 size_type = btf_type_by_id(btf, size_type_id);
1952 if (btf_type_nosize_or_null(size_type))
1953 return NULL;
1954 else if (btf_type_has_size(size_type))
1955 size = size_type->size;
1956 else if (btf_type_is_array(size_type))
1957 size = btf_resolved_type_size(btf, size_type_id);
1958 else if (btf_type_is_ptr(size_type))
1959 size = sizeof(void *);
1960 else
1961 return NULL;
1962 }
1963
1964 *type_id = size_type_id;
1965 if (ret_size)
1966 *ret_size = size;
1967
1968 return size_type;
1969 }
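
/* Illustrative behaviour: for a resolved chain CONST -> INT(u32), calling this
 * with *type_id set to the CONST id rewrites *type_id to the INT id and
 * returns the INT type with *ret_size = 4.  For a PTR it returns
 * sizeof(void *) without following the pointee.
 */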
1970
1971 static int btf_df_check_member(struct btf_verifier_env *env,
1972 const struct btf_type *struct_type,
1973 const struct btf_member *member,
1974 const struct btf_type *member_type)
1975 {
1976 btf_verifier_log_basic(env, struct_type,
1977 "Unsupported check_member");
1978 return -EINVAL;
1979 }
1980
1981 static int btf_df_check_kflag_member(struct btf_verifier_env *env,
1982 const struct btf_type *struct_type,
1983 const struct btf_member *member,
1984 const struct btf_type *member_type)
1985 {
1986 btf_verifier_log_basic(env, struct_type,
1987 "Unsupported check_kflag_member");
1988 return -EINVAL;
1989 }
1990
1991 /* Used for ptr, array, struct/union and float type members.
1992 * int, enum and modifier types have their specific callback functions.
1993 */
1994 static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
1995 const struct btf_type *struct_type,
1996 const struct btf_member *member,
1997 const struct btf_type *member_type)
1998 {
1999 if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
2000 btf_verifier_log_member(env, struct_type, member,
2001 "Invalid member bitfield_size");
2002 return -EINVAL;
2003 }
2004
2005 /* bitfield size is 0, so member->offset represents bit offset only.
2006 * It is safe to call non kflag check_member variants.
2007 */
2008 return btf_type_ops(member_type)->check_member(env, struct_type,
2009 member,
2010 member_type);
2011 }
2012
2013 static int btf_df_resolve(struct btf_verifier_env *env,
2014 const struct resolve_vertex *v)
2015 {
2016 btf_verifier_log_basic(env, v->t, "Unsupported resolve");
2017 return -EINVAL;
2018 }
2019
2020 static void btf_df_show(const struct btf *btf, const struct btf_type *t,
2021 u32 type_id, void *data, u8 bits_offsets,
2022 struct btf_show *show)
2023 {
2024 btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
2025 }
2026
2027 static int btf_int_check_member(struct btf_verifier_env *env,
2028 const struct btf_type *struct_type,
2029 const struct btf_member *member,
2030 const struct btf_type *member_type)
2031 {
2032 u32 int_data = btf_type_int(member_type);
2033 u32 struct_bits_off = member->offset;
2034 u32 struct_size = struct_type->size;
2035 u32 nr_copy_bits;
2036 u32 bytes_offset;
2037
2038 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
2039 btf_verifier_log_member(env, struct_type, member,
2040 "bits_offset exceeds U32_MAX");
2041 return -EINVAL;
2042 }
2043
2044 struct_bits_off += BTF_INT_OFFSET(int_data);
2045 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2046 nr_copy_bits = BTF_INT_BITS(int_data) +
2047 BITS_PER_BYTE_MASKED(struct_bits_off);
2048
2049 if (nr_copy_bits > BITS_PER_U128) {
2050 btf_verifier_log_member(env, struct_type, member,
2051 "nr_copy_bits exceeds 128");
2052 return -EINVAL;
2053 }
2054
2055 if (struct_size < bytes_offset ||
2056 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2057 btf_verifier_log_member(env, struct_type, member,
2058 "Member exceeds struct_size");
2059 return -EINVAL;
2060 }
2061
2062 return 0;
2063 }
2064
2065 static int btf_int_check_kflag_member(struct btf_verifier_env *env,
2066 const struct btf_type *struct_type,
2067 const struct btf_member *member,
2068 const struct btf_type *member_type)
2069 {
2070 u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
2071 u32 int_data = btf_type_int(member_type);
2072 u32 struct_size = struct_type->size;
2073 u32 nr_copy_bits;
2074
2075 /* a regular int type is required for the kflag int member */
2076 if (!btf_type_int_is_regular(member_type)) {
2077 btf_verifier_log_member(env, struct_type, member,
2078 "Invalid member base type");
2079 return -EINVAL;
2080 }
2081
2082 /* check sanity of bitfield size */
2083 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2084 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2085 nr_int_data_bits = BTF_INT_BITS(int_data);
2086 if (!nr_bits) {
2087 /* Not a bitfield member, member offset must be at byte
2088 * boundary.
2089 */
2090 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2091 btf_verifier_log_member(env, struct_type, member,
2092 "Invalid member offset");
2093 return -EINVAL;
2094 }
2095
2096 nr_bits = nr_int_data_bits;
2097 } else if (nr_bits > nr_int_data_bits) {
2098 btf_verifier_log_member(env, struct_type, member,
2099 "Invalid member bitfield_size");
2100 return -EINVAL;
2101 }
2102
2103 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2104 nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
2105 if (nr_copy_bits > BITS_PER_U128) {
2106 btf_verifier_log_member(env, struct_type, member,
2107 "nr_copy_bits exceeds 128");
2108 return -EINVAL;
2109 }
2110
2111 if (struct_size < bytes_offset ||
2112 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2113 btf_verifier_log_member(env, struct_type, member,
2114 "Member exceeds struct_size");
2115 return -EINVAL;
2116 }
2117
2118 return 0;
2119 }
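
/* For reference (illustrative): with kind_flag set, member->offset packs both
 * values, BTF_MEMBER_BITFIELD_SIZE() in the upper 8 bits and
 * BTF_MEMBER_BIT_OFFSET() in the lower 24 bits.  A bitfield "int x:5;"
 * starting at bit 32 of its struct would thus reach the checks above with
 * nr_bits = 5 and struct_bits_off = 32.
 */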
2120
2121 static s32 btf_int_check_meta(struct btf_verifier_env *env,
2122 const struct btf_type *t,
2123 u32 meta_left)
2124 {
2125 u32 int_data, nr_bits, meta_needed = sizeof(int_data);
2126 u16 encoding;
2127
2128 if (meta_left < meta_needed) {
2129 btf_verifier_log_basic(env, t,
2130 "meta_left:%u meta_needed:%u",
2131 meta_left, meta_needed);
2132 return -EINVAL;
2133 }
2134
2135 if (btf_type_vlen(t)) {
2136 btf_verifier_log_type(env, t, "vlen != 0");
2137 return -EINVAL;
2138 }
2139
2140 if (btf_type_kflag(t)) {
2141 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2142 return -EINVAL;
2143 }
2144
2145 int_data = btf_type_int(t);
2146 if (int_data & ~BTF_INT_MASK) {
2147 btf_verifier_log_basic(env, t, "Invalid int_data:%x",
2148 int_data);
2149 return -EINVAL;
2150 }
2151
2152 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
2153
2154 if (nr_bits > BITS_PER_U128) {
2155 btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
2156 BITS_PER_U128);
2157 return -EINVAL;
2158 }
2159
2160 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
2161 btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
2162 return -EINVAL;
2163 }
2164
2165 /*
2166 * Only one of the encoding bits is allowed and it
2167 * should be sufficient for the pretty print purpose (i.e. decoding).
2168 * Multiple bits can be allowed later if it is found
2169 * to be insufficient.
2170 */
2171 encoding = BTF_INT_ENCODING(int_data);
2172 if (encoding &&
2173 encoding != BTF_INT_SIGNED &&
2174 encoding != BTF_INT_CHAR &&
2175 encoding != BTF_INT_BOOL) {
2176 btf_verifier_log_type(env, t, "Unsupported encoding");
2177 return -ENOTSUPP;
2178 }
2179
2180 btf_verifier_log_type(env, t, NULL);
2181
2182 return meta_needed;
2183 }
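
/* For reference (illustrative): a plain 4-byte "int" is typically described by
 * t->size = 4 and int_data such that BTF_INT_BITS(int_data) == 32,
 * BTF_INT_OFFSET(int_data) == 0 and BTF_INT_ENCODING(int_data) == BTF_INT_SIGNED.
 * That passes the checks above: 32 <= BITS_PER_U128,
 * BITS_ROUNDUP_BYTES(32) == 4 == t->size, and only one encoding bit is set.
 */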
2184
2185 static void btf_int_log(struct btf_verifier_env *env,
2186 const struct btf_type *t)
2187 {
2188 int int_data = btf_type_int(t);
2189
2190 btf_verifier_log(env,
2191 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
2192 t->size, BTF_INT_OFFSET(int_data),
2193 BTF_INT_BITS(int_data),
2194 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
2195 }
2196
2197 static void btf_int128_print(struct btf_show *show, void *data)
2198 {
2199 /* data points to a __int128 number.
2200 * Suppose
2201 * int128_num = *(__int128 *)data;
2202 	 * The formulas below show what upper_num and lower_num represent:
2203 * upper_num = int128_num >> 64;
2204 * lower_num = int128_num & 0xffffffffFFFFFFFFULL;
2205 */
2206 u64 upper_num, lower_num;
2207
2208 #ifdef __BIG_ENDIAN_BITFIELD
2209 upper_num = *(u64 *)data;
2210 lower_num = *(u64 *)(data + 8);
2211 #else
2212 upper_num = *(u64 *)(data + 8);
2213 lower_num = *(u64 *)data;
2214 #endif
2215 if (upper_num == 0)
2216 btf_show_type_value(show, "0x%llx", lower_num);
2217 else
2218 btf_show_type_values(show, "0x%llx%016llx", upper_num,
2219 lower_num);
2220 }
2221
2222 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
2223 u16 right_shift_bits)
2224 {
2225 u64 upper_num, lower_num;
2226
2227 #ifdef __BIG_ENDIAN_BITFIELD
2228 upper_num = print_num[0];
2229 lower_num = print_num[1];
2230 #else
2231 upper_num = print_num[1];
2232 lower_num = print_num[0];
2233 #endif
2234
2235 /* shake out un-needed bits by shift/or operations */
2236 if (left_shift_bits >= 64) {
2237 upper_num = lower_num << (left_shift_bits - 64);
2238 lower_num = 0;
2239 } else {
2240 upper_num = (upper_num << left_shift_bits) |
2241 (lower_num >> (64 - left_shift_bits));
2242 lower_num = lower_num << left_shift_bits;
2243 }
2244
2245 if (right_shift_bits >= 64) {
2246 lower_num = upper_num >> (right_shift_bits - 64);
2247 upper_num = 0;
2248 } else {
2249 lower_num = (lower_num >> right_shift_bits) |
2250 (upper_num << (64 - right_shift_bits));
2251 upper_num = upper_num >> right_shift_bits;
2252 }
2253
2254 #ifdef __BIG_ENDIAN_BITFIELD
2255 print_num[0] = upper_num;
2256 print_num[1] = lower_num;
2257 #else
2258 print_num[0] = lower_num;
2259 print_num[1] = upper_num;
2260 #endif
2261 }
2262
2263 static void btf_bitfield_show(void *data, u8 bits_offset,
2264 u8 nr_bits, struct btf_show *show)
2265 {
2266 u16 left_shift_bits, right_shift_bits;
2267 u8 nr_copy_bytes;
2268 u8 nr_copy_bits;
2269 u64 print_num[2] = {};
2270
2271 nr_copy_bits = nr_bits + bits_offset;
2272 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
2273
2274 memcpy(print_num, data, nr_copy_bytes);
2275
2276 #ifdef __BIG_ENDIAN_BITFIELD
2277 left_shift_bits = bits_offset;
2278 #else
2279 left_shift_bits = BITS_PER_U128 - nr_copy_bits;
2280 #endif
2281 right_shift_bits = BITS_PER_U128 - nr_bits;
2282
2283 btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
2284 btf_int128_print(show, print_num);
2285 }
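
/* Worked example (illustrative, little endian): showing a bitfield with
 * bits_offset = 3 and nr_bits = 5 copies one byte into print_num, then shifts
 * left by 128 - 8 = 120 and right by 128 - 5 = 123, leaving the five field
 * bits right-justified so btf_int128_print() can emit them as a plain hex
 * value.
 */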
2286
2287
2288 static void btf_int_bits_show(const struct btf *btf,
2289 const struct btf_type *t,
2290 void *data, u8 bits_offset,
2291 struct btf_show *show)
2292 {
2293 u32 int_data = btf_type_int(t);
2294 u8 nr_bits = BTF_INT_BITS(int_data);
2295 u8 total_bits_offset;
2296
2297 /*
2298 * bits_offset is at most 7.
2299 * BTF_INT_OFFSET() cannot exceed 128 bits.
2300 */
2301 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
2302 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
2303 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
2304 btf_bitfield_show(data, bits_offset, nr_bits, show);
2305 }
2306
2307 static void btf_int_show(const struct btf *btf, const struct btf_type *t,
2308 u32 type_id, void *data, u8 bits_offset,
2309 struct btf_show *show)
2310 {
2311 u32 int_data = btf_type_int(t);
2312 u8 encoding = BTF_INT_ENCODING(int_data);
2313 bool sign = encoding & BTF_INT_SIGNED;
2314 u8 nr_bits = BTF_INT_BITS(int_data);
2315 void *safe_data;
2316
2317 safe_data = btf_show_start_type(show, t, type_id, data);
2318 if (!safe_data)
2319 return;
2320
2321 if (bits_offset || BTF_INT_OFFSET(int_data) ||
2322 BITS_PER_BYTE_MASKED(nr_bits)) {
2323 btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2324 goto out;
2325 }
2326
2327 switch (nr_bits) {
2328 case 128:
2329 btf_int128_print(show, safe_data);
2330 break;
2331 case 64:
2332 if (sign)
2333 btf_show_type_value(show, "%lld", *(s64 *)safe_data);
2334 else
2335 btf_show_type_value(show, "%llu", *(u64 *)safe_data);
2336 break;
2337 case 32:
2338 if (sign)
2339 btf_show_type_value(show, "%d", *(s32 *)safe_data);
2340 else
2341 btf_show_type_value(show, "%u", *(u32 *)safe_data);
2342 break;
2343 case 16:
2344 if (sign)
2345 btf_show_type_value(show, "%d", *(s16 *)safe_data);
2346 else
2347 btf_show_type_value(show, "%u", *(u16 *)safe_data);
2348 break;
2349 case 8:
2350 if (show->state.array_encoding == BTF_INT_CHAR) {
2351 /* check for null terminator */
2352 if (show->state.array_terminated)
2353 break;
2354 if (*(char *)data == '\0') {
2355 show->state.array_terminated = 1;
2356 break;
2357 }
2358 if (isprint(*(char *)data)) {
2359 btf_show_type_value(show, "'%c'",
2360 *(char *)safe_data);
2361 break;
2362 }
2363 }
2364 if (sign)
2365 btf_show_type_value(show, "%d", *(s8 *)safe_data);
2366 else
2367 btf_show_type_value(show, "%u", *(u8 *)safe_data);
2368 break;
2369 default:
2370 btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2371 break;
2372 }
2373 out:
2374 btf_show_end_type(show);
2375 }
2376
2377 static const struct btf_kind_operations int_ops = {
2378 .check_meta = btf_int_check_meta,
2379 .resolve = btf_df_resolve,
2380 .check_member = btf_int_check_member,
2381 .check_kflag_member = btf_int_check_kflag_member,
2382 .log_details = btf_int_log,
2383 .show = btf_int_show,
2384 };
2385
2386 static int btf_modifier_check_member(struct btf_verifier_env *env,
2387 const struct btf_type *struct_type,
2388 const struct btf_member *member,
2389 const struct btf_type *member_type)
2390 {
2391 const struct btf_type *resolved_type;
2392 u32 resolved_type_id = member->type;
2393 struct btf_member resolved_member;
2394 struct btf *btf = env->btf;
2395
2396 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2397 if (!resolved_type) {
2398 btf_verifier_log_member(env, struct_type, member,
2399 "Invalid member");
2400 return -EINVAL;
2401 }
2402
2403 resolved_member = *member;
2404 resolved_member.type = resolved_type_id;
2405
2406 return btf_type_ops(resolved_type)->check_member(env, struct_type,
2407 &resolved_member,
2408 resolved_type);
2409 }
2410
2411 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
2412 const struct btf_type *struct_type,
2413 const struct btf_member *member,
2414 const struct btf_type *member_type)
2415 {
2416 const struct btf_type *resolved_type;
2417 u32 resolved_type_id = member->type;
2418 struct btf_member resolved_member;
2419 struct btf *btf = env->btf;
2420
2421 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2422 if (!resolved_type) {
2423 btf_verifier_log_member(env, struct_type, member,
2424 "Invalid member");
2425 return -EINVAL;
2426 }
2427
2428 resolved_member = *member;
2429 resolved_member.type = resolved_type_id;
2430
2431 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
2432 &resolved_member,
2433 resolved_type);
2434 }
2435
2436 static int btf_ptr_check_member(struct btf_verifier_env *env,
2437 const struct btf_type *struct_type,
2438 const struct btf_member *member,
2439 const struct btf_type *member_type)
2440 {
2441 u32 struct_size, struct_bits_off, bytes_offset;
2442
2443 struct_size = struct_type->size;
2444 struct_bits_off = member->offset;
2445 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2446
2447 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2448 btf_verifier_log_member(env, struct_type, member,
2449 "Member is not byte aligned");
2450 return -EINVAL;
2451 }
2452
2453 if (struct_size - bytes_offset < sizeof(void *)) {
2454 btf_verifier_log_member(env, struct_type, member,
2455 "Member exceeds struct_size");
2456 return -EINVAL;
2457 }
2458
2459 return 0;
2460 }
2461
2462 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
2463 const struct btf_type *t,
2464 u32 meta_left)
2465 {
2466 const char *value;
2467
2468 if (btf_type_vlen(t)) {
2469 btf_verifier_log_type(env, t, "vlen != 0");
2470 return -EINVAL;
2471 }
2472
2473 if (btf_type_kflag(t)) {
2474 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2475 return -EINVAL;
2476 }
2477
2478 if (!BTF_TYPE_ID_VALID(t->type)) {
2479 btf_verifier_log_type(env, t, "Invalid type_id");
2480 return -EINVAL;
2481 }
2482
2483 /* typedef/type_tag type must have a valid name, and other ref types,
2484 * volatile, const, restrict, should have a null name.
2485 */
2486 if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
2487 if (!t->name_off ||
2488 !btf_name_valid_identifier(env->btf, t->name_off)) {
2489 btf_verifier_log_type(env, t, "Invalid name");
2490 return -EINVAL;
2491 }
2492 } else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) {
2493 value = btf_name_by_offset(env->btf, t->name_off);
2494 if (!value || !value[0]) {
2495 btf_verifier_log_type(env, t, "Invalid name");
2496 return -EINVAL;
2497 }
2498 } else {
2499 if (t->name_off) {
2500 btf_verifier_log_type(env, t, "Invalid name");
2501 return -EINVAL;
2502 }
2503 }
2504
2505 btf_verifier_log_type(env, t, NULL);
2506
2507 return 0;
2508 }
2509
2510 static int btf_modifier_resolve(struct btf_verifier_env *env,
2511 const struct resolve_vertex *v)
2512 {
2513 const struct btf_type *t = v->t;
2514 const struct btf_type *next_type;
2515 u32 next_type_id = t->type;
2516 struct btf *btf = env->btf;
2517
2518 next_type = btf_type_by_id(btf, next_type_id);
2519 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2520 btf_verifier_log_type(env, v->t, "Invalid type_id");
2521 return -EINVAL;
2522 }
2523
2524 if (!env_type_is_resolve_sink(env, next_type) &&
2525 !env_type_is_resolved(env, next_type_id))
2526 return env_stack_push(env, next_type, next_type_id);
2527
2528 /* Figure out the resolved next_type_id with size.
2529 * They will be stored in the current modifier's
2530 	 * resolved_ids and resolved_sizes so that they can
2531 	 * save us some type following when we use them later (e.g. in
2532 * pretty print).
2533 */
2534 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2535 if (env_type_is_resolved(env, next_type_id))
2536 next_type = btf_type_id_resolve(btf, &next_type_id);
2537
2538 /* "typedef void new_void", "const void"...etc */
2539 if (!btf_type_is_void(next_type) &&
2540 !btf_type_is_fwd(next_type) &&
2541 !btf_type_is_func_proto(next_type)) {
2542 btf_verifier_log_type(env, v->t, "Invalid type_id");
2543 return -EINVAL;
2544 }
2545 }
2546
2547 env_stack_pop_resolved(env, next_type_id, 0);
2548
2549 return 0;
2550 }
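
/* Illustrative effect of the caching above (type ids are made up): after
 * resolving
 *
 *	[5] TYPEDEF u32_t -> [2] INT u32
 *
 * the typedef's resolved_ids slot records 2 (and its resolved_sizes slot 0,
 * since modifiers store no size of their own), so later users such as
 * btf_type_id_size() and pretty printing can skip the modifier chain.
 */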
2551
2552 static int btf_var_resolve(struct btf_verifier_env *env,
2553 const struct resolve_vertex *v)
2554 {
2555 const struct btf_type *next_type;
2556 const struct btf_type *t = v->t;
2557 u32 next_type_id = t->type;
2558 struct btf *btf = env->btf;
2559
2560 next_type = btf_type_by_id(btf, next_type_id);
2561 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2562 btf_verifier_log_type(env, v->t, "Invalid type_id");
2563 return -EINVAL;
2564 }
2565
2566 if (!env_type_is_resolve_sink(env, next_type) &&
2567 !env_type_is_resolved(env, next_type_id))
2568 return env_stack_push(env, next_type, next_type_id);
2569
2570 if (btf_type_is_modifier(next_type)) {
2571 const struct btf_type *resolved_type;
2572 u32 resolved_type_id;
2573
2574 resolved_type_id = next_type_id;
2575 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2576
2577 if (btf_type_is_ptr(resolved_type) &&
2578 !env_type_is_resolve_sink(env, resolved_type) &&
2579 !env_type_is_resolved(env, resolved_type_id))
2580 return env_stack_push(env, resolved_type,
2581 resolved_type_id);
2582 }
2583
2584 	/* We must resolve to something concrete at this point; no
2585 	 * forward types or anything similar that would resolve to a size of
2586 	 * zero are allowed.
2587 */
2588 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2589 btf_verifier_log_type(env, v->t, "Invalid type_id");
2590 return -EINVAL;
2591 }
2592
2593 env_stack_pop_resolved(env, next_type_id, 0);
2594
2595 return 0;
2596 }
2597
2598 static int btf_ptr_resolve(struct btf_verifier_env *env,
2599 const struct resolve_vertex *v)
2600 {
2601 const struct btf_type *next_type;
2602 const struct btf_type *t = v->t;
2603 u32 next_type_id = t->type;
2604 struct btf *btf = env->btf;
2605
2606 next_type = btf_type_by_id(btf, next_type_id);
2607 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2608 btf_verifier_log_type(env, v->t, "Invalid type_id");
2609 return -EINVAL;
2610 }
2611
2612 if (!env_type_is_resolve_sink(env, next_type) &&
2613 !env_type_is_resolved(env, next_type_id))
2614 return env_stack_push(env, next_type, next_type_id);
2615
2616 /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
2617 * the modifier may have stopped resolving when it was resolved
2618 * to a ptr (last-resolved-ptr).
2619 *
2620 * We now need to continue from the last-resolved-ptr to
2621 	 * ensure the last-resolved-ptr does not refer back to
2622 * the current ptr (t).
2623 */
2624 if (btf_type_is_modifier(next_type)) {
2625 const struct btf_type *resolved_type;
2626 u32 resolved_type_id;
2627
2628 resolved_type_id = next_type_id;
2629 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2630
2631 if (btf_type_is_ptr(resolved_type) &&
2632 !env_type_is_resolve_sink(env, resolved_type) &&
2633 !env_type_is_resolved(env, resolved_type_id))
2634 return env_stack_push(env, resolved_type,
2635 resolved_type_id);
2636 }
2637
2638 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2639 if (env_type_is_resolved(env, next_type_id))
2640 next_type = btf_type_id_resolve(btf, &next_type_id);
2641
2642 if (!btf_type_is_void(next_type) &&
2643 !btf_type_is_fwd(next_type) &&
2644 !btf_type_is_func_proto(next_type)) {
2645 btf_verifier_log_type(env, v->t, "Invalid type_id");
2646 return -EINVAL;
2647 }
2648 }
2649
2650 env_stack_pop_resolved(env, next_type_id, 0);
2651
2652 return 0;
2653 }
2654
2655 static void btf_modifier_show(const struct btf *btf,
2656 const struct btf_type *t,
2657 u32 type_id, void *data,
2658 u8 bits_offset, struct btf_show *show)
2659 {
2660 if (btf->resolved_ids)
2661 t = btf_type_id_resolve(btf, &type_id);
2662 else
2663 t = btf_type_skip_modifiers(btf, type_id, NULL);
2664
2665 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2666 }
2667
2668 static void btf_var_show(const struct btf *btf, const struct btf_type *t,
2669 u32 type_id, void *data, u8 bits_offset,
2670 struct btf_show *show)
2671 {
2672 t = btf_type_id_resolve(btf, &type_id);
2673
2674 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2675 }
2676
2677 static void btf_ptr_show(const struct btf *btf, const struct btf_type *t,
2678 u32 type_id, void *data, u8 bits_offset,
2679 struct btf_show *show)
2680 {
2681 void *safe_data;
2682
2683 safe_data = btf_show_start_type(show, t, type_id, data);
2684 if (!safe_data)
2685 return;
2686
2687 /* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */
2688 if (show->flags & BTF_SHOW_PTR_RAW)
2689 btf_show_type_value(show, "0x%px", *(void **)safe_data);
2690 else
2691 btf_show_type_value(show, "0x%p", *(void **)safe_data);
2692 btf_show_end_type(show);
2693 }
2694
2695 static void btf_ref_type_log(struct btf_verifier_env *env,
2696 const struct btf_type *t)
2697 {
2698 btf_verifier_log(env, "type_id=%u", t->type);
2699 }
2700
2701 static struct btf_kind_operations modifier_ops = {
2702 .check_meta = btf_ref_type_check_meta,
2703 .resolve = btf_modifier_resolve,
2704 .check_member = btf_modifier_check_member,
2705 .check_kflag_member = btf_modifier_check_kflag_member,
2706 .log_details = btf_ref_type_log,
2707 .show = btf_modifier_show,
2708 };
2709
2710 static struct btf_kind_operations ptr_ops = {
2711 .check_meta = btf_ref_type_check_meta,
2712 .resolve = btf_ptr_resolve,
2713 .check_member = btf_ptr_check_member,
2714 .check_kflag_member = btf_generic_check_kflag_member,
2715 .log_details = btf_ref_type_log,
2716 .show = btf_ptr_show,
2717 };
2718
2719 static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
2720 const struct btf_type *t,
2721 u32 meta_left)
2722 {
2723 if (btf_type_vlen(t)) {
2724 btf_verifier_log_type(env, t, "vlen != 0");
2725 return -EINVAL;
2726 }
2727
2728 if (t->type) {
2729 btf_verifier_log_type(env, t, "type != 0");
2730 return -EINVAL;
2731 }
2732
2733 /* fwd type must have a valid name */
2734 if (!t->name_off ||
2735 !btf_name_valid_identifier(env->btf, t->name_off)) {
2736 btf_verifier_log_type(env, t, "Invalid name");
2737 return -EINVAL;
2738 }
2739
2740 btf_verifier_log_type(env, t, NULL);
2741
2742 return 0;
2743 }
2744
2745 static void btf_fwd_type_log(struct btf_verifier_env *env,
2746 const struct btf_type *t)
2747 {
2748 btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
2749 }
2750
2751 static struct btf_kind_operations fwd_ops = {
2752 .check_meta = btf_fwd_check_meta,
2753 .resolve = btf_df_resolve,
2754 .check_member = btf_df_check_member,
2755 .check_kflag_member = btf_df_check_kflag_member,
2756 .log_details = btf_fwd_type_log,
2757 .show = btf_df_show,
2758 };
2759
2760 static int btf_array_check_member(struct btf_verifier_env *env,
2761 const struct btf_type *struct_type,
2762 const struct btf_member *member,
2763 const struct btf_type *member_type)
2764 {
2765 u32 struct_bits_off = member->offset;
2766 u32 struct_size, bytes_offset;
2767 u32 array_type_id, array_size;
2768 struct btf *btf = env->btf;
2769
2770 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2771 btf_verifier_log_member(env, struct_type, member,
2772 "Member is not byte aligned");
2773 return -EINVAL;
2774 }
2775
2776 array_type_id = member->type;
2777 btf_type_id_size(btf, &array_type_id, &array_size);
2778 struct_size = struct_type->size;
2779 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2780 if (struct_size - bytes_offset < array_size) {
2781 btf_verifier_log_member(env, struct_type, member,
2782 "Member exceeds struct_size");
2783 return -EINVAL;
2784 }
2785
2786 return 0;
2787 }
2788
2789 static s32 btf_array_check_meta(struct btf_verifier_env *env,
2790 const struct btf_type *t,
2791 u32 meta_left)
2792 {
2793 const struct btf_array *array = btf_type_array(t);
2794 u32 meta_needed = sizeof(*array);
2795
2796 if (meta_left < meta_needed) {
2797 btf_verifier_log_basic(env, t,
2798 "meta_left:%u meta_needed:%u",
2799 meta_left, meta_needed);
2800 return -EINVAL;
2801 }
2802
2803 /* array type should not have a name */
2804 if (t->name_off) {
2805 btf_verifier_log_type(env, t, "Invalid name");
2806 return -EINVAL;
2807 }
2808
2809 if (btf_type_vlen(t)) {
2810 btf_verifier_log_type(env, t, "vlen != 0");
2811 return -EINVAL;
2812 }
2813
2814 if (btf_type_kflag(t)) {
2815 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2816 return -EINVAL;
2817 }
2818
2819 if (t->size) {
2820 btf_verifier_log_type(env, t, "size != 0");
2821 return -EINVAL;
2822 }
2823
2824 /* Array elem type and index type cannot be in type void,
2825 * so !array->type and !array->index_type are not allowed.
2826 */
2827 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
2828 btf_verifier_log_type(env, t, "Invalid elem");
2829 return -EINVAL;
2830 }
2831
2832 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
2833 btf_verifier_log_type(env, t, "Invalid index");
2834 return -EINVAL;
2835 }
2836
2837 btf_verifier_log_type(env, t, NULL);
2838
2839 return meta_needed;
2840 }
2841
2842 static int btf_array_resolve(struct btf_verifier_env *env,
2843 const struct resolve_vertex *v)
2844 {
2845 const struct btf_array *array = btf_type_array(v->t);
2846 const struct btf_type *elem_type, *index_type;
2847 u32 elem_type_id, index_type_id;
2848 struct btf *btf = env->btf;
2849 u32 elem_size;
2850
2851 /* Check array->index_type */
2852 index_type_id = array->index_type;
2853 index_type = btf_type_by_id(btf, index_type_id);
2854 if (btf_type_nosize_or_null(index_type) ||
2855 btf_type_is_resolve_source_only(index_type)) {
2856 btf_verifier_log_type(env, v->t, "Invalid index");
2857 return -EINVAL;
2858 }
2859
2860 if (!env_type_is_resolve_sink(env, index_type) &&
2861 !env_type_is_resolved(env, index_type_id))
2862 return env_stack_push(env, index_type, index_type_id);
2863
2864 index_type = btf_type_id_size(btf, &index_type_id, NULL);
2865 if (!index_type || !btf_type_is_int(index_type) ||
2866 !btf_type_int_is_regular(index_type)) {
2867 btf_verifier_log_type(env, v->t, "Invalid index");
2868 return -EINVAL;
2869 }
2870
2871 /* Check array->type */
2872 elem_type_id = array->type;
2873 elem_type = btf_type_by_id(btf, elem_type_id);
2874 if (btf_type_nosize_or_null(elem_type) ||
2875 btf_type_is_resolve_source_only(elem_type)) {
2876 btf_verifier_log_type(env, v->t,
2877 "Invalid elem");
2878 return -EINVAL;
2879 }
2880
2881 if (!env_type_is_resolve_sink(env, elem_type) &&
2882 !env_type_is_resolved(env, elem_type_id))
2883 return env_stack_push(env, elem_type, elem_type_id);
2884
2885 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2886 if (!elem_type) {
2887 btf_verifier_log_type(env, v->t, "Invalid elem");
2888 return -EINVAL;
2889 }
2890
2891 if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
2892 btf_verifier_log_type(env, v->t, "Invalid array of int");
2893 return -EINVAL;
2894 }
2895
2896 if (array->nelems && elem_size > U32_MAX / array->nelems) {
2897 btf_verifier_log_type(env, v->t,
2898 "Array size overflows U32_MAX");
2899 return -EINVAL;
2900 }
2901
2902 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
2903
2904 return 0;
2905 }
2906
2907 static void btf_array_log(struct btf_verifier_env *env,
2908 const struct btf_type *t)
2909 {
2910 const struct btf_array *array = btf_type_array(t);
2911
2912 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
2913 array->type, array->index_type, array->nelems);
2914 }
2915
2916 static void __btf_array_show(const struct btf *btf, const struct btf_type *t,
2917 u32 type_id, void *data, u8 bits_offset,
2918 struct btf_show *show)
2919 {
2920 const struct btf_array *array = btf_type_array(t);
2921 const struct btf_kind_operations *elem_ops;
2922 const struct btf_type *elem_type;
2923 u32 i, elem_size = 0, elem_type_id;
2924 u16 encoding = 0;
2925
2926 elem_type_id = array->type;
2927 elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL);
2928 if (elem_type && btf_type_has_size(elem_type))
2929 elem_size = elem_type->size;
2930
2931 if (elem_type && btf_type_is_int(elem_type)) {
2932 u32 int_type = btf_type_int(elem_type);
2933
2934 encoding = BTF_INT_ENCODING(int_type);
2935
2936 /*
2937 * BTF_INT_CHAR encoding never seems to be set for
2938 * char arrays, so if size is 1 and element is
2939 * printable as a char, we'll do that.
2940 */
2941 if (elem_size == 1)
2942 encoding = BTF_INT_CHAR;
2943 }
2944
2945 if (!btf_show_start_array_type(show, t, type_id, encoding, data))
2946 return;
2947
2948 if (!elem_type)
2949 goto out;
2950 elem_ops = btf_type_ops(elem_type);
2951
2952 for (i = 0; i < array->nelems; i++) {
2953
2954 btf_show_start_array_member(show);
2955
2956 elem_ops->show(btf, elem_type, elem_type_id, data,
2957 bits_offset, show);
2958 data += elem_size;
2959
2960 btf_show_end_array_member(show);
2961
2962 if (show->state.array_terminated)
2963 break;
2964 }
2965 out:
2966 btf_show_end_array_type(show);
2967 }
2968
2969 static void btf_array_show(const struct btf *btf, const struct btf_type *t,
2970 u32 type_id, void *data, u8 bits_offset,
2971 struct btf_show *show)
2972 {
2973 const struct btf_member *m = show->state.member;
2974
2975 /*
2976 * First check if any members would be shown (are non-zero).
2977 * See comments above "struct btf_show" definition for more
2978 * details on how this works at a high-level.
2979 */
2980 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
2981 if (!show->state.depth_check) {
2982 show->state.depth_check = show->state.depth + 1;
2983 show->state.depth_to_show = 0;
2984 }
2985 __btf_array_show(btf, t, type_id, data, bits_offset, show);
2986 show->state.member = m;
2987
2988 if (show->state.depth_check != show->state.depth + 1)
2989 return;
2990 show->state.depth_check = 0;
2991
2992 if (show->state.depth_to_show <= show->state.depth)
2993 return;
2994 /*
2995 * Reaching here indicates we have recursed and found
2996 * non-zero array member(s).
2997 */
2998 }
2999 __btf_array_show(btf, t, type_id, data, bits_offset, show);
3000 }
3001
3002 static struct btf_kind_operations array_ops = {
3003 .check_meta = btf_array_check_meta,
3004 .resolve = btf_array_resolve,
3005 .check_member = btf_array_check_member,
3006 .check_kflag_member = btf_generic_check_kflag_member,
3007 .log_details = btf_array_log,
3008 .show = btf_array_show,
3009 };
3010
3011 static int btf_struct_check_member(struct btf_verifier_env *env,
3012 const struct btf_type *struct_type,
3013 const struct btf_member *member,
3014 const struct btf_type *member_type)
3015 {
3016 u32 struct_bits_off = member->offset;
3017 u32 struct_size, bytes_offset;
3018
3019 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3020 btf_verifier_log_member(env, struct_type, member,
3021 "Member is not byte aligned");
3022 return -EINVAL;
3023 }
3024
3025 struct_size = struct_type->size;
3026 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
3027 if (struct_size - bytes_offset < member_type->size) {
3028 btf_verifier_log_member(env, struct_type, member,
3029 "Member exceeds struct_size");
3030 return -EINVAL;
3031 }
3032
3033 return 0;
3034 }
3035
3036 static s32 btf_struct_check_meta(struct btf_verifier_env *env,
3037 const struct btf_type *t,
3038 u32 meta_left)
3039 {
3040 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
3041 const struct btf_member *member;
3042 u32 meta_needed, last_offset;
3043 struct btf *btf = env->btf;
3044 u32 struct_size = t->size;
3045 u32 offset;
3046 u16 i;
3047
3048 meta_needed = btf_type_vlen(t) * sizeof(*member);
3049 if (meta_left < meta_needed) {
3050 btf_verifier_log_basic(env, t,
3051 "meta_left:%u meta_needed:%u",
3052 meta_left, meta_needed);
3053 return -EINVAL;
3054 }
3055
3056 /* struct type either no name or a valid one */
3057 if (t->name_off &&
3058 !btf_name_valid_identifier(env->btf, t->name_off)) {
3059 btf_verifier_log_type(env, t, "Invalid name");
3060 return -EINVAL;
3061 }
3062
3063 btf_verifier_log_type(env, t, NULL);
3064
3065 last_offset = 0;
3066 for_each_member(i, t, member) {
3067 if (!btf_name_offset_valid(btf, member->name_off)) {
3068 btf_verifier_log_member(env, t, member,
3069 "Invalid member name_offset:%u",
3070 member->name_off);
3071 return -EINVAL;
3072 }
3073
3074 /* struct member either no name or a valid one */
3075 if (member->name_off &&
3076 !btf_name_valid_identifier(btf, member->name_off)) {
3077 btf_verifier_log_member(env, t, member, "Invalid name");
3078 return -EINVAL;
3079 }
3080 /* A member cannot be in type void */
3081 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
3082 btf_verifier_log_member(env, t, member,
3083 "Invalid type_id");
3084 return -EINVAL;
3085 }
3086
3087 offset = __btf_member_bit_offset(t, member);
3088 if (is_union && offset) {
3089 btf_verifier_log_member(env, t, member,
3090 "Invalid member bits_offset");
3091 return -EINVAL;
3092 }
3093
3094 /*
3095 * ">" instead of ">=" because the last member could be
3096 * "char a[0];"
3097 */
3098 if (last_offset > offset) {
3099 btf_verifier_log_member(env, t, member,
3100 "Invalid member bits_offset");
3101 return -EINVAL;
3102 }
3103
3104 if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
3105 btf_verifier_log_member(env, t, member,
3106 "Member bits_offset exceeds its struct size");
3107 return -EINVAL;
3108 }
3109
3110 btf_verifier_log_member(env, t, member, NULL);
3111 last_offset = offset;
3112 }
3113
3114 return meta_needed;
3115 }
3116
3117 static int btf_struct_resolve(struct btf_verifier_env *env,
3118 const struct resolve_vertex *v)
3119 {
3120 const struct btf_member *member;
3121 int err;
3122 u16 i;
3123
3124 	/* Before continuing to resolve the next_member,
3125 * ensure the last member is indeed resolved to a
3126 * type with size info.
3127 */
3128 if (v->next_member) {
3129 const struct btf_type *last_member_type;
3130 const struct btf_member *last_member;
3131 u32 last_member_type_id;
3132
3133 last_member = btf_type_member(v->t) + v->next_member - 1;
3134 last_member_type_id = last_member->type;
3135 if (WARN_ON_ONCE(!env_type_is_resolved(env,
3136 last_member_type_id)))
3137 return -EINVAL;
3138
3139 last_member_type = btf_type_by_id(env->btf,
3140 last_member_type_id);
3141 if (btf_type_kflag(v->t))
3142 err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
3143 last_member,
3144 last_member_type);
3145 else
3146 err = btf_type_ops(last_member_type)->check_member(env, v->t,
3147 last_member,
3148 last_member_type);
3149 if (err)
3150 return err;
3151 }
3152
3153 for_each_member_from(i, v->next_member, v->t, member) {
3154 u32 member_type_id = member->type;
3155 const struct btf_type *member_type = btf_type_by_id(env->btf,
3156 member_type_id);
3157
3158 if (btf_type_nosize_or_null(member_type) ||
3159 btf_type_is_resolve_source_only(member_type)) {
3160 btf_verifier_log_member(env, v->t, member,
3161 "Invalid member");
3162 return -EINVAL;
3163 }
3164
3165 if (!env_type_is_resolve_sink(env, member_type) &&
3166 !env_type_is_resolved(env, member_type_id)) {
3167 env_stack_set_next_member(env, i + 1);
3168 return env_stack_push(env, member_type, member_type_id);
3169 }
3170
3171 if (btf_type_kflag(v->t))
3172 err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
3173 member,
3174 member_type);
3175 else
3176 err = btf_type_ops(member_type)->check_member(env, v->t,
3177 member,
3178 member_type);
3179 if (err)
3180 return err;
3181 }
3182
3183 env_stack_pop_resolved(env, 0, 0);
3184
3185 return 0;
3186 }
3187
3188 static void btf_struct_log(struct btf_verifier_env *env,
3189 const struct btf_type *t)
3190 {
3191 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3192 }
3193
3194 enum btf_field_type {
3195 BTF_FIELD_SPIN_LOCK,
3196 BTF_FIELD_TIMER,
3197 BTF_FIELD_KPTR,
3198 };
3199
3200 enum {
3201 BTF_FIELD_IGNORE = 0,
3202 BTF_FIELD_FOUND = 1,
3203 };
3204
3205 struct btf_field_info {
3206 u32 type_id;
3207 u32 off;
3208 enum bpf_kptr_type type;
3209 };
3210
3211 static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
3212 u32 off, int sz, struct btf_field_info *info)
3213 {
3214 if (!__btf_type_is_struct(t))
3215 return BTF_FIELD_IGNORE;
3216 if (t->size != sz)
3217 return BTF_FIELD_IGNORE;
3218 info->off = off;
3219 return BTF_FIELD_FOUND;
3220 }
3221
3222 static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
3223 u32 off, int sz, struct btf_field_info *info)
3224 {
3225 enum bpf_kptr_type type;
3226 u32 res_id;
3227
3228 /* For PTR, sz is always == 8 */
3229 if (!btf_type_is_ptr(t))
3230 return BTF_FIELD_IGNORE;
3231 t = btf_type_by_id(btf, t->type);
3232
3233 if (!btf_type_is_type_tag(t))
3234 return BTF_FIELD_IGNORE;
3235 /* Reject extra tags */
3236 if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
3237 return -EINVAL;
3238 if (!strcmp("kptr", __btf_name_by_offset(btf, t->name_off)))
3239 type = BPF_KPTR_UNREF;
3240 else if (!strcmp("kptr_ref", __btf_name_by_offset(btf, t->name_off)))
3241 type = BPF_KPTR_REF;
3242 else
3243 return -EINVAL;
3244
3245 /* Get the base type */
3246 t = btf_type_skip_modifiers(btf, t->type, &res_id);
3247 /* Only pointer to struct is allowed */
3248 if (!__btf_type_is_struct(t))
3249 return -EINVAL;
3250
3251 info->type_id = res_id;
3252 info->off = off;
3253 info->type = type;
3254 return BTF_FIELD_FOUND;
3255 }
3256
3257 static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t,
3258 const char *name, int sz, int align,
3259 enum btf_field_type field_type,
3260 struct btf_field_info *info, int info_cnt)
3261 {
3262 const struct btf_member *member;
3263 struct btf_field_info tmp;
3264 int ret, idx = 0;
3265 u32 i, off;
3266
3267 for_each_member(i, t, member) {
3268 const struct btf_type *member_type = btf_type_by_id(btf,
3269 member->type);
3270
3271 if (name && strcmp(__btf_name_by_offset(btf, member_type->name_off), name))
3272 continue;
3273
3274 off = __btf_member_bit_offset(t, member);
3275 if (off % 8)
3276 /* valid C code cannot generate such BTF */
3277 return -EINVAL;
3278 off /= 8;
3279 if (off % align)
3280 return -EINVAL;
3281
3282 switch (field_type) {
3283 case BTF_FIELD_SPIN_LOCK:
3284 case BTF_FIELD_TIMER:
3285 ret = btf_find_struct(btf, member_type, off, sz,
3286 idx < info_cnt ? &info[idx] : &tmp);
3287 if (ret < 0)
3288 return ret;
3289 break;
3290 case BTF_FIELD_KPTR:
3291 ret = btf_find_kptr(btf, member_type, off, sz,
3292 idx < info_cnt ? &info[idx] : &tmp);
3293 if (ret < 0)
3294 return ret;
3295 break;
3296 default:
3297 return -EFAULT;
3298 }
3299
3300 if (ret == BTF_FIELD_IGNORE)
3301 continue;
3302 if (idx >= info_cnt)
3303 return -E2BIG;
3304 ++idx;
3305 }
3306 return idx;
3307 }
3308
3309 static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
3310 const char *name, int sz, int align,
3311 enum btf_field_type field_type,
3312 struct btf_field_info *info, int info_cnt)
3313 {
3314 const struct btf_var_secinfo *vsi;
3315 struct btf_field_info tmp;
3316 int ret, idx = 0;
3317 u32 i, off;
3318
3319 for_each_vsi(i, t, vsi) {
3320 const struct btf_type *var = btf_type_by_id(btf, vsi->type);
3321 const struct btf_type *var_type = btf_type_by_id(btf, var->type);
3322
3323 off = vsi->offset;
3324
3325 if (name && strcmp(__btf_name_by_offset(btf, var_type->name_off), name))
3326 continue;
3327 if (vsi->size != sz)
3328 continue;
3329 if (off % align)
3330 return -EINVAL;
3331
3332 switch (field_type) {
3333 case BTF_FIELD_SPIN_LOCK:
3334 case BTF_FIELD_TIMER:
3335 ret = btf_find_struct(btf, var_type, off, sz,
3336 idx < info_cnt ? &info[idx] : &tmp);
3337 if (ret < 0)
3338 return ret;
3339 break;
3340 case BTF_FIELD_KPTR:
3341 ret = btf_find_kptr(btf, var_type, off, sz,
3342 idx < info_cnt ? &info[idx] : &tmp);
3343 if (ret < 0)
3344 return ret;
3345 break;
3346 default:
3347 return -EFAULT;
3348 }
3349
3350 if (ret == BTF_FIELD_IGNORE)
3351 continue;
3352 if (idx >= info_cnt)
3353 return -E2BIG;
3354 ++idx;
3355 }
3356 return idx;
3357 }
3358
3359 static int btf_find_field(const struct btf *btf, const struct btf_type *t,
3360 enum btf_field_type field_type,
3361 struct btf_field_info *info, int info_cnt)
3362 {
3363 const char *name;
3364 int sz, align;
3365
3366 switch (field_type) {
3367 case BTF_FIELD_SPIN_LOCK:
3368 name = "bpf_spin_lock";
3369 sz = sizeof(struct bpf_spin_lock);
3370 align = __alignof__(struct bpf_spin_lock);
3371 break;
3372 case BTF_FIELD_TIMER:
3373 name = "bpf_timer";
3374 sz = sizeof(struct bpf_timer);
3375 align = __alignof__(struct bpf_timer);
3376 break;
3377 case BTF_FIELD_KPTR:
3378 name = NULL;
3379 sz = sizeof(u64);
3380 align = 8;
3381 break;
3382 default:
3383 return -EFAULT;
3384 }
3385
3386 if (__btf_type_is_struct(t))
3387 return btf_find_struct_field(btf, t, name, sz, align, field_type, info, info_cnt);
3388 else if (btf_type_is_datasec(t))
3389 return btf_find_datasec_var(btf, t, name, sz, align, field_type, info, info_cnt);
3390 return -EINVAL;
3391 }
3392
3393 /* find 'struct bpf_spin_lock' in map value.
3394 * return >= 0 offset if found
3395 * and < 0 in case of error
3396 */
3397 int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
3398 {
3399 struct btf_field_info info;
3400 int ret;
3401
3402 ret = btf_find_field(btf, t, BTF_FIELD_SPIN_LOCK, &info, 1);
3403 if (ret < 0)
3404 return ret;
3405 if (!ret)
3406 return -ENOENT;
3407 return info.off;
3408 }
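
/* Illustrative map value (the struct name is just an example):
 *
 *	struct val {
 *		int counter;
 *		struct bpf_spin_lock lock;
 *	};
 *
 * btf_find_spin_lock() would return offsetof(struct val, lock) here, and
 * -ENOENT for a value type that contains no bpf_spin_lock at all.
 */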
3409
3410 int btf_find_timer(const struct btf *btf, const struct btf_type *t)
3411 {
3412 struct btf_field_info info;
3413 int ret;
3414
3415 ret = btf_find_field(btf, t, BTF_FIELD_TIMER, &info, 1);
3416 if (ret < 0)
3417 return ret;
3418 if (!ret)
3419 return -ENOENT;
3420 return info.off;
3421 }
3422
3423 struct bpf_map_value_off *btf_parse_kptrs(const struct btf *btf,
3424 const struct btf_type *t)
3425 {
3426 struct btf_field_info info_arr[BPF_MAP_VALUE_OFF_MAX];
3427 struct bpf_map_value_off *tab;
3428 struct btf *kernel_btf = NULL;
3429 struct module *mod = NULL;
3430 int ret, i, nr_off;
3431
3432 ret = btf_find_field(btf, t, BTF_FIELD_KPTR, info_arr, ARRAY_SIZE(info_arr));
3433 if (ret < 0)
3434 return ERR_PTR(ret);
3435 if (!ret)
3436 return NULL;
3437
3438 nr_off = ret;
3439 tab = kzalloc(offsetof(struct bpf_map_value_off, off[nr_off]), GFP_KERNEL | __GFP_NOWARN);
3440 if (!tab)
3441 return ERR_PTR(-ENOMEM);
3442
3443 for (i = 0; i < nr_off; i++) {
3444 const struct btf_type *t;
3445 s32 id;
3446
3447 /* Find type in map BTF, and use it to look up the matching type
3448 * in vmlinux or module BTFs, by name and kind.
3449 */
3450 t = btf_type_by_id(btf, info_arr[i].type_id);
3451 id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info),
3452 &kernel_btf);
3453 if (id < 0) {
3454 ret = id;
3455 goto end;
3456 }
3457
3458 /* Find and stash the function pointer for the destruction function that
3459 * needs to be eventually invoked from the map free path.
3460 */
3461 if (info_arr[i].type == BPF_KPTR_REF) {
3462 const struct btf_type *dtor_func;
3463 const char *dtor_func_name;
3464 unsigned long addr;
3465 s32 dtor_btf_id;
3466
3467 /* This call also serves as a whitelist of allowed objects that
3468 * can be used as a referenced pointer and be stored in a map at
3469 * the same time.
3470 */
3471 dtor_btf_id = btf_find_dtor_kfunc(kernel_btf, id);
3472 if (dtor_btf_id < 0) {
3473 ret = dtor_btf_id;
3474 goto end_btf;
3475 }
3476
3477 dtor_func = btf_type_by_id(kernel_btf, dtor_btf_id);
3478 if (!dtor_func) {
3479 ret = -ENOENT;
3480 goto end_btf;
3481 }
3482
3483 if (btf_is_module(kernel_btf)) {
3484 mod = btf_try_get_module(kernel_btf);
3485 if (!mod) {
3486 ret = -ENXIO;
3487 goto end_btf;
3488 }
3489 }
3490
3491 /* We already verified dtor_func to be btf_type_is_func
3492 * in register_btf_id_dtor_kfuncs.
3493 */
3494 dtor_func_name = __btf_name_by_offset(kernel_btf, dtor_func->name_off);
3495 addr = kallsyms_lookup_name(dtor_func_name);
3496 if (!addr) {
3497 ret = -EINVAL;
3498 goto end_mod;
3499 }
3500 tab->off[i].kptr.dtor = (void *)addr;
3501 }
3502
3503 tab->off[i].offset = info_arr[i].off;
3504 tab->off[i].type = info_arr[i].type;
3505 tab->off[i].kptr.btf_id = id;
3506 tab->off[i].kptr.btf = kernel_btf;
3507 tab->off[i].kptr.module = mod;
3508 }
3509 tab->nr_off = nr_off;
3510 return tab;
3511 end_mod:
3512 module_put(mod);
3513 end_btf:
3514 btf_put(kernel_btf);
3515 end:
3516 while (i--) {
3517 btf_put(tab->off[i].kptr.btf);
3518 if (tab->off[i].kptr.module)
3519 module_put(tab->off[i].kptr.module);
3520 }
3521 kfree(tab);
3522 return ERR_PTR(ret);
3523 }
3524
3525 static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
3526 u32 type_id, void *data, u8 bits_offset,
3527 struct btf_show *show)
3528 {
3529 const struct btf_member *member;
3530 void *safe_data;
3531 u32 i;
3532
3533 safe_data = btf_show_start_struct_type(show, t, type_id, data);
3534 if (!safe_data)
3535 return;
3536
3537 for_each_member(i, t, member) {
3538 const struct btf_type *member_type = btf_type_by_id(btf,
3539 member->type);
3540 const struct btf_kind_operations *ops;
3541 u32 member_offset, bitfield_size;
3542 u32 bytes_offset;
3543 u8 bits8_offset;
3544
3545 btf_show_start_member(show, member);
3546
3547 member_offset = __btf_member_bit_offset(t, member);
3548 bitfield_size = __btf_member_bitfield_size(t, member);
3549 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
3550 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
3551 if (bitfield_size) {
3552 safe_data = btf_show_start_type(show, member_type,
3553 member->type,
3554 data + bytes_offset);
3555 if (safe_data)
3556 btf_bitfield_show(safe_data,
3557 bits8_offset,
3558 bitfield_size, show);
3559 btf_show_end_type(show);
3560 } else {
3561 ops = btf_type_ops(member_type);
3562 ops->show(btf, member_type, member->type,
3563 data + bytes_offset, bits8_offset, show);
3564 }
3565
3566 btf_show_end_member(show);
3567 }
3568
3569 btf_show_end_struct_type(show);
3570 }
3571
3572 static void btf_struct_show(const struct btf *btf, const struct btf_type *t,
3573 u32 type_id, void *data, u8 bits_offset,
3574 struct btf_show *show)
3575 {
3576 const struct btf_member *m = show->state.member;
3577
3578 /*
3579 * First check if any members would be shown (are non-zero).
3580 * See comments above "struct btf_show" definition for more
3581 * details on how this works at a high-level.
3582 */
3583 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
3584 if (!show->state.depth_check) {
3585 show->state.depth_check = show->state.depth + 1;
3586 show->state.depth_to_show = 0;
3587 }
3588 __btf_struct_show(btf, t, type_id, data, bits_offset, show);
3589 /* Restore saved member data here */
3590 show->state.member = m;
3591 if (show->state.depth_check != show->state.depth + 1)
3592 return;
3593 show->state.depth_check = 0;
3594
3595 if (show->state.depth_to_show <= show->state.depth)
3596 return;
3597 /*
3598 * Reaching here indicates we have recursed and found
3599 * non-zero child values.
3600 */
3601 }
3602
3603 __btf_struct_show(btf, t, type_id, data, bits_offset, show);
3604 }
3605
3606 static struct btf_kind_operations struct_ops = {
3607 .check_meta = btf_struct_check_meta,
3608 .resolve = btf_struct_resolve,
3609 .check_member = btf_struct_check_member,
3610 .check_kflag_member = btf_generic_check_kflag_member,
3611 .log_details = btf_struct_log,
3612 .show = btf_struct_show,
3613 };
3614
3615 static int btf_enum_check_member(struct btf_verifier_env *env,
3616 const struct btf_type *struct_type,
3617 const struct btf_member *member,
3618 const struct btf_type *member_type)
3619 {
3620 u32 struct_bits_off = member->offset;
3621 u32 struct_size, bytes_offset;
3622
3623 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3624 btf_verifier_log_member(env, struct_type, member,
3625 "Member is not byte aligned");
3626 return -EINVAL;
3627 }
3628
3629 struct_size = struct_type->size;
3630 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
3631 if (struct_size - bytes_offset < member_type->size) {
3632 btf_verifier_log_member(env, struct_type, member,
3633 "Member exceeds struct_size");
3634 return -EINVAL;
3635 }
3636
3637 return 0;
3638 }
3639
3640 static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
3641 const struct btf_type *struct_type,
3642 const struct btf_member *member,
3643 const struct btf_type *member_type)
3644 {
3645 u32 struct_bits_off, nr_bits, bytes_end, struct_size;
3646 u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
3647
3648 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
3649 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
3650 if (!nr_bits) {
3651 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3652 btf_verifier_log_member(env, struct_type, member,
3653 "Member is not byte aligned");
3654 return -EINVAL;
3655 }
3656
3657 nr_bits = int_bitsize;
3658 } else if (nr_bits > int_bitsize) {
3659 btf_verifier_log_member(env, struct_type, member,
3660 "Invalid member bitfield_size");
3661 return -EINVAL;
3662 }
3663
3664 struct_size = struct_type->size;
3665 bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
3666 if (struct_size < bytes_end) {
3667 btf_verifier_log_member(env, struct_type, member,
3668 "Member exceeds struct_size");
3669 return -EINVAL;
3670 }
3671
3672 return 0;
3673 }
3674
3675 static s32 btf_enum_check_meta(struct btf_verifier_env *env,
3676 const struct btf_type *t,
3677 u32 meta_left)
3678 {
3679 const struct btf_enum *enums = btf_type_enum(t);
3680 struct btf *btf = env->btf;
3681 const char *fmt_str;
3682 u16 i, nr_enums;
3683 u32 meta_needed;
3684
3685 nr_enums = btf_type_vlen(t);
3686 meta_needed = nr_enums * sizeof(*enums);
3687
3688 if (meta_left < meta_needed) {
3689 btf_verifier_log_basic(env, t,
3690 "meta_left:%u meta_needed:%u",
3691 meta_left, meta_needed);
3692 return -EINVAL;
3693 }
3694
3695 if (t->size > 8 || !is_power_of_2(t->size)) {
3696 btf_verifier_log_type(env, t, "Unexpected size");
3697 return -EINVAL;
3698 }
3699
3700 	/* enum type must either have no name or a valid one */
3701 if (t->name_off &&
3702 !btf_name_valid_identifier(env->btf, t->name_off)) {
3703 btf_verifier_log_type(env, t, "Invalid name");
3704 return -EINVAL;
3705 }
3706
3707 btf_verifier_log_type(env, t, NULL);
3708
3709 for (i = 0; i < nr_enums; i++) {
3710 if (!btf_name_offset_valid(btf, enums[i].name_off)) {
3711 btf_verifier_log(env, "\tInvalid name_offset:%u",
3712 enums[i].name_off);
3713 return -EINVAL;
3714 }
3715
3716 /* enum member must have a valid name */
3717 if (!enums[i].name_off ||
3718 !btf_name_valid_identifier(btf, enums[i].name_off)) {
3719 btf_verifier_log_type(env, t, "Invalid name");
3720 return -EINVAL;
3721 }
3722
3723 if (env->log.level == BPF_LOG_KERNEL)
3724 continue;
3725 fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n";
3726 btf_verifier_log(env, fmt_str,
3727 __btf_name_by_offset(btf, enums[i].name_off),
3728 enums[i].val);
3729 }
3730
3731 return meta_needed;
3732 }
3733
3734 static void btf_enum_log(struct btf_verifier_env *env,
3735 const struct btf_type *t)
3736 {
3737 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3738 }
3739
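/* Show an enum value by its enumerator name; if no enumerator matches,
 * fall back to printing the raw value, signed or unsigned depending on
 * the kind_flag.
 */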
3740 static void btf_enum_show(const struct btf *btf, const struct btf_type *t,
3741 u32 type_id, void *data, u8 bits_offset,
3742 struct btf_show *show)
3743 {
3744 const struct btf_enum *enums = btf_type_enum(t);
3745 u32 i, nr_enums = btf_type_vlen(t);
3746 void *safe_data;
3747 int v;
3748
3749 safe_data = btf_show_start_type(show, t, type_id, data);
3750 if (!safe_data)
3751 return;
3752
3753 v = *(int *)safe_data;
3754
3755 for (i = 0; i < nr_enums; i++) {
3756 if (v != enums[i].val)
3757 continue;
3758
3759 btf_show_type_value(show, "%s",
3760 __btf_name_by_offset(btf,
3761 enums[i].name_off));
3762
3763 btf_show_end_type(show);
3764 return;
3765 }
3766
3767 if (btf_type_kflag(t))
3768 btf_show_type_value(show, "%d", v);
3769 else
3770 btf_show_type_value(show, "%u", v);
3771 btf_show_end_type(show);
3772 }
3773
3774 static struct btf_kind_operations enum_ops = {
3775 .check_meta = btf_enum_check_meta,
3776 .resolve = btf_df_resolve,
3777 .check_member = btf_enum_check_member,
3778 .check_kflag_member = btf_enum_check_kflag_member,
3779 .log_details = btf_enum_log,
3780 .show = btf_enum_show,
3781 };
3782
3783 static s32 btf_enum64_check_meta(struct btf_verifier_env *env,
3784 const struct btf_type *t,
3785 u32 meta_left)
3786 {
3787 const struct btf_enum64 *enums = btf_type_enum64(t);
3788 struct btf *btf = env->btf;
3789 const char *fmt_str;
3790 u16 i, nr_enums;
3791 u32 meta_needed;
3792
3793 nr_enums = btf_type_vlen(t);
3794 meta_needed = nr_enums * sizeof(*enums);
3795
3796 if (meta_left < meta_needed) {
3797 btf_verifier_log_basic(env, t,
3798 "meta_left:%u meta_needed:%u",
3799 meta_left, meta_needed);
3800 return -EINVAL;
3801 }
3802
3803 if (t->size > 8 || !is_power_of_2(t->size)) {
3804 btf_verifier_log_type(env, t, "Unexpected size");
3805 return -EINVAL;
3806 }
3807
3808 	/* enum type must either have no name or a valid one */
3809 if (t->name_off &&
3810 !btf_name_valid_identifier(env->btf, t->name_off)) {
3811 btf_verifier_log_type(env, t, "Invalid name");
3812 return -EINVAL;
3813 }
3814
3815 btf_verifier_log_type(env, t, NULL);
3816
3817 for (i = 0; i < nr_enums; i++) {
3818 if (!btf_name_offset_valid(btf, enums[i].name_off)) {
3819 btf_verifier_log(env, "\tInvalid name_offset:%u",
3820 enums[i].name_off);
3821 return -EINVAL;
3822 }
3823
3824 /* enum member must have a valid name */
3825 if (!enums[i].name_off ||
3826 !btf_name_valid_identifier(btf, enums[i].name_off)) {
3827 btf_verifier_log_type(env, t, "Invalid name");
3828 return -EINVAL;
3829 }
3830
3831 if (env->log.level == BPF_LOG_KERNEL)
3832 continue;
3833
3834 fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n";
3835 btf_verifier_log(env, fmt_str,
3836 __btf_name_by_offset(btf, enums[i].name_off),
3837 btf_enum64_value(enums + i));
3838 }
3839
3840 return meta_needed;
3841 }
3842
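/* Same as btf_enum_show() but for 64-bit BTF_KIND_ENUM64 values. */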
3843 static void btf_enum64_show(const struct btf *btf, const struct btf_type *t,
3844 u32 type_id, void *data, u8 bits_offset,
3845 struct btf_show *show)
3846 {
3847 const struct btf_enum64 *enums = btf_type_enum64(t);
3848 u32 i, nr_enums = btf_type_vlen(t);
3849 void *safe_data;
3850 s64 v;
3851
3852 safe_data = btf_show_start_type(show, t, type_id, data);
3853 if (!safe_data)
3854 return;
3855
3856 v = *(u64 *)safe_data;
3857
3858 for (i = 0; i < nr_enums; i++) {
3859 if (v != btf_enum64_value(enums + i))
3860 continue;
3861
3862 btf_show_type_value(show, "%s",
3863 __btf_name_by_offset(btf,
3864 enums[i].name_off));
3865
3866 btf_show_end_type(show);
3867 return;
3868 }
3869
3870 if (btf_type_kflag(t))
3871 btf_show_type_value(show, "%lld", v);
3872 else
3873 btf_show_type_value(show, "%llu", v);
3874 btf_show_end_type(show);
3875 }
3876
3877 static struct btf_kind_operations enum64_ops = {
3878 .check_meta = btf_enum64_check_meta,
3879 .resolve = btf_df_resolve,
3880 .check_member = btf_enum_check_member,
3881 .check_kflag_member = btf_enum_check_kflag_member,
3882 .log_details = btf_enum_log,
3883 .show = btf_enum64_show,
3884 };
3885
3886 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
3887 const struct btf_type *t,
3888 u32 meta_left)
3889 {
3890 u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
3891
3892 if (meta_left < meta_needed) {
3893 btf_verifier_log_basic(env, t,
3894 "meta_left:%u meta_needed:%u",
3895 meta_left, meta_needed);
3896 return -EINVAL;
3897 }
3898
3899 if (t->name_off) {
3900 btf_verifier_log_type(env, t, "Invalid name");
3901 return -EINVAL;
3902 }
3903
3904 if (btf_type_kflag(t)) {
3905 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3906 return -EINVAL;
3907 }
3908
3909 btf_verifier_log_type(env, t, NULL);
3910
3911 return meta_needed;
3912 }
3913
3914 static void btf_func_proto_log(struct btf_verifier_env *env,
3915 const struct btf_type *t)
3916 {
3917 const struct btf_param *args = (const struct btf_param *)(t + 1);
3918 u16 nr_args = btf_type_vlen(t), i;
3919
3920 btf_verifier_log(env, "return=%u args=(", t->type);
3921 if (!nr_args) {
3922 btf_verifier_log(env, "void");
3923 goto done;
3924 }
3925
3926 if (nr_args == 1 && !args[0].type) {
3927 /* Only one vararg */
3928 btf_verifier_log(env, "vararg");
3929 goto done;
3930 }
3931
3932 btf_verifier_log(env, "%u %s", args[0].type,
3933 __btf_name_by_offset(env->btf,
3934 args[0].name_off));
3935 for (i = 1; i < nr_args - 1; i++)
3936 btf_verifier_log(env, ", %u %s", args[i].type,
3937 __btf_name_by_offset(env->btf,
3938 args[i].name_off));
3939
3940 if (nr_args > 1) {
3941 const struct btf_param *last_arg = &args[nr_args - 1];
3942
3943 if (last_arg->type)
3944 btf_verifier_log(env, ", %u %s", last_arg->type,
3945 __btf_name_by_offset(env->btf,
3946 last_arg->name_off));
3947 else
3948 btf_verifier_log(env, ", vararg");
3949 }
3950
3951 done:
3952 btf_verifier_log(env, ")");
3953 }
3954
3955 static struct btf_kind_operations func_proto_ops = {
3956 .check_meta = btf_func_proto_check_meta,
3957 .resolve = btf_df_resolve,
3958 /*
3959 	 * BTF_KIND_FUNC_PROTO cannot be directly referred to by
3960 * a struct's member.
3961 *
3962 * It should be a function pointer instead.
3963 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
3964 *
3965 * Hence, there is no btf_func_check_member().
3966 */
3967 .check_member = btf_df_check_member,
3968 .check_kflag_member = btf_df_check_kflag_member,
3969 .log_details = btf_func_proto_log,
3970 .show = btf_df_show,
3971 };
3972
3973 static s32 btf_func_check_meta(struct btf_verifier_env *env,
3974 const struct btf_type *t,
3975 u32 meta_left)
3976 {
3977 if (!t->name_off ||
3978 !btf_name_valid_identifier(env->btf, t->name_off)) {
3979 btf_verifier_log_type(env, t, "Invalid name");
3980 return -EINVAL;
3981 }
3982
3983 if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
3984 btf_verifier_log_type(env, t, "Invalid func linkage");
3985 return -EINVAL;
3986 }
3987
3988 if (btf_type_kflag(t)) {
3989 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3990 return -EINVAL;
3991 }
3992
3993 btf_verifier_log_type(env, t, NULL);
3994
3995 return 0;
3996 }
3997
3998 static int btf_func_resolve(struct btf_verifier_env *env,
3999 const struct resolve_vertex *v)
4000 {
4001 const struct btf_type *t = v->t;
4002 u32 next_type_id = t->type;
4003 int err;
4004
4005 err = btf_func_check(env, t);
4006 if (err)
4007 return err;
4008
4009 env_stack_pop_resolved(env, next_type_id, 0);
4010 return 0;
4011 }
4012
4013 static struct btf_kind_operations func_ops = {
4014 .check_meta = btf_func_check_meta,
4015 .resolve = btf_func_resolve,
4016 .check_member = btf_df_check_member,
4017 .check_kflag_member = btf_df_check_kflag_member,
4018 .log_details = btf_ref_type_log,
4019 .show = btf_df_show,
4020 };
4021
4022 static s32 btf_var_check_meta(struct btf_verifier_env *env,
4023 const struct btf_type *t,
4024 u32 meta_left)
4025 {
4026 const struct btf_var *var;
4027 u32 meta_needed = sizeof(*var);
4028
4029 if (meta_left < meta_needed) {
4030 btf_verifier_log_basic(env, t,
4031 "meta_left:%u meta_needed:%u",
4032 meta_left, meta_needed);
4033 return -EINVAL;
4034 }
4035
4036 if (btf_type_vlen(t)) {
4037 btf_verifier_log_type(env, t, "vlen != 0");
4038 return -EINVAL;
4039 }
4040
4041 if (btf_type_kflag(t)) {
4042 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4043 return -EINVAL;
4044 }
4045
4046 if (!t->name_off ||
4047 !__btf_name_valid(env->btf, t->name_off, true)) {
4048 btf_verifier_log_type(env, t, "Invalid name");
4049 return -EINVAL;
4050 }
4051
4052 	/* A var cannot be of type void */
4053 if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
4054 btf_verifier_log_type(env, t, "Invalid type_id");
4055 return -EINVAL;
4056 }
4057
4058 var = btf_type_var(t);
4059 if (var->linkage != BTF_VAR_STATIC &&
4060 var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
4061 btf_verifier_log_type(env, t, "Linkage not supported");
4062 return -EINVAL;
4063 }
4064
4065 btf_verifier_log_type(env, t, NULL);
4066
4067 return meta_needed;
4068 }
4069
4070 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
4071 {
4072 const struct btf_var *var = btf_type_var(t);
4073
4074 btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
4075 }
4076
4077 static const struct btf_kind_operations var_ops = {
4078 .check_meta = btf_var_check_meta,
4079 .resolve = btf_var_resolve,
4080 .check_member = btf_df_check_member,
4081 .check_kflag_member = btf_df_check_kflag_member,
4082 .log_details = btf_var_log,
4083 .show = btf_var_show,
4084 };
4085
4086 static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
4087 const struct btf_type *t,
4088 u32 meta_left)
4089 {
4090 const struct btf_var_secinfo *vsi;
4091 u64 last_vsi_end_off = 0, sum = 0;
4092 u32 i, meta_needed;
4093
4094 meta_needed = btf_type_vlen(t) * sizeof(*vsi);
4095 if (meta_left < meta_needed) {
4096 btf_verifier_log_basic(env, t,
4097 "meta_left:%u meta_needed:%u",
4098 meta_left, meta_needed);
4099 return -EINVAL;
4100 }
4101
4102 if (!t->size) {
4103 btf_verifier_log_type(env, t, "size == 0");
4104 return -EINVAL;
4105 }
4106
4107 if (btf_type_kflag(t)) {
4108 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4109 return -EINVAL;
4110 }
4111
4112 if (!t->name_off ||
4113 !btf_name_valid_section(env->btf, t->name_off)) {
4114 btf_verifier_log_type(env, t, "Invalid name");
4115 return -EINVAL;
4116 }
4117
4118 btf_verifier_log_type(env, t, NULL);
4119
4120 for_each_vsi(i, t, vsi) {
4121 		/* A var cannot be of type void */
4122 if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
4123 btf_verifier_log_vsi(env, t, vsi,
4124 "Invalid type_id");
4125 return -EINVAL;
4126 }
4127
4128 if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
4129 btf_verifier_log_vsi(env, t, vsi,
4130 "Invalid offset");
4131 return -EINVAL;
4132 }
4133
4134 if (!vsi->size || vsi->size > t->size) {
4135 btf_verifier_log_vsi(env, t, vsi,
4136 "Invalid size");
4137 return -EINVAL;
4138 }
4139
4140 last_vsi_end_off = vsi->offset + vsi->size;
4141 if (last_vsi_end_off > t->size) {
4142 btf_verifier_log_vsi(env, t, vsi,
4143 "Invalid offset+size");
4144 return -EINVAL;
4145 }
4146
4147 btf_verifier_log_vsi(env, t, vsi, NULL);
4148 sum += vsi->size;
4149 }
4150
4151 if (t->size < sum) {
4152 btf_verifier_log_type(env, t, "Invalid btf_info size");
4153 return -EINVAL;
4154 }
4155
4156 return meta_needed;
4157 }
4158
4159 static int btf_datasec_resolve(struct btf_verifier_env *env,
4160 const struct resolve_vertex *v)
4161 {
4162 const struct btf_var_secinfo *vsi;
4163 struct btf *btf = env->btf;
4164 u16 i;
4165
4166 for_each_vsi_from(i, v->next_member, v->t, vsi) {
4167 u32 var_type_id = vsi->type, type_id, type_size = 0;
4168 const struct btf_type *var_type = btf_type_by_id(env->btf,
4169 var_type_id);
4170 if (!var_type || !btf_type_is_var(var_type)) {
4171 btf_verifier_log_vsi(env, v->t, vsi,
4172 "Not a VAR kind member");
4173 return -EINVAL;
4174 }
4175
4176 if (!env_type_is_resolve_sink(env, var_type) &&
4177 !env_type_is_resolved(env, var_type_id)) {
4178 env_stack_set_next_member(env, i + 1);
4179 return env_stack_push(env, var_type, var_type_id);
4180 }
4181
4182 type_id = var_type->type;
4183 if (!btf_type_id_size(btf, &type_id, &type_size)) {
4184 btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
4185 return -EINVAL;
4186 }
4187
4188 if (vsi->size < type_size) {
4189 btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
4190 return -EINVAL;
4191 }
4192 }
4193
4194 env_stack_pop_resolved(env, 0, 0);
4195 return 0;
4196 }
4197
4198 static void btf_datasec_log(struct btf_verifier_env *env,
4199 const struct btf_type *t)
4200 {
4201 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4202 }
4203
4204 static void btf_datasec_show(const struct btf *btf,
4205 const struct btf_type *t, u32 type_id,
4206 void *data, u8 bits_offset,
4207 struct btf_show *show)
4208 {
4209 const struct btf_var_secinfo *vsi;
4210 const struct btf_type *var;
4211 u32 i;
4212
4213 if (!btf_show_start_type(show, t, type_id, data))
4214 return;
4215
4216 btf_show_type_value(show, "section (\"%s\") = {",
4217 __btf_name_by_offset(btf, t->name_off));
4218 for_each_vsi(i, t, vsi) {
4219 var = btf_type_by_id(btf, vsi->type);
4220 if (i)
4221 btf_show(show, ",");
4222 btf_type_ops(var)->show(btf, var, vsi->type,
4223 data + vsi->offset, bits_offset, show);
4224 }
4225 btf_show_end_type(show);
4226 }
4227
4228 static const struct btf_kind_operations datasec_ops = {
4229 .check_meta = btf_datasec_check_meta,
4230 .resolve = btf_datasec_resolve,
4231 .check_member = btf_df_check_member,
4232 .check_kflag_member = btf_df_check_kflag_member,
4233 .log_details = btf_datasec_log,
4234 .show = btf_datasec_show,
4235 };
4236
4237 static s32 btf_float_check_meta(struct btf_verifier_env *env,
4238 const struct btf_type *t,
4239 u32 meta_left)
4240 {
4241 if (btf_type_vlen(t)) {
4242 btf_verifier_log_type(env, t, "vlen != 0");
4243 return -EINVAL;
4244 }
4245
4246 if (btf_type_kflag(t)) {
4247 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4248 return -EINVAL;
4249 }
4250
4251 if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 &&
4252 t->size != 16) {
4253 btf_verifier_log_type(env, t, "Invalid type_size");
4254 return -EINVAL;
4255 }
4256
4257 btf_verifier_log_type(env, t, NULL);
4258
4259 return 0;
4260 }
4261
4262 static int btf_float_check_member(struct btf_verifier_env *env,
4263 const struct btf_type *struct_type,
4264 const struct btf_member *member,
4265 const struct btf_type *member_type)
4266 {
4267 u64 start_offset_bytes;
4268 u64 end_offset_bytes;
4269 u64 misalign_bits;
4270 u64 align_bytes;
4271 u64 align_bits;
4272
4273 /* Different architectures have different alignment requirements, so
4274 * here we check only for the reasonable minimum. This way we ensure
4275 * that types after CO-RE can pass the kernel BTF verifier.
4276 */
4277 align_bytes = min_t(u64, sizeof(void *), member_type->size);
4278 align_bits = align_bytes * BITS_PER_BYTE;
4279 div64_u64_rem(member->offset, align_bits, &misalign_bits);
4280 if (misalign_bits) {
4281 btf_verifier_log_member(env, struct_type, member,
4282 "Member is not properly aligned");
4283 return -EINVAL;
4284 }
4285
4286 start_offset_bytes = member->offset / BITS_PER_BYTE;
4287 end_offset_bytes = start_offset_bytes + member_type->size;
4288 if (end_offset_bytes > struct_type->size) {
4289 btf_verifier_log_member(env, struct_type, member,
4290 "Member exceeds struct_size");
4291 return -EINVAL;
4292 }
4293
4294 return 0;
4295 }
4296
4297 static void btf_float_log(struct btf_verifier_env *env,
4298 const struct btf_type *t)
4299 {
4300 btf_verifier_log(env, "size=%u", t->size);
4301 }
4302
4303 static const struct btf_kind_operations float_ops = {
4304 .check_meta = btf_float_check_meta,
4305 .resolve = btf_df_resolve,
4306 .check_member = btf_float_check_member,
4307 .check_kflag_member = btf_generic_check_kflag_member,
4308 .log_details = btf_float_log,
4309 .show = btf_df_show,
4310 };
4311
4312 static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env,
4313 const struct btf_type *t,
4314 u32 meta_left)
4315 {
4316 const struct btf_decl_tag *tag;
4317 u32 meta_needed = sizeof(*tag);
4318 s32 component_idx;
4319 const char *value;
4320
4321 if (meta_left < meta_needed) {
4322 btf_verifier_log_basic(env, t,
4323 "meta_left:%u meta_needed:%u",
4324 meta_left, meta_needed);
4325 return -EINVAL;
4326 }
4327
4328 value = btf_name_by_offset(env->btf, t->name_off);
4329 if (!value || !value[0]) {
4330 btf_verifier_log_type(env, t, "Invalid value");
4331 return -EINVAL;
4332 }
4333
4334 if (btf_type_vlen(t)) {
4335 btf_verifier_log_type(env, t, "vlen != 0");
4336 return -EINVAL;
4337 }
4338
4339 if (btf_type_kflag(t)) {
4340 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4341 return -EINVAL;
4342 }
4343
4344 component_idx = btf_type_decl_tag(t)->component_idx;
4345 if (component_idx < -1) {
4346 btf_verifier_log_type(env, t, "Invalid component_idx");
4347 return -EINVAL;
4348 }
4349
4350 btf_verifier_log_type(env, t, NULL);
4351
4352 return meta_needed;
4353 }
4354
4355 static int btf_decl_tag_resolve(struct btf_verifier_env *env,
4356 const struct resolve_vertex *v)
4357 {
4358 const struct btf_type *next_type;
4359 const struct btf_type *t = v->t;
4360 u32 next_type_id = t->type;
4361 struct btf *btf = env->btf;
4362 s32 component_idx;
4363 u32 vlen;
4364
4365 next_type = btf_type_by_id(btf, next_type_id);
4366 if (!next_type || !btf_type_is_decl_tag_target(next_type)) {
4367 btf_verifier_log_type(env, v->t, "Invalid type_id");
4368 return -EINVAL;
4369 }
4370
4371 if (!env_type_is_resolve_sink(env, next_type) &&
4372 !env_type_is_resolved(env, next_type_id))
4373 return env_stack_push(env, next_type, next_type_id);
4374
4375 component_idx = btf_type_decl_tag(t)->component_idx;
4376 if (component_idx != -1) {
4377 if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) {
4378 btf_verifier_log_type(env, v->t, "Invalid component_idx");
4379 return -EINVAL;
4380 }
4381
4382 if (btf_type_is_struct(next_type)) {
4383 vlen = btf_type_vlen(next_type);
4384 } else {
4385 /* next_type should be a function */
4386 next_type = btf_type_by_id(btf, next_type->type);
4387 vlen = btf_type_vlen(next_type);
4388 }
4389
4390 if ((u32)component_idx >= vlen) {
4391 btf_verifier_log_type(env, v->t, "Invalid component_idx");
4392 return -EINVAL;
4393 }
4394 }
4395
4396 env_stack_pop_resolved(env, next_type_id, 0);
4397
4398 return 0;
4399 }
4400
4401 static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t)
4402 {
4403 btf_verifier_log(env, "type=%u component_idx=%d", t->type,
4404 btf_type_decl_tag(t)->component_idx);
4405 }
4406
4407 static const struct btf_kind_operations decl_tag_ops = {
4408 .check_meta = btf_decl_tag_check_meta,
4409 .resolve = btf_decl_tag_resolve,
4410 .check_member = btf_df_check_member,
4411 .check_kflag_member = btf_df_check_kflag_member,
4412 .log_details = btf_decl_tag_log,
4413 .show = btf_df_show,
4414 };
4415
4416 static int btf_func_proto_check(struct btf_verifier_env *env,
4417 const struct btf_type *t)
4418 {
4419 const struct btf_type *ret_type;
4420 const struct btf_param *args;
4421 const struct btf *btf;
4422 u16 nr_args, i;
4423 int err;
4424
4425 btf = env->btf;
4426 args = (const struct btf_param *)(t + 1);
4427 nr_args = btf_type_vlen(t);
4428
4429 /* Check func return type which could be "void" (t->type == 0) */
4430 if (t->type) {
4431 u32 ret_type_id = t->type;
4432
4433 ret_type = btf_type_by_id(btf, ret_type_id);
4434 if (!ret_type) {
4435 btf_verifier_log_type(env, t, "Invalid return type");
4436 return -EINVAL;
4437 }
4438
4439 if (btf_type_is_resolve_source_only(ret_type)) {
4440 btf_verifier_log_type(env, t, "Invalid return type");
4441 return -EINVAL;
4442 }
4443
4444 if (btf_type_needs_resolve(ret_type) &&
4445 !env_type_is_resolved(env, ret_type_id)) {
4446 err = btf_resolve(env, ret_type, ret_type_id);
4447 if (err)
4448 return err;
4449 }
4450
4451 /* Ensure the return type is a type that has a size */
4452 if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
4453 btf_verifier_log_type(env, t, "Invalid return type");
4454 return -EINVAL;
4455 }
4456 }
4457
4458 if (!nr_args)
4459 return 0;
4460
4461 /* Last func arg type_id could be 0 if it is a vararg */
4462 if (!args[nr_args - 1].type) {
4463 if (args[nr_args - 1].name_off) {
4464 btf_verifier_log_type(env, t, "Invalid arg#%u",
4465 nr_args);
4466 return -EINVAL;
4467 }
4468 nr_args--;
4469 }
4470
4471 err = 0;
4472 for (i = 0; i < nr_args; i++) {
4473 const struct btf_type *arg_type;
4474 u32 arg_type_id;
4475
4476 arg_type_id = args[i].type;
4477 arg_type = btf_type_by_id(btf, arg_type_id);
4478 if (!arg_type) {
4479 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
4480 err = -EINVAL;
4481 break;
4482 }
4483
4484 if (btf_type_is_resolve_source_only(arg_type)) {
4485 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
4486 return -EINVAL;
4487 }
4488
4489 if (args[i].name_off &&
4490 (!btf_name_offset_valid(btf, args[i].name_off) ||
4491 !btf_name_valid_identifier(btf, args[i].name_off))) {
4492 btf_verifier_log_type(env, t,
4493 "Invalid arg#%u", i + 1);
4494 err = -EINVAL;
4495 break;
4496 }
4497
4498 if (btf_type_needs_resolve(arg_type) &&
4499 !env_type_is_resolved(env, arg_type_id)) {
4500 err = btf_resolve(env, arg_type, arg_type_id);
4501 if (err)
4502 break;
4503 }
4504
4505 if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
4506 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
4507 err = -EINVAL;
4508 break;
4509 }
4510 }
4511
4512 return err;
4513 }
4514
4515 static int btf_func_check(struct btf_verifier_env *env,
4516 const struct btf_type *t)
4517 {
4518 const struct btf_type *proto_type;
4519 const struct btf_param *args;
4520 const struct btf *btf;
4521 u16 nr_args, i;
4522
4523 btf = env->btf;
4524 proto_type = btf_type_by_id(btf, t->type);
4525
4526 if (!proto_type || !btf_type_is_func_proto(proto_type)) {
4527 btf_verifier_log_type(env, t, "Invalid type_id");
4528 return -EINVAL;
4529 }
4530
4531 args = (const struct btf_param *)(proto_type + 1);
4532 nr_args = btf_type_vlen(proto_type);
4533 for (i = 0; i < nr_args; i++) {
4534 if (!args[i].name_off && args[i].type) {
4535 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
4536 return -EINVAL;
4537 }
4538 }
4539
4540 return 0;
4541 }
4542
4543 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
4544 [BTF_KIND_INT] = &int_ops,
4545 [BTF_KIND_PTR] = &ptr_ops,
4546 [BTF_KIND_ARRAY] = &array_ops,
4547 [BTF_KIND_STRUCT] = &struct_ops,
4548 [BTF_KIND_UNION] = &struct_ops,
4549 [BTF_KIND_ENUM] = &enum_ops,
4550 [BTF_KIND_FWD] = &fwd_ops,
4551 [BTF_KIND_TYPEDEF] = &modifier_ops,
4552 [BTF_KIND_VOLATILE] = &modifier_ops,
4553 [BTF_KIND_CONST] = &modifier_ops,
4554 [BTF_KIND_RESTRICT] = &modifier_ops,
4555 [BTF_KIND_FUNC] = &func_ops,
4556 [BTF_KIND_FUNC_PROTO] = &func_proto_ops,
4557 [BTF_KIND_VAR] = &var_ops,
4558 [BTF_KIND_DATASEC] = &datasec_ops,
4559 [BTF_KIND_FLOAT] = &float_ops,
4560 [BTF_KIND_DECL_TAG] = &decl_tag_ops,
4561 [BTF_KIND_TYPE_TAG] = &modifier_ops,
4562 [BTF_KIND_ENUM64] = &enum64_ops,
4563 };
4564
4565 static s32 btf_check_meta(struct btf_verifier_env *env,
4566 const struct btf_type *t,
4567 u32 meta_left)
4568 {
4569 u32 saved_meta_left = meta_left;
4570 s32 var_meta_size;
4571
4572 if (meta_left < sizeof(*t)) {
4573 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
4574 env->log_type_id, meta_left, sizeof(*t));
4575 return -EINVAL;
4576 }
4577 meta_left -= sizeof(*t);
4578
4579 if (t->info & ~BTF_INFO_MASK) {
4580 btf_verifier_log(env, "[%u] Invalid btf_info:%x",
4581 env->log_type_id, t->info);
4582 return -EINVAL;
4583 }
4584
4585 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
4586 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
4587 btf_verifier_log(env, "[%u] Invalid kind:%u",
4588 env->log_type_id, BTF_INFO_KIND(t->info));
4589 return -EINVAL;
4590 }
4591
4592 if (!btf_name_offset_valid(env->btf, t->name_off)) {
4593 btf_verifier_log(env, "[%u] Invalid name_offset:%u",
4594 env->log_type_id, t->name_off);
4595 return -EINVAL;
4596 }
4597
4598 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
4599 if (var_meta_size < 0)
4600 return var_meta_size;
4601
4602 meta_left -= var_meta_size;
4603
4604 return saved_meta_left - meta_left;
4605 }
4606
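/* Walk the type section, run the kind-specific check_meta() callback on
 * each btf_type and add it to the btf's type table.
 */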
4607 static int btf_check_all_metas(struct btf_verifier_env *env)
4608 {
4609 struct btf *btf = env->btf;
4610 struct btf_header *hdr;
4611 void *cur, *end;
4612
4613 hdr = &btf->hdr;
4614 cur = btf->nohdr_data + hdr->type_off;
4615 end = cur + hdr->type_len;
4616
4617 env->log_type_id = btf->base_btf ? btf->start_id : 1;
4618 while (cur < end) {
4619 struct btf_type *t = cur;
4620 s32 meta_size;
4621
4622 meta_size = btf_check_meta(env, t, end - cur);
4623 if (meta_size < 0)
4624 return meta_size;
4625
4626 btf_add_type(env, t);
4627 cur += meta_size;
4628 env->log_type_id++;
4629 }
4630
4631 return 0;
4632 }
4633
4634 static bool btf_resolve_valid(struct btf_verifier_env *env,
4635 const struct btf_type *t,
4636 u32 type_id)
4637 {
4638 struct btf *btf = env->btf;
4639
4640 if (!env_type_is_resolved(env, type_id))
4641 return false;
4642
4643 if (btf_type_is_struct(t) || btf_type_is_datasec(t))
4644 return !btf_resolved_type_id(btf, type_id) &&
4645 !btf_resolved_type_size(btf, type_id);
4646
4647 if (btf_type_is_decl_tag(t) || btf_type_is_func(t))
4648 return btf_resolved_type_id(btf, type_id) &&
4649 !btf_resolved_type_size(btf, type_id);
4650
4651 if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
4652 btf_type_is_var(t)) {
4653 t = btf_type_id_resolve(btf, &type_id);
4654 return t &&
4655 !btf_type_is_modifier(t) &&
4656 !btf_type_is_var(t) &&
4657 !btf_type_is_datasec(t);
4658 }
4659
4660 if (btf_type_is_array(t)) {
4661 const struct btf_array *array = btf_type_array(t);
4662 const struct btf_type *elem_type;
4663 u32 elem_type_id = array->type;
4664 u32 elem_size;
4665
4666 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
4667 return elem_type && !btf_type_is_modifier(elem_type) &&
4668 (array->nelems * elem_size ==
4669 btf_resolved_type_size(btf, type_id));
4670 }
4671
4672 return false;
4673 }
4674
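/* Resolve one type by draining the resolve stack, then verify that the
 * recorded resolved id/size are consistent for the type's kind.
 */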
4675 static int btf_resolve(struct btf_verifier_env *env,
4676 const struct btf_type *t, u32 type_id)
4677 {
4678 u32 save_log_type_id = env->log_type_id;
4679 const struct resolve_vertex *v;
4680 int err = 0;
4681
4682 env->resolve_mode = RESOLVE_TBD;
4683 env_stack_push(env, t, type_id);
4684 while (!err && (v = env_stack_peak(env))) {
4685 env->log_type_id = v->type_id;
4686 err = btf_type_ops(v->t)->resolve(env, v);
4687 }
4688
4689 env->log_type_id = type_id;
4690 if (err == -E2BIG) {
4691 btf_verifier_log_type(env, t,
4692 "Exceeded max resolving depth:%u",
4693 MAX_RESOLVE_DEPTH);
4694 } else if (err == -EEXIST) {
4695 btf_verifier_log_type(env, t, "Loop detected");
4696 }
4697
4698 /* Final sanity check */
4699 if (!err && !btf_resolve_valid(env, t, type_id)) {
4700 btf_verifier_log_type(env, t, "Invalid resolve state");
4701 err = -EINVAL;
4702 }
4703
4704 env->log_type_id = save_log_type_id;
4705 return err;
4706 }
4707
4708 static int btf_check_all_types(struct btf_verifier_env *env)
4709 {
4710 struct btf *btf = env->btf;
4711 const struct btf_type *t;
4712 u32 type_id, i;
4713 int err;
4714
4715 err = env_resolve_init(env);
4716 if (err)
4717 return err;
4718
4719 env->phase++;
4720 for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) {
4721 type_id = btf->start_id + i;
4722 t = btf_type_by_id(btf, type_id);
4723
4724 env->log_type_id = type_id;
4725 if (btf_type_needs_resolve(t) &&
4726 !env_type_is_resolved(env, type_id)) {
4727 err = btf_resolve(env, t, type_id);
4728 if (err)
4729 return err;
4730 }
4731
4732 if (btf_type_is_func_proto(t)) {
4733 err = btf_func_proto_check(env, t);
4734 if (err)
4735 return err;
4736 }
4737 }
4738
4739 return 0;
4740 }
4741
4742 static int btf_parse_type_sec(struct btf_verifier_env *env)
4743 {
4744 const struct btf_header *hdr = &env->btf->hdr;
4745 int err;
4746
4747 /* Type section must align to 4 bytes */
4748 if (hdr->type_off & (sizeof(u32) - 1)) {
4749 btf_verifier_log(env, "Unaligned type_off");
4750 return -EINVAL;
4751 }
4752
4753 if (!env->btf->base_btf && !hdr->type_len) {
4754 btf_verifier_log(env, "No type found");
4755 return -EINVAL;
4756 }
4757
4758 err = btf_check_all_metas(env);
4759 if (err)
4760 return err;
4761
4762 return btf_check_all_types(env);
4763 }
4764
4765 static int btf_parse_str_sec(struct btf_verifier_env *env)
4766 {
4767 const struct btf_header *hdr;
4768 struct btf *btf = env->btf;
4769 const char *start, *end;
4770
4771 hdr = &btf->hdr;
4772 start = btf->nohdr_data + hdr->str_off;
4773 end = start + hdr->str_len;
4774
4775 if (end != btf->data + btf->data_size) {
4776 btf_verifier_log(env, "String section is not at the end");
4777 return -EINVAL;
4778 }
4779
4780 btf->strings = start;
4781
4782 if (btf->base_btf && !hdr->str_len)
4783 return 0;
4784 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) {
4785 btf_verifier_log(env, "Invalid string section");
4786 return -EINVAL;
4787 }
4788 if (!btf->base_btf && start[0]) {
4789 btf_verifier_log(env, "Invalid string section");
4790 return -EINVAL;
4791 }
4792
4793 return 0;
4794 }
4795
4796 static const size_t btf_sec_info_offset[] = {
4797 offsetof(struct btf_header, type_off),
4798 offsetof(struct btf_header, str_off),
4799 };
4800
4801 static int btf_sec_info_cmp(const void *a, const void *b)
4802 {
4803 const struct btf_sec_info *x = a;
4804 const struct btf_sec_info *y = b;
4805
4806 return (int)(x->off - y->off) ? : (int)(x->len - y->len);
4807 }
4808
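/* Verify that the type and string sections exactly cover the data that
 * follows the header, with no gaps, overlaps or unknown trailing bytes.
 */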
4809 static int btf_check_sec_info(struct btf_verifier_env *env,
4810 u32 btf_data_size)
4811 {
4812 struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
4813 u32 total, expected_total, i;
4814 const struct btf_header *hdr;
4815 const struct btf *btf;
4816
4817 btf = env->btf;
4818 hdr = &btf->hdr;
4819
4820 /* Populate the secs from hdr */
4821 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
4822 secs[i] = *(struct btf_sec_info *)((void *)hdr +
4823 btf_sec_info_offset[i]);
4824
4825 sort(secs, ARRAY_SIZE(btf_sec_info_offset),
4826 sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
4827
4828 /* Check for gaps and overlap among sections */
4829 total = 0;
4830 expected_total = btf_data_size - hdr->hdr_len;
4831 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
4832 if (expected_total < secs[i].off) {
4833 btf_verifier_log(env, "Invalid section offset");
4834 return -EINVAL;
4835 }
4836 if (total < secs[i].off) {
4837 /* gap */
4838 btf_verifier_log(env, "Unsupported section found");
4839 return -EINVAL;
4840 }
4841 if (total > secs[i].off) {
4842 btf_verifier_log(env, "Section overlap found");
4843 return -EINVAL;
4844 }
4845 if (expected_total - total < secs[i].len) {
4846 btf_verifier_log(env,
4847 "Total section length too long");
4848 return -EINVAL;
4849 }
4850 total += secs[i].len;
4851 }
4852
4853 /* There is data other than hdr and known sections */
4854 if (expected_total != total) {
4855 btf_verifier_log(env, "Unsupported section found");
4856 return -EINVAL;
4857 }
4858
4859 return 0;
4860 }
4861
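/* Validate the BTF header: length, magic, version, flags, and that any
 * header fields beyond those known to this kernel are zero.
 */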
4862 static int btf_parse_hdr(struct btf_verifier_env *env)
4863 {
4864 u32 hdr_len, hdr_copy, btf_data_size;
4865 const struct btf_header *hdr;
4866 struct btf *btf;
4867
4868 btf = env->btf;
4869 btf_data_size = btf->data_size;
4870
4871 if (btf_data_size < offsetofend(struct btf_header, hdr_len)) {
4872 btf_verifier_log(env, "hdr_len not found");
4873 return -EINVAL;
4874 }
4875
4876 hdr = btf->data;
4877 hdr_len = hdr->hdr_len;
4878 if (btf_data_size < hdr_len) {
4879 btf_verifier_log(env, "btf_header not found");
4880 return -EINVAL;
4881 }
4882
4883 /* Ensure the unsupported header fields are zero */
4884 if (hdr_len > sizeof(btf->hdr)) {
4885 u8 *expected_zero = btf->data + sizeof(btf->hdr);
4886 u8 *end = btf->data + hdr_len;
4887
4888 for (; expected_zero < end; expected_zero++) {
4889 if (*expected_zero) {
4890 btf_verifier_log(env, "Unsupported btf_header");
4891 return -E2BIG;
4892 }
4893 }
4894 }
4895
4896 hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
4897 memcpy(&btf->hdr, btf->data, hdr_copy);
4898
4899 hdr = &btf->hdr;
4900
4901 btf_verifier_log_hdr(env, btf_data_size);
4902
4903 if (hdr->magic != BTF_MAGIC) {
4904 btf_verifier_log(env, "Invalid magic");
4905 return -EINVAL;
4906 }
4907
4908 if (hdr->version != BTF_VERSION) {
4909 btf_verifier_log(env, "Unsupported version");
4910 return -ENOTSUPP;
4911 }
4912
4913 if (hdr->flags) {
4914 btf_verifier_log(env, "Unsupported flags");
4915 return -ENOTSUPP;
4916 }
4917
4918 if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
4919 btf_verifier_log(env, "No data");
4920 return -EINVAL;
4921 }
4922
4923 return btf_check_sec_info(env, btf_data_size);
4924 }
4925
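/* Within any chain of modifiers, BTF_KIND_TYPE_TAG entries must come
 * before all other modifiers; the chain length is also bounded to catch
 * cycles.
 */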
4926 static int btf_check_type_tags(struct btf_verifier_env *env,
4927 struct btf *btf, int start_id)
4928 {
4929 int i, n, good_id = start_id - 1;
4930 bool in_tags;
4931
4932 n = btf_nr_types(btf);
4933 for (i = start_id; i < n; i++) {
4934 const struct btf_type *t;
4935 int chain_limit = 32;
4936 u32 cur_id = i;
4937
4938 t = btf_type_by_id(btf, i);
4939 if (!t)
4940 return -EINVAL;
4941 if (!btf_type_is_modifier(t))
4942 continue;
4943
4944 cond_resched();
4945
4946 in_tags = btf_type_is_type_tag(t);
4947 while (btf_type_is_modifier(t)) {
4948 if (!chain_limit--) {
4949 btf_verifier_log(env, "Max chain length or cycle detected");
4950 return -ELOOP;
4951 }
4952 if (btf_type_is_type_tag(t)) {
4953 if (!in_tags) {
4954 btf_verifier_log(env, "Type tags don't precede modifiers");
4955 return -EINVAL;
4956 }
4957 } else if (in_tags) {
4958 in_tags = false;
4959 }
4960 if (cur_id <= good_id)
4961 break;
4962 /* Move to next type */
4963 cur_id = t->type;
4964 t = btf_type_by_id(btf, cur_id);
4965 if (!t)
4966 return -EINVAL;
4967 }
4968 good_id = i;
4969 }
4970 return 0;
4971 }
4972
4973 static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size,
4974 u32 log_level, char __user *log_ubuf, u32 log_size)
4975 {
4976 struct btf_verifier_env *env = NULL;
4977 struct bpf_verifier_log *log;
4978 struct btf *btf = NULL;
4979 u8 *data;
4980 int err;
4981
4982 if (btf_data_size > BTF_MAX_SIZE)
4983 return ERR_PTR(-E2BIG);
4984
4985 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
4986 if (!env)
4987 return ERR_PTR(-ENOMEM);
4988
4989 log = &env->log;
4990 if (log_level || log_ubuf || log_size) {
4991 /* user requested verbose verifier output
4992 * and supplied buffer to store the verification trace
4993 */
4994 log->level = log_level;
4995 log->ubuf = log_ubuf;
4996 log->len_total = log_size;
4997
4998 /* log attributes have to be sane */
4999 if (!bpf_verifier_log_attr_valid(log)) {
5000 err = -EINVAL;
5001 goto errout;
5002 }
5003 }
5004
5005 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5006 if (!btf) {
5007 err = -ENOMEM;
5008 goto errout;
5009 }
5010 env->btf = btf;
5011
5012 data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
5013 if (!data) {
5014 err = -ENOMEM;
5015 goto errout;
5016 }
5017
5018 btf->data = data;
5019 btf->data_size = btf_data_size;
5020
5021 if (copy_from_bpfptr(data, btf_data, btf_data_size)) {
5022 err = -EFAULT;
5023 goto errout;
5024 }
5025
5026 err = btf_parse_hdr(env);
5027 if (err)
5028 goto errout;
5029
5030 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
5031
5032 err = btf_parse_str_sec(env);
5033 if (err)
5034 goto errout;
5035
5036 err = btf_parse_type_sec(env);
5037 if (err)
5038 goto errout;
5039
5040 err = btf_check_type_tags(env, btf, 1);
5041 if (err)
5042 goto errout;
5043
5044 if (log->level && bpf_verifier_log_full(log)) {
5045 err = -ENOSPC;
5046 goto errout;
5047 }
5048
5049 btf_verifier_env_free(env);
5050 refcount_set(&btf->refcnt, 1);
5051 return btf;
5052
5053 errout:
5054 btf_verifier_env_free(env);
5055 if (btf)
5056 btf_free(btf);
5057 return ERR_PTR(err);
5058 }
5059
5060 extern char __weak __start_BTF[];
5061 extern char __weak __stop_BTF[];
5062 extern struct btf *btf_vmlinux;
5063
5064 #define BPF_MAP_TYPE(_id, _ops)
5065 #define BPF_LINK_TYPE(_id, _name)
5066 static union {
5067 struct bpf_ctx_convert {
5068 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5069 prog_ctx_type _id##_prog; \
5070 kern_ctx_type _id##_kern;
5071 #include <linux/bpf_types.h>
5072 #undef BPF_PROG_TYPE
5073 } *__t;
5074 /* 't' is written once under lock. Read many times. */
5075 const struct btf_type *t;
5076 } bpf_ctx_convert;
5077 enum {
5078 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5079 __ctx_convert##_id,
5080 #include <linux/bpf_types.h>
5081 #undef BPF_PROG_TYPE
5082 __ctx_convert_unused, /* to avoid empty enum in extreme .config */
5083 };
5084 static u8 bpf_ctx_convert_map[] = {
5085 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5086 [_id] = __ctx_convert##_id,
5087 #include <linux/bpf_types.h>
5088 #undef BPF_PROG_TYPE
5089 0, /* avoid empty array */
5090 };
5091 #undef BPF_MAP_TYPE
5092 #undef BPF_LINK_TYPE
5093
5094 static const struct btf_member *
5095 btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
5096 const struct btf_type *t, enum bpf_prog_type prog_type,
5097 int arg)
5098 {
5099 const struct btf_type *conv_struct;
5100 const struct btf_type *ctx_struct;
5101 const struct btf_member *ctx_type;
5102 const char *tname, *ctx_tname;
5103
5104 conv_struct = bpf_ctx_convert.t;
5105 if (!conv_struct) {
5106 bpf_log(log, "btf_vmlinux is malformed\n");
5107 return NULL;
5108 }
5109 t = btf_type_by_id(btf, t->type);
5110 while (btf_type_is_modifier(t))
5111 t = btf_type_by_id(btf, t->type);
5112 if (!btf_type_is_struct(t)) {
5113 /* Only pointer to struct is supported for now.
5114 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
5115 * is not supported yet.
5116 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
5117 */
5118 return NULL;
5119 }
5120 tname = btf_name_by_offset(btf, t->name_off);
5121 if (!tname) {
5122 bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
5123 return NULL;
5124 }
5125 /* prog_type is valid bpf program type. No need for bounds check. */
5126 ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
5127 /* ctx_struct is a pointer to prog_ctx_type in vmlinux.
5128 * Like 'struct __sk_buff'
5129 */
5130 ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type);
5131 if (!ctx_struct)
5132 /* should not happen */
5133 return NULL;
5134 ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
5135 if (!ctx_tname) {
5136 /* should not happen */
5137 bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
5138 return NULL;
5139 }
5140 /* only compare that prog's ctx type name is the same as
5141 * kernel expects. No need to compare field by field.
5142 * It's ok for bpf prog to do:
5143 * struct __sk_buff {};
5144 * int socket_filter_bpf_prog(struct __sk_buff *skb)
5145 * { // no fields of skb are ever used }
5146 */
5147 if (strcmp(ctx_tname, tname))
5148 return NULL;
5149 return ctx_type;
5150 }
5151
5152 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
5153 struct btf *btf,
5154 const struct btf_type *t,
5155 enum bpf_prog_type prog_type,
5156 int arg)
5157 {
5158 const struct btf_member *prog_ctx_type, *kern_ctx_type;
5159
5160 prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg);
5161 if (!prog_ctx_type)
5162 return -ENOENT;
5163 kern_ctx_type = prog_ctx_type + 1;
5164 return kern_ctx_type->type;
5165 }
5166
5167 BTF_ID_LIST(bpf_ctx_convert_btf_id)
5168 BTF_ID(struct, bpf_ctx_convert)
5169
5170 struct btf *btf_parse_vmlinux(void)
5171 {
5172 struct btf_verifier_env *env = NULL;
5173 struct bpf_verifier_log *log;
5174 struct btf *btf = NULL;
5175 int err;
5176
5177 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
5178 if (!env)
5179 return ERR_PTR(-ENOMEM);
5180
5181 log = &env->log;
5182 log->level = BPF_LOG_KERNEL;
5183
5184 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5185 if (!btf) {
5186 err = -ENOMEM;
5187 goto errout;
5188 }
5189 env->btf = btf;
5190
5191 btf->data = __start_BTF;
5192 btf->data_size = __stop_BTF - __start_BTF;
5193 btf->kernel_btf = true;
5194 snprintf(btf->name, sizeof(btf->name), "vmlinux");
5195
5196 err = btf_parse_hdr(env);
5197 if (err)
5198 goto errout;
5199
5200 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
5201
5202 err = btf_parse_str_sec(env);
5203 if (err)
5204 goto errout;
5205
5206 err = btf_check_all_metas(env);
5207 if (err)
5208 goto errout;
5209
5210 err = btf_check_type_tags(env, btf, 1);
5211 if (err)
5212 goto errout;
5213
5214 /* btf_parse_vmlinux() runs under bpf_verifier_lock */
5215 bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
5216
5217 bpf_struct_ops_init(btf, log);
5218
5219 refcount_set(&btf->refcnt, 1);
5220
5221 err = btf_alloc_id(btf);
5222 if (err)
5223 goto errout;
5224
5225 btf_verifier_env_free(env);
5226 return btf;
5227
5228 errout:
5229 btf_verifier_env_free(env);
5230 if (btf) {
5231 kvfree(btf->types);
5232 kfree(btf);
5233 }
5234 return ERR_PTR(err);
5235 }
5236
5237 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
5238
5239 static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size)
5240 {
5241 struct btf_verifier_env *env = NULL;
5242 struct bpf_verifier_log *log;
5243 struct btf *btf = NULL, *base_btf;
5244 int err;
5245
5246 base_btf = bpf_get_btf_vmlinux();
5247 if (IS_ERR(base_btf))
5248 return base_btf;
5249 if (!base_btf)
5250 return ERR_PTR(-EINVAL);
5251
5252 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
5253 if (!env)
5254 return ERR_PTR(-ENOMEM);
5255
5256 log = &env->log;
5257 log->level = BPF_LOG_KERNEL;
5258
5259 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5260 if (!btf) {
5261 err = -ENOMEM;
5262 goto errout;
5263 }
5264 env->btf = btf;
5265
5266 btf->base_btf = base_btf;
5267 btf->start_id = base_btf->nr_types;
5268 btf->start_str_off = base_btf->hdr.str_len;
5269 btf->kernel_btf = true;
5270 snprintf(btf->name, sizeof(btf->name), "%s", module_name);
5271
5272 btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN);
5273 if (!btf->data) {
5274 err = -ENOMEM;
5275 goto errout;
5276 }
5277 memcpy(btf->data, data, data_size);
5278 btf->data_size = data_size;
5279
5280 err = btf_parse_hdr(env);
5281 if (err)
5282 goto errout;
5283
5284 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
5285
5286 err = btf_parse_str_sec(env);
5287 if (err)
5288 goto errout;
5289
5290 err = btf_check_all_metas(env);
5291 if (err)
5292 goto errout;
5293
5294 err = btf_check_type_tags(env, btf, btf_nr_types(base_btf));
5295 if (err)
5296 goto errout;
5297
5298 btf_verifier_env_free(env);
5299 refcount_set(&btf->refcnt, 1);
5300 return btf;
5301
5302 errout:
5303 btf_verifier_env_free(env);
5304 if (btf) {
5305 kvfree(btf->data);
5306 kvfree(btf->types);
5307 kfree(btf);
5308 }
5309 return ERR_PTR(err);
5310 }
5311
5312 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
5313
5314 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
5315 {
5316 struct bpf_prog *tgt_prog = prog->aux->dst_prog;
5317
5318 if (tgt_prog)
5319 return tgt_prog->aux->btf;
5320 else
5321 return prog->aux->attach_btf;
5322 }
5323
5324 static bool is_int_ptr(struct btf *btf, const struct btf_type *t)
5325 {
5326 /* t comes in already as a pointer */
5327 t = btf_type_by_id(btf, t->type);
5328
5329 /* allow const */
5330 if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST)
5331 t = btf_type_by_id(btf, t->type);
5332
5333 return btf_type_is_int(t);
5334 }
5335
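/* Translate a byte offset into the raw tracing context into the index of
 * the function argument it falls in, assuming each argument occupies a
 * multiple of 8 bytes (pointers take 8, other types are rounded up).
 */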
5336 static u32 get_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto,
5337 int off)
5338 {
5339 const struct btf_param *args;
5340 const struct btf_type *t;
5341 u32 offset = 0, nr_args;
5342 int i;
5343
5344 if (!func_proto)
5345 return off / 8;
5346
5347 nr_args = btf_type_vlen(func_proto);
5348 args = (const struct btf_param *)(func_proto + 1);
5349 for (i = 0; i < nr_args; i++) {
5350 t = btf_type_skip_modifiers(btf, args[i].type, NULL);
5351 offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
5352 if (off < offset)
5353 return i;
5354 }
5355
5356 t = btf_type_skip_modifiers(btf, func_proto->type, NULL);
5357 offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
5358 if (off < offset)
5359 return nr_args;
5360
5361 return nr_args + 1;
5362 }
5363
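/* Decide whether a BPF program may access 'size' bytes at offset 'off'
 * within its context, based on the BTF of the function it attaches to,
 * and fill *info with the register type for the access.
 */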
5364 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
5365 const struct bpf_prog *prog,
5366 struct bpf_insn_access_aux *info)
5367 {
5368 const struct btf_type *t = prog->aux->attach_func_proto;
5369 struct bpf_prog *tgt_prog = prog->aux->dst_prog;
5370 struct btf *btf = bpf_prog_get_target_btf(prog);
5371 const char *tname = prog->aux->attach_func_name;
5372 struct bpf_verifier_log *log = info->log;
5373 const struct btf_param *args;
5374 const char *tag_value;
5375 u32 nr_args, arg;
5376 int i, ret;
5377
5378 if (off % 8) {
5379 bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
5380 tname, off);
5381 return false;
5382 }
5383 arg = get_ctx_arg_idx(btf, t, off);
5384 args = (const struct btf_param *)(t + 1);
5385 /* If t is NULL, fall back to a default BPF prog with
5386 * MAX_BPF_FUNC_REG_ARGS u64 arguments.
5387 */
5388 nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS;
5389 if (prog->aux->attach_btf_trace) {
5390 /* skip first 'void *__data' argument in btf_trace_##name typedef */
5391 args++;
5392 nr_args--;
5393 }
5394
5395 if (arg > nr_args) {
5396 bpf_log(log, "func '%s' doesn't have %d-th argument\n",
5397 tname, arg + 1);
5398 return false;
5399 }
5400
5401 if (arg == nr_args) {
5402 switch (prog->expected_attach_type) {
5403 case BPF_LSM_CGROUP:
5404 case BPF_LSM_MAC:
5405 case BPF_TRACE_FEXIT:
5406 /* When LSM programs are attached to void LSM hooks
5407 * they use FEXIT trampolines and when attached to
5408 * int LSM hooks, they use MODIFY_RETURN trampolines.
5409 *
5410 * While the LSM programs are BPF_MODIFY_RETURN-like,
5411 * the check:
5412 *
5413 * if (ret_type != 'int')
5414 * return -EINVAL;
5415 *
5416 * is _not_ done here. This is still safe as LSM hooks
5417 * have only void and int return types.
5418 */
5419 if (!t)
5420 return true;
5421 t = btf_type_by_id(btf, t->type);
5422 break;
5423 case BPF_MODIFY_RETURN:
5424 /* For now the BPF_MODIFY_RETURN can only be attached to
5425 * functions that return an int.
5426 */
5427 if (!t)
5428 return false;
5429
5430 t = btf_type_skip_modifiers(btf, t->type, NULL);
5431 if (!btf_type_is_small_int(t)) {
5432 bpf_log(log,
5433 "ret type %s not allowed for fmod_ret\n",
5434 btf_type_str(t));
5435 return false;
5436 }
5437 break;
5438 default:
5439 bpf_log(log, "func '%s' doesn't have %d-th argument\n",
5440 tname, arg + 1);
5441 return false;
5442 }
5443 } else {
5444 if (!t)
5445 /* Default prog with MAX_BPF_FUNC_REG_ARGS args */
5446 return true;
5447 t = btf_type_by_id(btf, args[arg].type);
5448 }
5449
5450 /* skip modifiers */
5451 while (btf_type_is_modifier(t))
5452 t = btf_type_by_id(btf, t->type);
5453 if (btf_type_is_small_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
5454 /* accessing a scalar */
5455 return true;
5456 if (!btf_type_is_ptr(t)) {
5457 bpf_log(log,
5458 "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
5459 tname, arg,
5460 __btf_name_by_offset(btf, t->name_off),
5461 btf_type_str(t));
5462 return false;
5463 }
5464
5465 /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
5466 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
5467 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
5468 u32 type, flag;
5469
5470 type = base_type(ctx_arg_info->reg_type);
5471 flag = type_flag(ctx_arg_info->reg_type);
5472 if (ctx_arg_info->offset == off && type == PTR_TO_BUF &&
5473 (flag & PTR_MAYBE_NULL)) {
5474 info->reg_type = ctx_arg_info->reg_type;
5475 return true;
5476 }
5477 }
5478
5479 if (t->type == 0)
5480 /* This is a pointer to void.
5481 * It is the same as scalar from the verifier safety pov.
5482 * No further pointer walking is allowed.
5483 */
5484 return true;
5485
5486 if (is_int_ptr(btf, t))
5487 return true;
5488
5489 /* this is a pointer to another type */
5490 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
5491 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
5492
5493 if (ctx_arg_info->offset == off) {
5494 if (!ctx_arg_info->btf_id) {
5495 bpf_log(log, "invalid btf_id for context argument offset %u\n", off);
5496 return false;
5497 }
5498
5499 info->reg_type = ctx_arg_info->reg_type;
5500 info->btf = btf_vmlinux;
5501 info->btf_id = ctx_arg_info->btf_id;
5502 return true;
5503 }
5504 }
5505
5506 info->reg_type = PTR_TO_BTF_ID;
5507 if (tgt_prog) {
5508 enum bpf_prog_type tgt_type;
5509
5510 if (tgt_prog->type == BPF_PROG_TYPE_EXT)
5511 tgt_type = tgt_prog->aux->saved_dst_prog_type;
5512 else
5513 tgt_type = tgt_prog->type;
5514
5515 ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg);
5516 if (ret > 0) {
5517 info->btf = btf_vmlinux;
5518 info->btf_id = ret;
5519 return true;
5520 } else {
5521 return false;
5522 }
5523 }
5524
5525 info->btf = btf;
5526 info->btf_id = t->type;
5527 t = btf_type_by_id(btf, t->type);
5528
5529 if (btf_type_is_type_tag(t)) {
5530 tag_value = __btf_name_by_offset(btf, t->name_off);
5531 if (strcmp(tag_value, "user") == 0)
5532 info->reg_type |= MEM_USER;
5533 if (strcmp(tag_value, "percpu") == 0)
5534 info->reg_type |= MEM_PERCPU;
5535 }
5536
5537 /* skip modifiers */
5538 while (btf_type_is_modifier(t)) {
5539 info->btf_id = t->type;
5540 t = btf_type_by_id(btf, t->type);
5541 }
5542 if (!btf_type_is_struct(t)) {
5543 bpf_log(log,
5544 "func '%s' arg%d type %s is not a struct\n",
5545 tname, arg, btf_type_str(t));
5546 return false;
5547 }
5548 bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
5549 tname, arg, info->btf_id, btf_type_str(t),
5550 __btf_name_by_offset(btf, t->name_off));
5551 return true;
5552 }
5553
5554 enum bpf_struct_walk_result {
5555 /* < 0 error */
5556 WALK_SCALAR = 0,
5557 WALK_PTR,
5558 WALK_STRUCT,
5559 };
5560
5561 static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
5562 const struct btf_type *t, int off, int size,
5563 u32 *next_btf_id, enum bpf_type_flag *flag)
5564 {
5565 u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
5566 const struct btf_type *mtype, *elem_type = NULL;
5567 const struct btf_member *member;
5568 const char *tname, *mname, *tag_value;
5569 u32 vlen, elem_id, mid;
5570
5571 again:
5572 tname = __btf_name_by_offset(btf, t->name_off);
5573 if (!btf_type_is_struct(t)) {
5574 bpf_log(log, "Type '%s' is not a struct\n", tname);
5575 return -EINVAL;
5576 }
5577
5578 vlen = btf_type_vlen(t);
5579 if (off + size > t->size) {
5580 /* If the last element is a variable size array, we may
5581 * need to relax the rule.
5582 */
5583 struct btf_array *array_elem;
5584
5585 if (vlen == 0)
5586 goto error;
5587
5588 member = btf_type_member(t) + vlen - 1;
5589 mtype = btf_type_skip_modifiers(btf, member->type,
5590 NULL);
5591 if (!btf_type_is_array(mtype))
5592 goto error;
5593
5594 array_elem = (struct btf_array *)(mtype + 1);
5595 if (array_elem->nelems != 0)
5596 goto error;
5597
5598 moff = __btf_member_bit_offset(t, member) / 8;
5599 if (off < moff)
5600 goto error;
5601
5602 /* Only allow structure for now, can be relaxed for
5603 * other types later.
5604 */
5605 t = btf_type_skip_modifiers(btf, array_elem->type,
5606 NULL);
5607 if (!btf_type_is_struct(t))
5608 goto error;
5609
5610 off = (off - moff) % t->size;
5611 goto again;
5612
5613 error:
5614 bpf_log(log, "access beyond struct %s at off %u size %u\n",
5615 tname, off, size);
5616 return -EACCES;
5617 }
5618
5619 for_each_member(i, t, member) {
5620 /* offset of the field in bytes */
5621 moff = __btf_member_bit_offset(t, member) / 8;
5622 if (off + size <= moff)
5623 /* won't find anything, field is already too far */
5624 break;
5625
5626 if (__btf_member_bitfield_size(t, member)) {
5627 u32 end_bit = __btf_member_bit_offset(t, member) +
5628 __btf_member_bitfield_size(t, member);
5629
5630 /* off <= moff instead of off == moff because clang
5631 * does not generate a BTF member for anonymous
5632 * bitfield like the ":16" here:
5633 * struct {
5634 * int :16;
5635 * int x:8;
5636 * };
5637 */
5638 if (off <= moff &&
5639 BITS_ROUNDUP_BYTES(end_bit) <= off + size)
5640 return WALK_SCALAR;
5641
5642 /* off may be accessing a following member
5643 *
5644 * or
5645 *
5646 * Doing partial access at either end of this
5647 * bitfield. Continue on this case also to
5648 * treat it as not accessing this bitfield
5649 * and eventually error out as field not
5650 * found to keep it simple.
5651 * It could be relaxed if there was a legit
5652 * partial access case later.
5653 */
5654 continue;
5655 }
5656
5657 /* In case of "off" is pointing to holes of a struct */
5658 if (off < moff)
5659 break;
5660
5661 /* type of the field */
5662 mid = member->type;
5663 mtype = btf_type_by_id(btf, member->type);
5664 mname = __btf_name_by_offset(btf, member->name_off);
5665
5666 mtype = __btf_resolve_size(btf, mtype, &msize,
5667 &elem_type, &elem_id, &total_nelems,
5668 &mid);
5669 if (IS_ERR(mtype)) {
5670 bpf_log(log, "field %s doesn't have size\n", mname);
5671 return -EFAULT;
5672 }
5673
5674 mtrue_end = moff + msize;
5675 if (off >= mtrue_end)
5676 /* no overlap with member, keep iterating */
5677 continue;
5678
5679 if (btf_type_is_array(mtype)) {
5680 u32 elem_idx;
5681
5682 /* __btf_resolve_size() above helps to
5683 * linearize a multi-dimensional array.
5684 *
5685 * The logic here treats an array
5686 * in a struct the following way:
5687 *
5688 * struct outer {
5689 * struct inner array[2][2];
5690 * };
5691 *
5692 * looks like:
5693 *
5694 * struct outer {
5695 * struct inner array_elem0;
5696 * struct inner array_elem1;
5697 * struct inner array_elem2;
5698 * struct inner array_elem3;
5699 * };
5700 *
5701 * When accessing outer->array[1][0], it moves
5702 * moff to "array_elem2", set mtype to
5703 * "struct inner", and msize also becomes
5704 * sizeof(struct inner). Then most of the
5705 * remaining logic will fall through without
5706 * caring whether the current member is an
5707 * array or not.
5708 *
5709 * Unlike mtype/msize/moff, mtrue_end does not
5710 * change. The naming difference ("_true") indicates
5711 * that it does not always correspond to
5712 * the current mtype/msize/moff.
5713 * It is the true end of the current
5714 * member (i.e. array in this case). That
5715 * will allow an int array to be accessed like
5716 * a scratch space,
5717 * i.e. allow access beyond the size of
5718 * the array's element as long as it is
5719 * within the mtrue_end boundary.
5720 */
5721
5722 /* skip empty array */
5723 if (moff == mtrue_end)
5724 continue;
5725
5726 msize /= total_nelems;
5727 elem_idx = (off - moff) / msize;
5728 moff += elem_idx * msize;
5729 mtype = elem_type;
5730 mid = elem_id;
5731 }
5732
5733 /* the 'off' we're looking for is either equal to start
5734 * of this field or inside of this struct
5735 */
5736 if (btf_type_is_struct(mtype)) {
5737 /* our field must be inside that union or struct */
5738 t = mtype;
5739
5740 /* return if the offset matches the member offset */
5741 if (off == moff) {
5742 *next_btf_id = mid;
5743 return WALK_STRUCT;
5744 }
5745
5746 /* adjust offset we're looking for */
5747 off -= moff;
5748 goto again;
5749 }
5750
5751 if (btf_type_is_ptr(mtype)) {
5752 const struct btf_type *stype, *t;
5753 enum bpf_type_flag tmp_flag = 0;
5754 u32 id;
5755
5756 if (msize != size || off != moff) {
5757 bpf_log(log,
5758 "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
5759 mname, moff, tname, off, size);
5760 return -EACCES;
5761 }
5762
5763 /* check type tag */
5764 t = btf_type_by_id(btf, mtype->type);
5765 if (btf_type_is_type_tag(t)) {
5766 tag_value = __btf_name_by_offset(btf, t->name_off);
5767 /* check __user tag */
5768 if (strcmp(tag_value, "user") == 0)
5769 tmp_flag = MEM_USER;
5770 /* check __percpu tag */
5771 if (strcmp(tag_value, "percpu") == 0)
5772 tmp_flag = MEM_PERCPU;
5773 }
5774
5775 stype = btf_type_skip_modifiers(btf, mtype->type, &id);
5776 if (btf_type_is_struct(stype)) {
5777 *next_btf_id = id;
5778 *flag = tmp_flag;
5779 return WALK_PTR;
5780 }
5781 }
5782
5783 /* Allow more flexible access within an int as long as
5784 * it is within mtrue_end.
5785 * Since mtrue_end could be the end of an array,
5786 * that also allows using an array of int as a scratch
5787 * space. e.g. skb->cb[].
5788 */
5789 if (off + size > mtrue_end) {
5790 bpf_log(log,
5791 "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
5792 mname, mtrue_end, tname, off, size);
5793 return -EACCES;
5794 }
5795
5796 return WALK_SCALAR;
5797 }
5798 bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
5799 return -EINVAL;
5800 }
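
/* Rough illustration of the walk results, for a hypothetical type:
 *
 *	struct foo {
 *		struct bar embedded;
 *		struct baz *ptr;
 *		int scratch[4];
 *	};
 *
 *	off == offsetof(struct foo, embedded)
 *		-> WALK_STRUCT, *next_btf_id = id of struct bar
 *		   (the caller restarts the walk inside struct bar)
 *	off == offsetof(struct foo, ptr), size == sizeof(void *)
 *		-> WALK_PTR, *next_btf_id = id of struct baz
 *	off anywhere inside scratch[], access staying within its true end
 *		-> WALK_SCALAR (the array-as-scratch-space rule above)
 */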
5801
5802 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
5803 const struct btf_type *t, int off, int size,
5804 enum bpf_access_type atype __maybe_unused,
5805 u32 *next_btf_id, enum bpf_type_flag *flag)
5806 {
5807 enum bpf_type_flag tmp_flag = 0;
5808 int err;
5809 u32 id;
5810
5811 do {
5812 err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag);
5813
5814 switch (err) {
5815 case WALK_PTR:
5816 /* If we found the pointer or scalar on t+off,
5817 * we're done.
5818 */
5819 *next_btf_id = id;
5820 *flag = tmp_flag;
5821 return PTR_TO_BTF_ID;
5822 case WALK_SCALAR:
5823 return SCALAR_VALUE;
5824 case WALK_STRUCT:
5825 /* We found nested struct, so continue the search
5826 * by diving in it. At this point the offset is
5827 * aligned with the new type, so set it to 0.
5828 */
5829 t = btf_type_by_id(btf, id);
5830 off = 0;
5831 break;
5832 default:
5833 /* It's either an error or an unknown return value.
5834 * Scream and leave.
5835 */
5836 if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
5837 return -EINVAL;
5838 return err;
5839 }
5840 } while (t);
5841
5842 return -EINVAL;
5843 }
5844
5845 /* Check that two BTF types, each specified as a BTF object + id, are exactly
5846 * the same. Trivial ID check is not enough due to module BTFs, because we can
5847 * end up with two different module BTFs, but IDs point to the common type in
5848 * vmlinux BTF.
5849 */
5850 static bool btf_types_are_same(const struct btf *btf1, u32 id1,
5851 const struct btf *btf2, u32 id2)
5852 {
5853 if (id1 != id2)
5854 return false;
5855 if (btf1 == btf2)
5856 return true;
5857 return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2);
5858 }
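
/* For example (hypothetical id): two different module BTFs may both
 * use id 42 for "struct sock" because that id lives in their shared
 * vmlinux base BTF.  btf1 != btf2, yet btf_type_by_id() resolves both
 * to the very same vmlinux btf_type object, so the types match.
 */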
5859
5860 bool btf_struct_ids_match(struct bpf_verifier_log *log,
5861 const struct btf *btf, u32 id, int off,
5862 const struct btf *need_btf, u32 need_type_id,
5863 bool strict)
5864 {
5865 const struct btf_type *type;
5866 enum bpf_type_flag flag;
5867 int err;
5868
5869 /* Are we already done? */
5870 if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id))
5871 return true;
5872 /* In case of strict type match, we do not walk struct, the top level
5873 * type match must succeed. When strict is true, off should have already
5874 * been 0.
5875 */
5876 if (strict)
5877 return false;
5878 again:
5879 type = btf_type_by_id(btf, id);
5880 if (!type)
5881 return false;
5882 err = btf_struct_walk(log, btf, type, off, 1, &id, &flag);
5883 if (err != WALK_STRUCT)
5884 return false;
5885
5886 /* We found nested struct object. If it matches
5887 * the requested ID, we're done. Otherwise let's
5888 * continue the search with offset 0 in the new
5889 * type.
5890 */
5891 if (!btf_types_are_same(btf, id, need_btf, need_type_id)) {
5892 off = 0;
5893 goto again;
5894 }
5895
5896 return true;
5897 }
5898
5899 static int __get_type_size(struct btf *btf, u32 btf_id,
5900 const struct btf_type **ret_type)
5901 {
5902 const struct btf_type *t;
5903
5904 *ret_type = btf_type_by_id(btf, 0);
5905 if (!btf_id)
5906 /* void */
5907 return 0;
5908 t = btf_type_by_id(btf, btf_id);
5909 while (t && btf_type_is_modifier(t))
5910 t = btf_type_by_id(btf, t->type);
5911 if (!t)
5912 return -EINVAL;
5913 *ret_type = t;
5914 if (btf_type_is_ptr(t))
5915 /* kernel size of pointer. Not BPF's size of pointer */
5916 return sizeof(void *);
5917 if (btf_type_is_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
5918 return t->size;
5919 return -EINVAL;
5920 }
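
/* A few illustrative results (sizes assume a 64-bit kernel):
 *
 *	"const char *p"     -> sizeof(void *) == 8
 *	"u16 flags"         -> 2
 *	16-byte struct      -> 16
 *	array or union type -> -EINVAL
 */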
5921
5922 int btf_distill_func_proto(struct bpf_verifier_log *log,
5923 struct btf *btf,
5924 const struct btf_type *func,
5925 const char *tname,
5926 struct btf_func_model *m)
5927 {
5928 const struct btf_param *args;
5929 const struct btf_type *t;
5930 u32 i, nargs;
5931 int ret;
5932
5933 if (!func) {
5934 /* BTF function prototype doesn't match the verifier types.
5935 * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args.
5936 */
5937 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
5938 m->arg_size[i] = 8;
5939 m->arg_flags[i] = 0;
5940 }
5941 m->ret_size = 8;
5942 m->nr_args = MAX_BPF_FUNC_REG_ARGS;
5943 return 0;
5944 }
5945 args = (const struct btf_param *)(func + 1);
5946 nargs = btf_type_vlen(func);
5947 if (nargs > MAX_BPF_FUNC_ARGS) {
5948 bpf_log(log,
5949 "The function %s has %d arguments. Too many.\n",
5950 tname, nargs);
5951 return -EINVAL;
5952 }
5953 ret = __get_type_size(btf, func->type, &t);
5954 if (ret < 0 || __btf_type_is_struct(t)) {
5955 bpf_log(log,
5956 "The function %s return type %s is unsupported.\n",
5957 tname, btf_type_str(t));
5958 return -EINVAL;
5959 }
5960 m->ret_size = ret;
5961
5962 for (i = 0; i < nargs; i++) {
5963 if (i == nargs - 1 && args[i].type == 0) {
5964 bpf_log(log,
5965 "The function %s with variable args is unsupported.\n",
5966 tname);
5967 return -EINVAL;
5968 }
5969 ret = __get_type_size(btf, args[i].type, &t);
5970
5971 /* No support of struct argument size greater than 16 bytes */
5972 if (ret < 0 || ret > 16) {
5973 bpf_log(log,
5974 "The function %s arg%d type %s is unsupported.\n",
5975 tname, i, btf_type_str(t));
5976 return -EINVAL;
5977 }
5978 if (ret == 0) {
5979 bpf_log(log,
5980 "The function %s has malformed void argument.\n",
5981 tname);
5982 return -EINVAL;
5983 }
5984 m->arg_size[i] = ret;
5985 m->arg_flags[i] = __btf_type_is_struct(t) ? BTF_FMODEL_STRUCT_ARG : 0;
5986 }
5987 m->nr_args = nargs;
5988 return 0;
5989 }
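
/* The distilled model for a hypothetical
 *
 *	long bpf_foo(struct sock *sk, u32 len, bool flag);
 *
 * would be (on a 64-bit kernel):
 *
 *	m->ret_size  = 8
 *	m->nr_args   = 3
 *	m->arg_size  = { 8, 4, 1 }
 *	m->arg_flags = { 0, 0, 0 }
 */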
5990
5991 /* Compare BTFs of two functions assuming only scalars and pointers to context.
5992 * t1 points to BTF_KIND_FUNC in btf1
5993 * t2 points to BTF_KIND_FUNC in btf2
5994 * Returns:
5995 * EINVAL - function prototype mismatch
5996 * EFAULT - verifier bug
5997 * 0 - 99% match. The last 1% is validated by the verifier.
5998 */
5999 static int btf_check_func_type_match(struct bpf_verifier_log *log,
6000 struct btf *btf1, const struct btf_type *t1,
6001 struct btf *btf2, const struct btf_type *t2)
6002 {
6003 const struct btf_param *args1, *args2;
6004 const char *fn1, *fn2, *s1, *s2;
6005 u32 nargs1, nargs2, i;
6006
6007 fn1 = btf_name_by_offset(btf1, t1->name_off);
6008 fn2 = btf_name_by_offset(btf2, t2->name_off);
6009
6010 if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
6011 bpf_log(log, "%s() is not a global function\n", fn1);
6012 return -EINVAL;
6013 }
6014 if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
6015 bpf_log(log, "%s() is not a global function\n", fn2);
6016 return -EINVAL;
6017 }
6018
6019 t1 = btf_type_by_id(btf1, t1->type);
6020 if (!t1 || !btf_type_is_func_proto(t1))
6021 return -EFAULT;
6022 t2 = btf_type_by_id(btf2, t2->type);
6023 if (!t2 || !btf_type_is_func_proto(t2))
6024 return -EFAULT;
6025
6026 args1 = (const struct btf_param *)(t1 + 1);
6027 nargs1 = btf_type_vlen(t1);
6028 args2 = (const struct btf_param *)(t2 + 1);
6029 nargs2 = btf_type_vlen(t2);
6030
6031 if (nargs1 != nargs2) {
6032 bpf_log(log, "%s() has %d args while %s() has %d args\n",
6033 fn1, nargs1, fn2, nargs2);
6034 return -EINVAL;
6035 }
6036
6037 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
6038 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
6039 if (t1->info != t2->info) {
6040 bpf_log(log,
6041 "Return type %s of %s() doesn't match type %s of %s()\n",
6042 btf_type_str(t1), fn1,
6043 btf_type_str(t2), fn2);
6044 return -EINVAL;
6045 }
6046
6047 for (i = 0; i < nargs1; i++) {
6048 t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
6049 t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);
6050
6051 if (t1->info != t2->info) {
6052 bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
6053 i, fn1, btf_type_str(t1),
6054 fn2, btf_type_str(t2));
6055 return -EINVAL;
6056 }
6057 if (btf_type_has_size(t1) && t1->size != t2->size) {
6058 bpf_log(log,
6059 "arg%d in %s() has size %d while %s() has %d\n",
6060 i, fn1, t1->size,
6061 fn2, t2->size);
6062 return -EINVAL;
6063 }
6064
6065 /* global functions are validated with scalars and pointers
6066 * to context only. And only global functions can be replaced.
6067 * Hence only those types are type-checked here.
6068 */
6069 if (btf_type_is_int(t1) || btf_is_any_enum(t1))
6070 continue;
6071 if (!btf_type_is_ptr(t1)) {
6072 bpf_log(log,
6073 "arg%d in %s() has unrecognized type\n",
6074 i, fn1);
6075 return -EINVAL;
6076 }
6077 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
6078 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
6079 if (!btf_type_is_struct(t1)) {
6080 bpf_log(log,
6081 "arg%d in %s() is not a pointer to context\n",
6082 i, fn1);
6083 return -EINVAL;
6084 }
6085 if (!btf_type_is_struct(t2)) {
6086 bpf_log(log,
6087 "arg%d in %s() is not a pointer to context\n",
6088 i, fn2);
6089 return -EINVAL;
6090 }
6091 /* This is an optional check to make program writing easier.
6092 * Compare names of structs and report an error to the user.
6093 * btf_prepare_func_args() already checked that t2 struct
6094 * is a context type. btf_prepare_func_args() will check
6095 * later that t1 struct is a context type as well.
6096 */
6097 s1 = btf_name_by_offset(btf1, t1->name_off);
6098 s2 = btf_name_by_offset(btf2, t2->name_off);
6099 if (strcmp(s1, s2)) {
6100 bpf_log(log,
6101 "arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
6102 i, fn1, s1, fn2, s2);
6103 return -EINVAL;
6104 }
6105 }
6106 return 0;
6107 }
6108
6109 /* Compare BTFs of given program with BTF of target program */
6110 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
6111 struct btf *btf2, const struct btf_type *t2)
6112 {
6113 struct btf *btf1 = prog->aux->btf;
6114 const struct btf_type *t1;
6115 u32 btf_id = 0;
6116
6117 if (!prog->aux->func_info) {
6118 bpf_log(log, "Program extension requires BTF\n");
6119 return -EINVAL;
6120 }
6121
6122 btf_id = prog->aux->func_info[0].type_id;
6123 if (!btf_id)
6124 return -EFAULT;
6125
6126 t1 = btf_type_by_id(btf1, btf_id);
6127 if (!t1 || !btf_type_is_func(t1))
6128 return -EFAULT;
6129
6130 return btf_check_func_type_match(log, btf1, t1, btf2, t2);
6131 }
6132
6133 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
6134 #ifdef CONFIG_NET
6135 [PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
6136 [PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
6137 [PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
6138 #endif
6139 };
6140
6141 /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
6142 static bool __btf_type_is_scalar_struct(struct bpf_verifier_log *log,
6143 const struct btf *btf,
6144 const struct btf_type *t, int rec)
6145 {
6146 const struct btf_type *member_type;
6147 const struct btf_member *member;
6148 u32 i;
6149
6150 if (!btf_type_is_struct(t))
6151 return false;
6152
6153 for_each_member(i, t, member) {
6154 const struct btf_array *array;
6155
6156 member_type = btf_type_skip_modifiers(btf, member->type, NULL);
6157 if (btf_type_is_struct(member_type)) {
6158 if (rec >= 3) {
6159 bpf_log(log, "max struct nesting depth exceeded\n");
6160 return false;
6161 }
6162 if (!__btf_type_is_scalar_struct(log, btf, member_type, rec + 1))
6163 return false;
6164 continue;
6165 }
6166 if (btf_type_is_array(member_type)) {
6167 array = btf_type_array(member_type);
6168 if (!array->nelems)
6169 return false;
6170 member_type = btf_type_skip_modifiers(btf, array->type, NULL);
6171 if (!btf_type_is_scalar(member_type))
6172 return false;
6173 continue;
6174 }
6175 if (!btf_type_is_scalar(member_type))
6176 return false;
6177 }
6178 return true;
6179 }
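
/* Illustrative examples of what the check above accepts and rejects:
 *
 *	struct ok  { int a; char b[8]; struct { u16 c; } inner; };  -> true
 *	struct bad { int a; void *p; };                             -> false (pointer member)
 *	more than four levels of nested structs                     -> false (rec limit)
 */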
6180
6181 static bool is_kfunc_arg_mem_size(const struct btf *btf,
6182 const struct btf_param *arg,
6183 const struct bpf_reg_state *reg)
6184 {
6185 int len, sfx_len = sizeof("__sz") - 1;
6186 const struct btf_type *t;
6187 const char *param_name;
6188
6189 t = btf_type_skip_modifiers(btf, arg->type, NULL);
6190 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
6191 return false;
6192
6193 /* In the future, this can be ported to use BTF tagging */
6194 param_name = btf_name_by_offset(btf, arg->name_off);
6195 if (str_is_empty(param_name))
6196 return false;
6197 len = strlen(param_name);
6198 if (len < sfx_len)
6199 return false;
6200 param_name += len - sfx_len;
6201 if (strncmp(param_name, "__sz", sfx_len))
6202 return false;
6203
6204 return true;
6205 }
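
/* With this convention, a hypothetical kfunc such as
 *
 *	void bpf_kfunc_dump(void *data, u32 data__sz);
 *
 * has "data__sz" recognized as the byte size of the preceding "data"
 * pointer, provided the register holding it is a known SCALAR_VALUE.
 */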
6206
6207 static bool btf_is_kfunc_arg_mem_size(const struct btf *btf,
6208 const struct btf_param *arg,
6209 const struct bpf_reg_state *reg,
6210 const char *name)
6211 {
6212 int len, target_len = strlen(name);
6213 const struct btf_type *t;
6214 const char *param_name;
6215
6216 t = btf_type_skip_modifiers(btf, arg->type, NULL);
6217 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
6218 return false;
6219
6220 param_name = btf_name_by_offset(btf, arg->name_off);
6221 if (str_is_empty(param_name))
6222 return false;
6223 len = strlen(param_name);
6224 if (len != target_len)
6225 return false;
6226 if (strcmp(param_name, name))
6227 return false;
6228
6229 return true;
6230 }
6231
6232 static int btf_check_func_arg_match(struct bpf_verifier_env *env,
6233 const struct btf *btf, u32 func_id,
6234 struct bpf_reg_state *regs,
6235 bool ptr_to_mem_ok,
6236 struct bpf_kfunc_arg_meta *kfunc_meta,
6237 bool processing_call)
6238 {
6239 enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
6240 bool rel = false, kptr_get = false, trusted_args = false;
6241 bool sleepable = false;
6242 struct bpf_verifier_log *log = &env->log;
6243 u32 i, nargs, ref_id, ref_obj_id = 0;
6244 bool is_kfunc = btf_is_kernel(btf);
6245 const char *func_name, *ref_tname;
6246 const struct btf_type *t, *ref_t;
6247 const struct btf_param *args;
6248 int ref_regno = 0, ret;
6249
6250 t = btf_type_by_id(btf, func_id);
6251 if (!t || !btf_type_is_func(t)) {
6252 /* These checks were already done by the verifier while loading
6253 * struct bpf_func_info or in add_kfunc_call().
6254 */
6255 bpf_log(log, "BTF of func_id %u doesn't point to KIND_FUNC\n",
6256 func_id);
6257 return -EFAULT;
6258 }
6259 func_name = btf_name_by_offset(btf, t->name_off);
6260
6261 t = btf_type_by_id(btf, t->type);
6262 if (!t || !btf_type_is_func_proto(t)) {
6263 bpf_log(log, "Invalid BTF of func %s\n", func_name);
6264 return -EFAULT;
6265 }
6266 args = (const struct btf_param *)(t + 1);
6267 nargs = btf_type_vlen(t);
6268 if (nargs > MAX_BPF_FUNC_REG_ARGS) {
6269 bpf_log(log, "Function %s has %d > %d args\n", func_name, nargs,
6270 MAX_BPF_FUNC_REG_ARGS);
6271 return -EINVAL;
6272 }
6273
6274 if (is_kfunc && kfunc_meta) {
6275 /* Only kfunc can be release func */
6276 rel = kfunc_meta->flags & KF_RELEASE;
6277 kptr_get = kfunc_meta->flags & KF_KPTR_GET;
6278 trusted_args = kfunc_meta->flags & KF_TRUSTED_ARGS;
6279 sleepable = kfunc_meta->flags & KF_SLEEPABLE;
6280 }
6281
6282 /* check that BTF function arguments match actual types that the
6283 * verifier sees.
6284 */
6285 for (i = 0; i < nargs; i++) {
6286 enum bpf_arg_type arg_type = ARG_DONTCARE;
6287 u32 regno = i + 1;
6288 struct bpf_reg_state *reg = &regs[regno];
6289 bool obj_ptr = false;
6290
6291 t = btf_type_skip_modifiers(btf, args[i].type, NULL);
6292 if (btf_type_is_scalar(t)) {
6293 if (is_kfunc && kfunc_meta) {
6294 bool is_buf_size = false;
6295
6296 /* check for any const scalar parameter of name "rdonly_buf_size"
6297 * or "rdwr_buf_size"
6298 */
6299 if (btf_is_kfunc_arg_mem_size(btf, &args[i], reg,
6300 "rdonly_buf_size")) {
6301 kfunc_meta->r0_rdonly = true;
6302 is_buf_size = true;
6303 } else if (btf_is_kfunc_arg_mem_size(btf, &args[i], reg,
6304 "rdwr_buf_size"))
6305 is_buf_size = true;
6306
6307 if (is_buf_size) {
6308 if (kfunc_meta->r0_size) {
6309 bpf_log(log, "2 or more rdonly/rdwr_buf_size parameters for kfunc");
6310 return -EINVAL;
6311 }
6312
6313 if (!tnum_is_const(reg->var_off)) {
6314 bpf_log(log, "R%d is not a const\n", regno);
6315 return -EINVAL;
6316 }
6317
6318 kfunc_meta->r0_size = reg->var_off.value;
6319 ret = mark_chain_precision(env, regno);
6320 if (ret)
6321 return ret;
6322 }
6323 }
6324
6325 if (reg->type == SCALAR_VALUE)
6326 continue;
6327 bpf_log(log, "R%d is not a scalar\n", regno);
6328 return -EINVAL;
6329 }
6330
6331 if (!btf_type_is_ptr(t)) {
6332 bpf_log(log, "Unrecognized arg#%d type %s\n",
6333 i, btf_type_str(t));
6334 return -EINVAL;
6335 }
6336
6337 /* These register types have special constraints wrt ref_obj_id
6338 * and offset checks. The rest of trusted args don't.
6339 */
6340 obj_ptr = reg->type == PTR_TO_CTX || reg->type == PTR_TO_BTF_ID ||
6341 reg2btf_ids[base_type(reg->type)];
6342
6343 /* Check if argument must be a referenced pointer, args + i has
6344 * been verified to be a pointer (after skipping modifiers).
6345 * PTR_TO_CTX is ok without having non-zero ref_obj_id.
6346 */
6347 if (is_kfunc && trusted_args && (obj_ptr && reg->type != PTR_TO_CTX) && !reg->ref_obj_id) {
6348 bpf_log(log, "R%d must be referenced\n", regno);
6349 return -EINVAL;
6350 }
6351
6352 ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
6353 ref_tname = btf_name_by_offset(btf, ref_t->name_off);
6354
6355 /* Trusted args have the same offset checks as release arguments */
6356 if ((trusted_args && obj_ptr) || (rel && reg->ref_obj_id))
6357 arg_type |= OBJ_RELEASE;
6358 ret = check_func_arg_reg_off(env, reg, regno, arg_type);
6359 if (ret < 0)
6360 return ret;
6361
6362 if (is_kfunc && reg->ref_obj_id) {
6363 /* Ensure only one argument is referenced PTR_TO_BTF_ID */
6364 if (ref_obj_id) {
6365 bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
6366 regno, reg->ref_obj_id, ref_obj_id);
6367 return -EFAULT;
6368 }
6369 ref_regno = regno;
6370 ref_obj_id = reg->ref_obj_id;
6371 }
6372
6373 /* kptr_get is only true for kfunc */
6374 if (i == 0 && kptr_get) {
6375 struct bpf_map_value_off_desc *off_desc;
6376
6377 if (reg->type != PTR_TO_MAP_VALUE) {
6378 bpf_log(log, "arg#0 expected pointer to map value\n");
6379 return -EINVAL;
6380 }
6381
6382 /* check_func_arg_reg_off allows var_off for
6383 * PTR_TO_MAP_VALUE, but we need fixed offset to find
6384 * off_desc.
6385 */
6386 if (!tnum_is_const(reg->var_off)) {
6387 bpf_log(log, "arg#0 must have constant offset\n");
6388 return -EINVAL;
6389 }
6390
6391 off_desc = bpf_map_kptr_off_contains(reg->map_ptr, reg->off + reg->var_off.value);
6392 if (!off_desc || off_desc->type != BPF_KPTR_REF) {
6393 bpf_log(log, "arg#0 no referenced kptr at map value offset=%llu\n",
6394 reg->off + reg->var_off.value);
6395 return -EINVAL;
6396 }
6397
6398 if (!btf_type_is_ptr(ref_t)) {
6399 bpf_log(log, "arg#0 BTF type must be a double pointer\n");
6400 return -EINVAL;
6401 }
6402
6403 ref_t = btf_type_skip_modifiers(btf, ref_t->type, &ref_id);
6404 ref_tname = btf_name_by_offset(btf, ref_t->name_off);
6405
6406 if (!btf_type_is_struct(ref_t)) {
6407 bpf_log(log, "kernel function %s args#%d pointer type %s %s is not supported\n",
6408 func_name, i, btf_type_str(ref_t), ref_tname);
6409 return -EINVAL;
6410 }
6411 if (!btf_struct_ids_match(log, btf, ref_id, 0, off_desc->kptr.btf,
6412 off_desc->kptr.btf_id, true)) {
6413 bpf_log(log, "kernel function %s args#%d expected pointer to %s %s\n",
6414 func_name, i, btf_type_str(ref_t), ref_tname);
6415 return -EINVAL;
6416 }
6417 /* rest of the arguments can be anything, like normal kfunc */
6418 } else if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
6419 /* If function expects ctx type in BTF check that caller
6420 * is passing PTR_TO_CTX.
6421 */
6422 if (reg->type != PTR_TO_CTX) {
6423 bpf_log(log,
6424 "arg#%d expected pointer to ctx, but got %s\n",
6425 i, btf_type_str(t));
6426 return -EINVAL;
6427 }
6428 } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID ||
6429 (reg2btf_ids[base_type(reg->type)] && !type_flag(reg->type)))) {
6430 const struct btf_type *reg_ref_t;
6431 const struct btf *reg_btf;
6432 const char *reg_ref_tname;
6433 u32 reg_ref_id;
6434
6435 if (!btf_type_is_struct(ref_t)) {
6436 bpf_log(log, "kernel function %s args#%d pointer type %s %s is not supported\n",
6437 func_name, i, btf_type_str(ref_t),
6438 ref_tname);
6439 return -EINVAL;
6440 }
6441
6442 if (reg->type == PTR_TO_BTF_ID) {
6443 reg_btf = reg->btf;
6444 reg_ref_id = reg->btf_id;
6445 } else {
6446 reg_btf = btf_vmlinux;
6447 reg_ref_id = *reg2btf_ids[base_type(reg->type)];
6448 }
6449
6450 reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id,
6451 &reg_ref_id);
6452 reg_ref_tname = btf_name_by_offset(reg_btf,
6453 reg_ref_t->name_off);
6454 if (!btf_struct_ids_match(log, reg_btf, reg_ref_id,
6455 reg->off, btf, ref_id,
6456 trusted_args || (rel && reg->ref_obj_id))) {
6457 bpf_log(log, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
6458 func_name, i,
6459 btf_type_str(ref_t), ref_tname,
6460 regno, btf_type_str(reg_ref_t),
6461 reg_ref_tname);
6462 return -EINVAL;
6463 }
6464 } else if (ptr_to_mem_ok && processing_call) {
6465 const struct btf_type *resolve_ret;
6466 u32 type_size;
6467
6468 if (is_kfunc) {
6469 bool arg_mem_size = i + 1 < nargs && is_kfunc_arg_mem_size(btf, &args[i + 1], &regs[regno + 1]);
6470 bool arg_dynptr = btf_type_is_struct(ref_t) &&
6471 !strcmp(ref_tname,
6472 stringify_struct(bpf_dynptr_kern));
6473
6474 /* Permit pointer to mem, but only when argument
6475 * type is pointer to scalar, or struct composed
6476 * (recursively) of scalars.
6477 * When arg_mem_size is true, the pointer can be
6478 * void *.
6479 * Also permit initialized local dynamic pointers.
6480 */
6481 if (!btf_type_is_scalar(ref_t) &&
6482 !__btf_type_is_scalar_struct(log, btf, ref_t, 0) &&
6483 !arg_dynptr &&
6484 (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
6485 bpf_log(log,
6486 "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
6487 i, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
6488 return -EINVAL;
6489 }
6490
6491 if (arg_dynptr) {
6492 if (reg->type != PTR_TO_STACK) {
6493 bpf_log(log, "arg#%d pointer type %s %s not to stack\n",
6494 i, btf_type_str(ref_t),
6495 ref_tname);
6496 return -EINVAL;
6497 }
6498
6499 if (!is_dynptr_reg_valid_init(env, reg)) {
6500 bpf_log(log,
6501 "arg#%d pointer type %s %s must be valid and initialized\n",
6502 i, btf_type_str(ref_t),
6503 ref_tname);
6504 return -EINVAL;
6505 }
6506
6507 if (!is_dynptr_type_expected(env, reg,
6508 ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL)) {
6509 bpf_log(log,
6510 "arg#%d pointer type %s %s points to unsupported dynamic pointer type\n",
6511 i, btf_type_str(ref_t),
6512 ref_tname);
6513 return -EINVAL;
6514 }
6515
6516 continue;
6517 }
6518
6519 /* Check for mem, len pair */
6520 if (arg_mem_size) {
6521 if (check_kfunc_mem_size_reg(env, &regs[regno + 1], regno + 1)) {
6522 bpf_log(log, "arg#%d arg#%d memory, len pair leads to invalid memory access\n",
6523 i, i + 1);
6524 return -EINVAL;
6525 }
6526 i++;
6527 continue;
6528 }
6529 }
6530
6531 resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
6532 if (IS_ERR(resolve_ret)) {
6533 bpf_log(log,
6534 "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
6535 i, btf_type_str(ref_t), ref_tname,
6536 PTR_ERR(resolve_ret));
6537 return -EINVAL;
6538 }
6539
6540 if (check_mem_reg(env, reg, regno, type_size))
6541 return -EINVAL;
6542 } else {
6543 bpf_log(log, "reg type unsupported for arg#%d %sfunction %s#%d\n", i,
6544 is_kfunc ? "kernel " : "", func_name, func_id);
6545 return -EINVAL;
6546 }
6547 }
6548
6549 /* Either both are set, or neither */
6550 WARN_ON_ONCE((ref_obj_id && !ref_regno) || (!ref_obj_id && ref_regno));
6551 /* We already made sure ref_obj_id is set only for one argument. We do
6552 * allow (!rel && ref_obj_id), so that passing such referenced
6553 * PTR_TO_BTF_ID to other kfuncs works. Note that rel is only true when
6554 * is_kfunc is true.
6555 */
6556 if (rel && !ref_obj_id) {
6557 bpf_log(log, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n",
6558 func_name);
6559 return -EINVAL;
6560 }
6561
6562 if (sleepable && !env->prog->aux->sleepable) {
6563 bpf_log(log, "kernel function %s is sleepable but the program is not\n",
6564 func_name);
6565 return -EINVAL;
6566 }
6567
6568 if (kfunc_meta && ref_obj_id)
6569 kfunc_meta->ref_obj_id = ref_obj_id;
6570
6571 /* returns argument register number > 0 in case of reference release kfunc */
6572 return rel ? ref_regno : 0;
6573 }
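
/* For instance, for a hypothetical release kfunc registered with
 * KF_RELEASE:
 *
 *	void bpf_put_thing(struct thing *t);
 *
 * the function above returns the register number carrying the
 * referenced PTR_TO_BTF_ID (1 for the first argument), so the caller
 * can release the reference state; for non-release calls it returns 0.
 */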
6574
6575 /* Compare BTF of a function declaration with given bpf_reg_state.
6576 * Returns:
6577 * EFAULT - there is a verifier bug. Abort verification.
6578 * EINVAL - there is a type mismatch or BTF is not available.
6579 * 0 - BTF matches with what bpf_reg_state expects.
6580 * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
6581 */
6582 int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
6583 struct bpf_reg_state *regs)
6584 {
6585 struct bpf_prog *prog = env->prog;
6586 struct btf *btf = prog->aux->btf;
6587 bool is_global;
6588 u32 btf_id;
6589 int err;
6590
6591 if (!prog->aux->func_info)
6592 return -EINVAL;
6593
6594 btf_id = prog->aux->func_info[subprog].type_id;
6595 if (!btf_id)
6596 return -EFAULT;
6597
6598 if (prog->aux->func_info_aux[subprog].unreliable)
6599 return -EINVAL;
6600
6601 is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
6602 err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, NULL, false);
6603
6604 /* Compiler optimizations can remove arguments from static functions
6605 * or mismatched type can be passed into a global function.
6606 * In such cases mark the function as unreliable from BTF point of view.
6607 */
6608 if (err)
6609 prog->aux->func_info_aux[subprog].unreliable = true;
6610 return err;
6611 }
6612
6613 /* Compare BTF of a function call with given bpf_reg_state.
6614 * Returns:
6615 * EFAULT - there is a verifier bug. Abort verification.
6616 * EINVAL - there is a type mismatch or BTF is not available.
6617 * 0 - BTF matches with what bpf_reg_state expects.
6618 * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
6619 *
6620 * NOTE: the code is duplicated from btf_check_subprog_arg_match()
6621 * because btf_check_func_arg_match() is still doing both. Once that
6622 * function is split in two, we can first call btf_check_subprog_arg_match()
6623 * from here, and then handle the calling part in a new code path.
6624 */
6625 int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
6626 struct bpf_reg_state *regs)
6627 {
6628 struct bpf_prog *prog = env->prog;
6629 struct btf *btf = prog->aux->btf;
6630 bool is_global;
6631 u32 btf_id;
6632 int err;
6633
6634 if (!prog->aux->func_info)
6635 return -EINVAL;
6636
6637 btf_id = prog->aux->func_info[subprog].type_id;
6638 if (!btf_id)
6639 return -EFAULT;
6640
6641 if (prog->aux->func_info_aux[subprog].unreliable)
6642 return -EINVAL;
6643
6644 is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
6645 err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, NULL, true);
6646
6647 /* Compiler optimizations can remove arguments from static functions
6648 * or mismatched type can be passed into a global function.
6649 * In such cases mark the function as unreliable from BTF point of view.
6650 */
6651 if (err)
6652 prog->aux->func_info_aux[subprog].unreliable = true;
6653 return err;
6654 }
6655
6656 int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
6657 const struct btf *btf, u32 func_id,
6658 struct bpf_reg_state *regs,
6659 struct bpf_kfunc_arg_meta *meta)
6660 {
6661 return btf_check_func_arg_match(env, btf, func_id, regs, true, meta, true);
6662 }
6663
6664 /* Convert BTF of a function into bpf_reg_state if possible
6665 * Returns:
6666 * EFAULT - there is a verifier bug. Abort verification.
6667 * EINVAL - cannot convert BTF.
6668 * 0 - Successfully converted BTF into bpf_reg_state
6669 * (either PTR_TO_CTX or SCALAR_VALUE).
6670 */
6671 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
6672 struct bpf_reg_state *regs)
6673 {
6674 struct bpf_verifier_log *log = &env->log;
6675 struct bpf_prog *prog = env->prog;
6676 enum bpf_prog_type prog_type = prog->type;
6677 struct btf *btf = prog->aux->btf;
6678 const struct btf_param *args;
6679 const struct btf_type *t, *ref_t;
6680 u32 i, nargs, btf_id;
6681 const char *tname;
6682
6683 if (!prog->aux->func_info ||
6684 prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) {
6685 bpf_log(log, "Verifier bug\n");
6686 return -EFAULT;
6687 }
6688
6689 btf_id = prog->aux->func_info[subprog].type_id;
6690 if (!btf_id) {
6691 bpf_log(log, "Global functions need valid BTF\n");
6692 return -EFAULT;
6693 }
6694
6695 t = btf_type_by_id(btf, btf_id);
6696 if (!t || !btf_type_is_func(t)) {
6697 /* These checks were already done by the verifier while loading
6698 * struct bpf_func_info
6699 */
6700 bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
6701 subprog);
6702 return -EFAULT;
6703 }
6704 tname = btf_name_by_offset(btf, t->name_off);
6705
6706 if (log->level & BPF_LOG_LEVEL)
6707 bpf_log(log, "Validating %s() func#%d...\n",
6708 tname, subprog);
6709
6710 if (prog->aux->func_info_aux[subprog].unreliable) {
6711 bpf_log(log, "Verifier bug in function %s()\n", tname);
6712 return -EFAULT;
6713 }
6714 if (prog_type == BPF_PROG_TYPE_EXT)
6715 prog_type = prog->aux->dst_prog->type;
6716
6717 t = btf_type_by_id(btf, t->type);
6718 if (!t || !btf_type_is_func_proto(t)) {
6719 bpf_log(log, "Invalid type of function %s()\n", tname);
6720 return -EFAULT;
6721 }
6722 args = (const struct btf_param *)(t + 1);
6723 nargs = btf_type_vlen(t);
6724 if (nargs > MAX_BPF_FUNC_REG_ARGS) {
6725 bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
6726 tname, nargs, MAX_BPF_FUNC_REG_ARGS);
6727 return -EINVAL;
6728 }
6729 /* check that function returns int */
6730 t = btf_type_by_id(btf, t->type);
6731 while (btf_type_is_modifier(t))
6732 t = btf_type_by_id(btf, t->type);
6733 if (!btf_type_is_int(t) && !btf_is_any_enum(t)) {
6734 bpf_log(log,
6735 "Global function %s() doesn't return scalar. Only those are supported.\n",
6736 tname);
6737 return -EINVAL;
6738 }
6739 /* Convert BTF function arguments into verifier types.
6740 * Only PTR_TO_CTX and SCALAR are supported atm.
6741 */
6742 for (i = 0; i < nargs; i++) {
6743 struct bpf_reg_state *reg = &regs[i + 1];
6744
6745 t = btf_type_by_id(btf, args[i].type);
6746 while (btf_type_is_modifier(t))
6747 t = btf_type_by_id(btf, t->type);
6748 if (btf_type_is_int(t) || btf_is_any_enum(t)) {
6749 reg->type = SCALAR_VALUE;
6750 continue;
6751 }
6752 if (btf_type_is_ptr(t)) {
6753 if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
6754 reg->type = PTR_TO_CTX;
6755 continue;
6756 }
6757
6758 t = btf_type_skip_modifiers(btf, t->type, NULL);
6759
6760 ref_t = btf_resolve_size(btf, t, &reg->mem_size);
6761 if (IS_ERR(ref_t)) {
6762 bpf_log(log,
6763 "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
6764 i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
6765 PTR_ERR(ref_t));
6766 return -EINVAL;
6767 }
6768
6769 reg->type = PTR_TO_MEM | PTR_MAYBE_NULL;
6770 reg->id = ++env->id_gen;
6771
6772 continue;
6773 }
6774 bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
6775 i, btf_type_str(t), tname);
6776 return -EINVAL;
6777 }
6778 return 0;
6779 }
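
/* For a hypothetical global subprog
 *
 *	int frob(struct __sk_buff *skb, int n, struct foo *f);
 *
 * the conversion above would roughly produce:
 *
 *	regs[1] -> PTR_TO_CTX                  (skb is the prog's ctx type)
 *	regs[2] -> SCALAR_VALUE                (n)
 *	regs[3] -> PTR_TO_MEM | PTR_MAYBE_NULL (mem_size = sizeof(struct foo))
 */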
6780
6781 static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
6782 struct btf_show *show)
6783 {
6784 const struct btf_type *t = btf_type_by_id(btf, type_id);
6785
6786 show->btf = btf;
6787 memset(&show->state, 0, sizeof(show->state));
6788 memset(&show->obj, 0, sizeof(show->obj));
6789
6790 btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
6791 }
6792
6793 static void btf_seq_show(struct btf_show *show, const char *fmt,
6794 va_list args)
6795 {
6796 seq_vprintf((struct seq_file *)show->target, fmt, args);
6797 }
6798
6799 int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
6800 void *obj, struct seq_file *m, u64 flags)
6801 {
6802 struct btf_show sseq;
6803
6804 sseq.target = m;
6805 sseq.showfn = btf_seq_show;
6806 sseq.flags = flags;
6807
6808 btf_type_show(btf, type_id, obj, &sseq);
6809
6810 return sseq.state.status;
6811 }
6812
6813 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
6814 struct seq_file *m)
6815 {
6816 (void) btf_type_seq_show_flags(btf, type_id, obj, m,
6817 BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
6818 BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
6819 }
6820
6821 struct btf_show_snprintf {
6822 struct btf_show show;
6823 int len_left; /* space left in string */
6824 int len; /* length we would have written */
6825 };
6826
6827 static void btf_snprintf_show(struct btf_show *show, const char *fmt,
6828 va_list args)
6829 {
6830 struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
6831 int len;
6832
6833 len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
6834
6835 if (len < 0) {
6836 ssnprintf->len_left = 0;
6837 ssnprintf->len = len;
6838 } else if (len >= ssnprintf->len_left) {
6839 /* no space, drive on to get length we would have written */
6840 ssnprintf->len_left = 0;
6841 ssnprintf->len += len;
6842 } else {
6843 ssnprintf->len_left -= len;
6844 ssnprintf->len += len;
6845 show->target += len;
6846 }
6847 }
6848
6849 int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
6850 char *buf, int len, u64 flags)
6851 {
6852 struct btf_show_snprintf ssnprintf;
6853
6854 ssnprintf.show.target = buf;
6855 ssnprintf.show.flags = flags;
6856 ssnprintf.show.showfn = btf_snprintf_show;
6857 ssnprintf.len_left = len;
6858 ssnprintf.len = 0;
6859
6860 btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);
6861
6862 /* If we encountered an error, return it. */
6863 if (ssnprintf.show.state.status)
6864 return ssnprintf.show.state.status;
6865
6866 /* Otherwise return length we would have written */
6867 return ssnprintf.len;
6868 }
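
/* Like snprintf(), the value returned above is the length the full
 * dump needs (excluding the trailing NUL), even when the buffer was
 * too small.  An illustrative two-pass caller:
 *
 *	len = btf_type_snprintf_show(btf, id, obj, tmp, sizeof(tmp), flags);
 *	if (len >= sizeof(tmp))
 *		retry with a buffer of at least len + 1 bytes
 */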
6869
6870 #ifdef CONFIG_PROC_FS
6871 static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
6872 {
6873 const struct btf *btf = filp->private_data;
6874
6875 seq_printf(m, "btf_id:\t%u\n", btf->id);
6876 }
6877 #endif
6878
6879 static int btf_release(struct inode *inode, struct file *filp)
6880 {
6881 btf_put(filp->private_data);
6882 return 0;
6883 }
6884
6885 const struct file_operations btf_fops = {
6886 #ifdef CONFIG_PROC_FS
6887 .show_fdinfo = bpf_btf_show_fdinfo,
6888 #endif
6889 .release = btf_release,
6890 };
6891
6892 static int __btf_new_fd(struct btf *btf)
6893 {
6894 return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
6895 }
6896
6897 int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr)
6898 {
6899 struct btf *btf;
6900 int ret;
6901
6902 btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel),
6903 attr->btf_size, attr->btf_log_level,
6904 u64_to_user_ptr(attr->btf_log_buf),
6905 attr->btf_log_size);
6906 if (IS_ERR(btf))
6907 return PTR_ERR(btf);
6908
6909 ret = btf_alloc_id(btf);
6910 if (ret) {
6911 btf_free(btf);
6912 return ret;
6913 }
6914
6915 /*
6916 * The BTF ID is published to userspace.
6917 * All BTF frees must go through call_rcu() from
6918 * now on (i.e. free by calling btf_put()).
6919 */
6920
6921 ret = __btf_new_fd(btf);
6922 if (ret < 0)
6923 btf_put(btf);
6924
6925 return ret;
6926 }
6927
6928 struct btf *btf_get_by_fd(int fd)
6929 {
6930 struct btf *btf;
6931 struct fd f;
6932
6933 f = fdget(fd);
6934
6935 if (!f.file)
6936 return ERR_PTR(-EBADF);
6937
6938 if (f.file->f_op != &btf_fops) {
6939 fdput(f);
6940 return ERR_PTR(-EINVAL);
6941 }
6942
6943 btf = f.file->private_data;
6944 refcount_inc(&btf->refcnt);
6945 fdput(f);
6946
6947 return btf;
6948 }
6949
6950 int btf_get_info_by_fd(const struct btf *btf,
6951 const union bpf_attr *attr,
6952 union bpf_attr __user *uattr)
6953 {
6954 struct bpf_btf_info __user *uinfo;
6955 struct bpf_btf_info info;
6956 u32 info_copy, btf_copy;
6957 void __user *ubtf;
6958 char __user *uname;
6959 u32 uinfo_len, uname_len, name_len;
6960 int ret = 0;
6961
6962 uinfo = u64_to_user_ptr(attr->info.info);
6963 uinfo_len = attr->info.info_len;
6964
6965 info_copy = min_t(u32, uinfo_len, sizeof(info));
6966 memset(&info, 0, sizeof(info));
6967 if (copy_from_user(&info, uinfo, info_copy))
6968 return -EFAULT;
6969
6970 info.id = btf->id;
6971 ubtf = u64_to_user_ptr(info.btf);
6972 btf_copy = min_t(u32, btf->data_size, info.btf_size);
6973 if (copy_to_user(ubtf, btf->data, btf_copy))
6974 return -EFAULT;
6975 info.btf_size = btf->data_size;
6976
6977 info.kernel_btf = btf->kernel_btf;
6978
6979 uname = u64_to_user_ptr(info.name);
6980 uname_len = info.name_len;
6981 if (!uname ^ !uname_len)
6982 return -EINVAL;
6983
6984 name_len = strlen(btf->name);
6985 info.name_len = name_len;
6986
6987 if (uname) {
6988 if (uname_len >= name_len + 1) {
6989 if (copy_to_user(uname, btf->name, name_len + 1))
6990 return -EFAULT;
6991 } else {
6992 char zero = '\0';
6993
6994 if (copy_to_user(uname, btf->name, uname_len - 1))
6995 return -EFAULT;
6996 if (put_user(zero, uname + uname_len - 1))
6997 return -EFAULT;
6998 /* let user-space know about too short buffer */
6999 ret = -ENOSPC;
7000 }
7001 }
7002
7003 if (copy_to_user(uinfo, &info, info_copy) ||
7004 put_user(info_copy, &uattr->info.info_len))
7005 return -EFAULT;
7006
7007 return ret;
7008 }
7009
7010 int btf_get_fd_by_id(u32 id)
7011 {
7012 struct btf *btf;
7013 int fd;
7014
7015 rcu_read_lock();
7016 btf = idr_find(&btf_idr, id);
7017 if (!btf || !refcount_inc_not_zero(&btf->refcnt))
7018 btf = ERR_PTR(-ENOENT);
7019 rcu_read_unlock();
7020
7021 if (IS_ERR(btf))
7022 return PTR_ERR(btf);
7023
7024 fd = __btf_new_fd(btf);
7025 if (fd < 0)
7026 btf_put(btf);
7027
7028 return fd;
7029 }
7030
7031 u32 btf_obj_id(const struct btf *btf)
7032 {
7033 return btf->id;
7034 }
7035
7036 bool btf_is_kernel(const struct btf *btf)
7037 {
7038 return btf->kernel_btf;
7039 }
7040
7041 bool btf_is_module(const struct btf *btf)
7042 {
7043 return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
7044 }
7045
7046 static int btf_id_cmp_func(const void *a, const void *b)
7047 {
7048 const int *pa = a, *pb = b;
7049
7050 return *pa - *pb;
7051 }
7052
7053 bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
7054 {
7055 return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
7056 }
7057
7058 static void *btf_id_set8_contains(const struct btf_id_set8 *set, u32 id)
7059 {
7060 return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func);
7061 }
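
/* Both lookups above rely on btf_id_cmp_func() comparing only the
 * leading u32.  That also works for btf_id_set8, whose entries are
 * (id, flags) pairs sorted by id.  Hypothetically:
 *
 *	pairs[] = { {10, 0}, {42, KF_RELEASE}, {77, 0} };
 *	btf_id_set8_contains(set, 42) -> &pairs[1]
 */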
7062
7063 enum {
7064 BTF_MODULE_F_LIVE = (1 << 0),
7065 };
7066
7067 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
7068 struct btf_module {
7069 struct list_head list;
7070 struct module *module;
7071 struct btf *btf;
7072 struct bin_attribute *sysfs_attr;
7073 int flags;
7074 };
7075
7076 static LIST_HEAD(btf_modules);
7077 static DEFINE_MUTEX(btf_module_mutex);
7078
7079 static ssize_t
7080 btf_module_read(struct file *file, struct kobject *kobj,
7081 struct bin_attribute *bin_attr,
7082 char *buf, loff_t off, size_t len)
7083 {
7084 const struct btf *btf = bin_attr->private;
7085
7086 memcpy(buf, btf->data + off, len);
7087 return len;
7088 }
7089
7090 static void purge_cand_cache(struct btf *btf);
7091
7092 static int btf_module_notify(struct notifier_block *nb, unsigned long op,
7093 void *module)
7094 {
7095 struct btf_module *btf_mod, *tmp;
7096 struct module *mod = module;
7097 struct btf *btf;
7098 int err = 0;
7099
7100 if (mod->btf_data_size == 0 ||
7101 (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE &&
7102 op != MODULE_STATE_GOING))
7103 goto out;
7104
7105 switch (op) {
7106 case MODULE_STATE_COMING:
7107 btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL);
7108 if (!btf_mod) {
7109 err = -ENOMEM;
7110 goto out;
7111 }
7112 btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size);
7113 if (IS_ERR(btf)) {
7114 pr_warn("failed to validate module [%s] BTF: %ld\n",
7115 mod->name, PTR_ERR(btf));
7116 kfree(btf_mod);
7117 if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
7118 err = PTR_ERR(btf);
7119 goto out;
7120 }
7121 err = btf_alloc_id(btf);
7122 if (err) {
7123 btf_free(btf);
7124 kfree(btf_mod);
7125 goto out;
7126 }
7127
7128 purge_cand_cache(NULL);
7129 mutex_lock(&btf_module_mutex);
7130 btf_mod->module = module;
7131 btf_mod->btf = btf;
7132 list_add(&btf_mod->list, &btf_modules);
7133 mutex_unlock(&btf_module_mutex);
7134
7135 if (IS_ENABLED(CONFIG_SYSFS)) {
7136 struct bin_attribute *attr;
7137
7138 attr = kzalloc(sizeof(*attr), GFP_KERNEL);
7139 if (!attr)
7140 goto out;
7141
7142 sysfs_bin_attr_init(attr);
7143 attr->attr.name = btf->name;
7144 attr->attr.mode = 0444;
7145 attr->size = btf->data_size;
7146 attr->private = btf;
7147 attr->read = btf_module_read;
7148
7149 err = sysfs_create_bin_file(btf_kobj, attr);
7150 if (err) {
7151 pr_warn("failed to register module [%s] BTF in sysfs: %d\n",
7152 mod->name, err);
7153 kfree(attr);
7154 err = 0;
7155 goto out;
7156 }
7157
7158 btf_mod->sysfs_attr = attr;
7159 }
7160
7161 break;
7162 case MODULE_STATE_LIVE:
7163 mutex_lock(&btf_module_mutex);
7164 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
7165 if (btf_mod->module != module)
7166 continue;
7167
7168 btf_mod->flags |= BTF_MODULE_F_LIVE;
7169 break;
7170 }
7171 mutex_unlock(&btf_module_mutex);
7172 break;
7173 case MODULE_STATE_GOING:
7174 mutex_lock(&btf_module_mutex);
7175 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
7176 if (btf_mod->module != module)
7177 continue;
7178
7179 list_del(&btf_mod->list);
7180 if (btf_mod->sysfs_attr)
7181 sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
7182 purge_cand_cache(btf_mod->btf);
7183 btf_put(btf_mod->btf);
7184 kfree(btf_mod->sysfs_attr);
7185 kfree(btf_mod);
7186 break;
7187 }
7188 mutex_unlock(&btf_module_mutex);
7189 break;
7190 }
7191 out:
7192 return notifier_from_errno(err);
7193 }
7194
7195 static struct notifier_block btf_module_nb = {
7196 .notifier_call = btf_module_notify,
7197 };
7198
btf_module_init(void)7199 static int __init btf_module_init(void)
7200 {
7201 register_module_notifier(&btf_module_nb);
7202 return 0;
7203 }
7204
7205 fs_initcall(btf_module_init);
7206 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
7207
btf_try_get_module(const struct btf * btf)7208 struct module *btf_try_get_module(const struct btf *btf)
7209 {
7210 struct module *res = NULL;
7211 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
7212 struct btf_module *btf_mod, *tmp;
7213
7214 mutex_lock(&btf_module_mutex);
7215 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
7216 if (btf_mod->btf != btf)
7217 continue;
7218
		/* We must only consider modules whose __init routine has
		 * finished, hence we must check for the BTF_MODULE_F_LIVE flag,
		 * which is set from the notifier callback for
		 * MODULE_STATE_LIVE.
		 */
		if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
			res = btf_mod->module;

		break;
	}
	mutex_unlock(&btf_module_mutex);
#endif

	return res;
}

/* Returns struct btf corresponding to the struct module.
 * This function can return NULL or ERR_PTR.
 */
static struct btf *btf_get_module_btf(const struct module *module)
{
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
	struct btf_module *btf_mod, *tmp;
#endif
	struct btf *btf = NULL;

	if (!module) {
		btf = bpf_get_btf_vmlinux();
		if (!IS_ERR_OR_NULL(btf))
			btf_get(btf);
		return btf;
	}

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
	mutex_lock(&btf_module_mutex);
	list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
		if (btf_mod->module != module)
			continue;

		btf_get(btf_mod->btf);
		btf = btf_mod->btf;
		break;
	}
	mutex_unlock(&btf_module_mutex);
#endif

	return btf;
}

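/* bpf_btf_find_by_name_kind() helper: looks up a BTF type by name and kind.
 * On success the type id is returned in the lower 32 bits; for types found in
 * a module BTF, a new FD referencing that BTF object is returned in the upper
 * 32 bits so that the caller keeps the module BTF alive.
 */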
BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
{
	struct btf *btf = NULL;
	int btf_obj_fd = 0;
	long ret;

	if (flags)
		return -EINVAL;

	if (name_sz <= 1 || name[name_sz - 1])
		return -EINVAL;

	ret = bpf_find_btf_id(name, kind, &btf);
	if (ret > 0 && btf_is_module(btf)) {
		btf_obj_fd = __btf_new_fd(btf);
		if (btf_obj_fd < 0) {
			btf_put(btf);
			return btf_obj_fd;
		}
		return ret | (((u64)btf_obj_fd) << 32);
	}
	if (ret > 0)
		btf_put(btf);
	return ret;
}

const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
	.func		= bpf_btf_find_by_name_kind,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
#define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
BTF_TRACING_TYPE_xxx
#undef BTF_TRACING_TYPE

/* Kernel Function (kfunc) BTF ID set registration API */

static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
				  struct btf_id_set8 *add_set)
{
	bool vmlinux_set = !btf_is_module(btf);
	struct btf_kfunc_set_tab *tab;
	struct btf_id_set8 *set;
	u32 set_cnt;
	int ret;

	if (hook >= BTF_KFUNC_HOOK_MAX) {
		ret = -EINVAL;
		goto end;
	}

	if (!add_set->cnt)
		return 0;

	tab = btf->kfunc_set_tab;
	if (!tab) {
		tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN);
		if (!tab)
			return -ENOMEM;
		btf->kfunc_set_tab = tab;
	}

	set = tab->sets[hook];
	/* Warn when register_btf_kfunc_id_set is called twice for the same hook
	 * for module sets.
	 */
	if (WARN_ON_ONCE(set && !vmlinux_set)) {
		ret = -EINVAL;
		goto end;
	}

	/* We don't need to allocate, concatenate, and sort module sets, because
	 * only one is allowed per hook. Hence, we can directly assign the
	 * pointer and return.
	 */
	if (!vmlinux_set) {
		tab->sets[hook] = add_set;
		return 0;
	}

	/* In case of vmlinux sets, there may be more than one set being
	 * registered per hook. To create a unified set, we allocate a new set
	 * and concatenate all individual sets being registered. While each set
	 * is individually sorted, they may become unsorted when concatenated,
	 * hence re-sorting the final set again is required to make binary
	 * searching the set using btf_id_set8_contains function work.
	 */
	set_cnt = set ? set->cnt : 0;

	if (set_cnt > U32_MAX - add_set->cnt) {
		ret = -EOVERFLOW;
		goto end;
	}

	if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) {
		ret = -E2BIG;
		goto end;
	}

	/* Grow set */
	set = krealloc(tab->sets[hook],
		       offsetof(struct btf_id_set8, pairs[set_cnt + add_set->cnt]),
		       GFP_KERNEL | __GFP_NOWARN);
	if (!set) {
		ret = -ENOMEM;
		goto end;
	}

	/* For newly allocated set, initialize set->cnt to 0 */
	if (!tab->sets[hook])
		set->cnt = 0;
	tab->sets[hook] = set;

	/* Concatenate the two sets */
	memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0]));
	set->cnt += add_set->cnt;

	sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);

	return 0;
end:
	btf_free_kfunc_set_tab(btf);
	return ret;
}

static u32 *__btf_kfunc_id_set_contains(const struct btf *btf,
					enum btf_kfunc_hook hook,
					u32 kfunc_btf_id)
{
	struct btf_id_set8 *set;
	u32 *id;

	if (hook >= BTF_KFUNC_HOOK_MAX)
		return NULL;
	if (!btf->kfunc_set_tab)
		return NULL;
	set = btf->kfunc_set_tab->sets[hook];
	if (!set)
		return NULL;
	id = btf_id_set8_contains(set, kfunc_btf_id);
	if (!id)
		return NULL;
	/* The flags for BTF ID are located next to it */
	return id + 1;
}

static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_XDP:
		return BTF_KFUNC_HOOK_XDP;
	case BPF_PROG_TYPE_SCHED_CLS:
		return BTF_KFUNC_HOOK_TC;
	case BPF_PROG_TYPE_STRUCT_OPS:
		return BTF_KFUNC_HOOK_STRUCT_OPS;
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_LSM:
		return BTF_KFUNC_HOOK_TRACING;
	case BPF_PROG_TYPE_SYSCALL:
		return BTF_KFUNC_HOOK_SYSCALL;
	default:
		return BTF_KFUNC_HOOK_MAX;
	}
}

/* Caution:
 * Reference to the module (obtained using btf_try_get_module) corresponding to
 * the struct btf *MUST* be held when calling this function from verifier
 * context. This is usually true as we stash references in prog's kfunc_btf_tab;
 * keeping the reference for the duration of the call provides the necessary
 * protection for looking up a well-formed btf->kfunc_set_tab.
 */
u32 *btf_kfunc_id_set_contains(const struct btf *btf,
			       enum bpf_prog_type prog_type,
			       u32 kfunc_btf_id)
{
	enum btf_kfunc_hook hook;

	hook = bpf_prog_type_to_kfunc_hook(prog_type);
	return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id);
}

/* This function must be invoked only from initcalls/module init functions */
int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
			      const struct btf_kfunc_id_set *kset)
{
	enum btf_kfunc_hook hook;
	struct btf *btf;
	int ret;

	btf = btf_get_module_btf(kset->owner);
	if (!btf) {
		if (!kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
			pr_err("missing vmlinux BTF, cannot register kfuncs\n");
			return -ENOENT;
		}
		if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
			pr_err("missing module BTF, cannot register kfuncs\n");
			return -ENOENT;
		}
		return 0;
	}
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	hook = bpf_prog_type_to_kfunc_hook(prog_type);
	ret = btf_populate_kfunc_set(btf, hook, kset->set);
	btf_put(btf);
	return ret;
}
EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set);
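
/* Usage sketch (illustrative only, not part of this file): a caller typically
 * declares a btf_id_set8 with the BTF_SET8 macros and registers it from an
 * initcall or module init function. "bpf_example_kfunc" and
 * "example_kfunc_ids" below are placeholder names:
 *
 *	BTF_SET8_START(example_kfunc_ids)
 *	BTF_ID_FLAGS(func, bpf_example_kfunc, KF_ACQUIRE)
 *	BTF_SET8_END(example_kfunc_ids)
 *
 *	static const struct btf_kfunc_id_set example_kfunc_set = {
 *		.owner = THIS_MODULE,
 *		.set   = &example_kfunc_ids,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
 *						 &example_kfunc_set);
 *	}
 */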

s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
{
	struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
	struct btf_id_dtor_kfunc *dtor;

	if (!tab)
		return -ENOENT;
	/* Even though the size of tab->dtors[0] is > sizeof(u32), we only need
	 * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func.
	 */
	BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0);
	dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
	if (!dtor)
		return -ENOENT;
	return dtor->kfunc_btf_id;
}

static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt)
{
	const struct btf_type *dtor_func, *dtor_func_proto, *t;
	const struct btf_param *args;
	s32 dtor_btf_id;
	u32 nr_args, i;

	for (i = 0; i < cnt; i++) {
		dtor_btf_id = dtors[i].kfunc_btf_id;

		dtor_func = btf_type_by_id(btf, dtor_btf_id);
		if (!dtor_func || !btf_type_is_func(dtor_func))
			return -EINVAL;

		dtor_func_proto = btf_type_by_id(btf, dtor_func->type);
		if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto))
			return -EINVAL;

		/* Make sure the prototype of the destructor kfunc is 'void func(type *)' */
		t = btf_type_by_id(btf, dtor_func_proto->type);
		if (!t || !btf_type_is_void(t))
			return -EINVAL;

		nr_args = btf_type_vlen(dtor_func_proto);
		if (nr_args != 1)
			return -EINVAL;
		args = btf_params(dtor_func_proto);
		t = btf_type_by_id(btf, args[0].type);
		/* Allow any pointer type, as the pointer width on targets Linux
		 * supports is the same for all pointer types (i.e. sizeof(void *)).
		 */
		if (!t || !btf_type_is_ptr(t))
			return -EINVAL;
	}
	return 0;
}

/* This function must be invoked only from initcalls/module init functions */
int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
				struct module *owner)
{
	struct btf_id_dtor_kfunc_tab *tab;
	struct btf *btf;
	u32 tab_cnt;
	int ret;

	btf = btf_get_module_btf(owner);
	if (!btf) {
		if (!owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
			pr_err("missing vmlinux BTF, cannot register dtor kfuncs\n");
			return -ENOENT;
		}
		if (owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
			pr_err("missing module BTF, cannot register dtor kfuncs\n");
			return -ENOENT;
		}
		return 0;
	}
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
		pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
		ret = -E2BIG;
		goto end;
	}

	/* Ensure that the prototype of dtor kfuncs being registered is sane */
	ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt);
	if (ret < 0)
		goto end;

	tab = btf->dtor_kfunc_tab;
	/* Only one call allowed for modules */
	if (WARN_ON_ONCE(tab && btf_is_module(btf))) {
		ret = -EINVAL;
		goto end;
	}

	tab_cnt = tab ? tab->cnt : 0;
	if (tab_cnt > U32_MAX - add_cnt) {
		ret = -EOVERFLOW;
		goto end;
	}
	if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
		pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
		ret = -E2BIG;
		goto end;
	}

	tab = krealloc(btf->dtor_kfunc_tab,
		       offsetof(struct btf_id_dtor_kfunc_tab, dtors[tab_cnt + add_cnt]),
		       GFP_KERNEL | __GFP_NOWARN);
	if (!tab) {
		ret = -ENOMEM;
		goto end;
	}

	if (!btf->dtor_kfunc_tab)
		tab->cnt = 0;
	btf->dtor_kfunc_tab = tab;

	memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0]));
	tab->cnt += add_cnt;

	sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);

	return 0;
end:
	btf_free_dtor_kfunc_tab(btf);
	btf_put(btf);
	return ret;
}
EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs);
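
/* Usage sketch (illustrative only, not part of this file): a destructor kfunc
 * is registered by pairing the BTF id of a type with the BTF id of its
 * 'void dtor(type *)' kfunc. "example_obj" and "bpf_example_obj_release"
 * below are placeholder names:
 *
 *	BTF_ID_LIST(example_dtor_ids)
 *	BTF_ID(struct, example_obj)
 *	BTF_ID(func, bpf_example_obj_release)
 *
 *	static const struct btf_id_dtor_kfunc example_dtors[] = {
 *		{
 *			.btf_id	      = example_dtor_ids[0],
 *			.kfunc_btf_id = example_dtor_ids[1],
 *		},
 *	};
 *
 *	register_btf_id_dtor_kfuncs(example_dtors, ARRAY_SIZE(example_dtors),
 *				    THIS_MODULE);
 */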

#define MAX_TYPES_ARE_COMPAT_DEPTH 2

/* Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follows slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
 *   kind should match for local and target types (i.e., STRUCT is not
 *   compatible with UNION);
 * - for ENUMs/ENUM64s, the size is ignored;
 * - for INT, size and signedness are ignored;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
 * - FUNC_PROTOs are compatible if they have compatible signature: same
 *   number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 */
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
			      const struct btf *targ_btf, __u32 targ_id)
{
	return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
					   MAX_TYPES_ARE_COMPAT_DEPTH);
}

#define MAX_TYPES_MATCH_DEPTH 2

int bpf_core_types_match(const struct btf *local_btf, u32 local_id,
			 const struct btf *targ_btf, u32 targ_id)
{
	return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false,
				      MAX_TYPES_MATCH_DEPTH);
}

static bool bpf_core_is_flavor_sep(const char *s)
{
	/* check X___Y name pattern, where X and Y are not underscores */
	return s[0] != '_' &&				      /* X */
	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
	       s[4] != '_';				      /* Y */
}

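/* Length of the "essential" part of a possibly "flavored" type name, i.e.
 * everything before the ___ separator: e.g. for "task_struct___flavored"
 * this is strlen("task_struct"), while a name without a flavor suffix is
 * returned in full.
 */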
size_t bpf_core_essential_name_len(const char *name)
{
	size_t n = strlen(name);
	int i;

	for (i = n - 5; i >= 0; i--) {
		if (bpf_core_is_flavor_sep(name + i))
			return i + 1;
	}
	return n;
}

struct bpf_cand_cache {
	const char *name;
	u32 name_len;
	u16 kind;
	u16 cnt;
	struct {
		const struct btf *btf;
		u32 id;
	} cands[];
};

static void bpf_free_cands(struct bpf_cand_cache *cands)
{
	if (!cands->cnt)
		/* empty candidate array was allocated on stack */
		return;
	kfree(cands);
}

static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands)
{
	kfree(cands->name);
	kfree(cands);
}

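/* The candidate caches below are small direct-mapped hash tables keyed by
 * jhash of the essential type name: each name hashes to a single slot and a
 * colliding entry simply replaces the previous one. vmlinux and module
 * candidates are cached separately so that module load/unload only needs to
 * invalidate module_cand_cache.
 */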
#define VMLINUX_CAND_CACHE_SIZE 31
static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE];

#define MODULE_CAND_CACHE_SIZE 31
static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE];

static DEFINE_MUTEX(cand_cache_mutex);

static void __print_cand_cache(struct bpf_verifier_log *log,
			       struct bpf_cand_cache **cache,
			       int cache_size)
{
	struct bpf_cand_cache *cc;
	int i, j;

	for (i = 0; i < cache_size; i++) {
		cc = cache[i];
		if (!cc)
			continue;
		bpf_log(log, "[%d]%s(", i, cc->name);
		for (j = 0; j < cc->cnt; j++) {
			bpf_log(log, "%d", cc->cands[j].id);
			if (j < cc->cnt - 1)
				bpf_log(log, " ");
		}
		bpf_log(log, "), ");
	}
}

static void print_cand_cache(struct bpf_verifier_log *log)
{
	mutex_lock(&cand_cache_mutex);
	bpf_log(log, "vmlinux_cand_cache:");
	__print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	bpf_log(log, "\nmodule_cand_cache:");
	__print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE);
	bpf_log(log, "\n");
	mutex_unlock(&cand_cache_mutex);
}

static u32 hash_cands(struct bpf_cand_cache *cands)
{
	return jhash(cands->name, cands->name_len, 0);
}

static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands,
					       struct bpf_cand_cache **cache,
					       int cache_size)
{
	struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size];

	if (cc && cc->name_len == cands->name_len &&
	    !strncmp(cc->name, cands->name, cands->name_len))
		return cc;
	return NULL;
}

static size_t sizeof_cands(int cnt)
{
	return offsetof(struct bpf_cand_cache, cands[cnt]);
}

static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands,
						  struct bpf_cand_cache **cache,
						  int cache_size)
{
	struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands;

	if (*cc) {
		bpf_free_cands_from_cache(*cc);
		*cc = NULL;
	}
	new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL);
	if (!new_cands) {
		bpf_free_cands(cands);
		return ERR_PTR(-ENOMEM);
	}
	/* strdup the name, since it will stay in cache.
	 * the cands->name points to strings in prog's BTF and the prog can be unloaded.
	 */
	new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
	bpf_free_cands(cands);
	if (!new_cands->name) {
		kfree(new_cands);
		return ERR_PTR(-ENOMEM);
	}
	*cc = new_cands;
	return new_cands;
}

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache,
			       int cache_size)
{
	struct bpf_cand_cache *cc;
	int i, j;

	for (i = 0; i < cache_size; i++) {
		cc = cache[i];
		if (!cc)
			continue;
		if (!btf) {
			/* when new module is loaded purge all of module_cand_cache,
			 * since new module might have candidates with the name
			 * that matches cached cands.
			 */
			bpf_free_cands_from_cache(cc);
			cache[i] = NULL;
			continue;
		}
		/* when module is unloaded purge cache entries
		 * that match module's btf
		 */
		for (j = 0; j < cc->cnt; j++)
			if (cc->cands[j].btf == btf) {
				bpf_free_cands_from_cache(cc);
				cache[i] = NULL;
				break;
			}
	}

}

static void purge_cand_cache(struct btf *btf)
{
	mutex_lock(&cand_cache_mutex);
	__purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE);
	mutex_unlock(&cand_cache_mutex);
}
#endif

static struct bpf_cand_cache *
bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
		   int targ_start_id)
{
	struct bpf_cand_cache *new_cands;
	const struct btf_type *t;
	const char *targ_name;
	size_t targ_essent_len;
	int n, i;

	n = btf_nr_types(targ_btf);
	for (i = targ_start_id; i < n; i++) {
		t = btf_type_by_id(targ_btf, i);
		if (btf_kind(t) != cands->kind)
			continue;

		targ_name = btf_name_by_offset(targ_btf, t->name_off);
		if (!targ_name)
			continue;

		/* the resched point is before strncmp to make sure that search
		 * for non-existing name will have a chance to schedule().
		 */
		cond_resched();

		if (strncmp(cands->name, targ_name, cands->name_len) != 0)
			continue;

		targ_essent_len = bpf_core_essential_name_len(targ_name);
		if (targ_essent_len != cands->name_len)
			continue;

		/* most of the time there is only one candidate for a given kind+name pair */
		new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
		if (!new_cands) {
			bpf_free_cands(cands);
			return ERR_PTR(-ENOMEM);
		}

		memcpy(new_cands, cands, sizeof_cands(cands->cnt));
		bpf_free_cands(cands);
		cands = new_cands;
		cands->cands[cands->cnt].btf = targ_btf;
		cands->cands[cands->cnt].id = i;
		cands->cnt++;
	}
	return cands;
}

static struct bpf_cand_cache *
bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
{
	struct bpf_cand_cache *cands, *cc, local_cand = {};
	const struct btf *local_btf = ctx->btf;
	const struct btf_type *local_type;
	const struct btf *main_btf;
	size_t local_essent_len;
	struct btf *mod_btf;
	const char *name;
	int id;

	main_btf = bpf_get_btf_vmlinux();
	if (IS_ERR(main_btf))
		return ERR_CAST(main_btf);
	if (!main_btf)
		return ERR_PTR(-EINVAL);

	local_type = btf_type_by_id(local_btf, local_type_id);
	if (!local_type)
		return ERR_PTR(-EINVAL);

	name = btf_name_by_offset(local_btf, local_type->name_off);
	if (str_is_empty(name))
		return ERR_PTR(-EINVAL);
	local_essent_len = bpf_core_essential_name_len(name);

	cands = &local_cand;
	cands->name = name;
	cands->kind = btf_kind(local_type);
	cands->name_len = local_essent_len;

	cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	/* cands is a pointer to stack here */
	if (cc) {
		if (cc->cnt)
			return cc;
		goto check_modules;
	}

	/* Attempt to find target candidates in vmlinux BTF first */
	cands = bpf_core_add_cands(cands, main_btf, 1);
	if (IS_ERR(cands))
		return ERR_CAST(cands);

	/* cands is a pointer to kmalloced memory here if cands->cnt > 0 */

	/* populate cache even when cands->cnt == 0 */
	cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	if (IS_ERR(cc))
		return ERR_CAST(cc);

	/* if vmlinux BTF has any candidate, don't go for module BTFs */
	if (cc->cnt)
		return cc;

check_modules:
	/* cands is a pointer to stack here and cands->cnt == 0 */
	cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
	if (cc)
		/* if cache has it return it even if cc->cnt == 0 */
		return cc;

	/* If candidate is not found in vmlinux's BTF then search in module's BTFs */
	spin_lock_bh(&btf_idr_lock);
	idr_for_each_entry(&btf_idr, mod_btf, id) {
		if (!btf_is_module(mod_btf))
			continue;
		/* linear search could be slow hence unlock/lock
		 * the IDR to avoid holding it for too long
		 */
		btf_get(mod_btf);
		spin_unlock_bh(&btf_idr_lock);
		cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
		if (IS_ERR(cands)) {
			btf_put(mod_btf);
			return ERR_CAST(cands);
		}
		spin_lock_bh(&btf_idr_lock);
		btf_put(mod_btf);
	}
	spin_unlock_bh(&btf_idr_lock);
	/* cands is a pointer to kmalloced memory here if cands->cnt > 0
	 * or a pointer to stack if cands->cnt == 0.
	 * Copy it into the cache even when cands->cnt == 0 and
	 * return the result.
	 */
	return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
}

int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
		   int relo_idx, void *insn)
{
	bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
	struct bpf_core_cand_list cands = {};
	struct bpf_core_relo_res targ_res;
	struct bpf_core_spec *specs;
	int err;

	/* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5"
	 * into arrays of btf_ids of struct fields and array indices.
	 */
	specs = kcalloc(3, sizeof(*specs), GFP_KERNEL);
	if (!specs)
		return -ENOMEM;

	if (need_cands) {
		struct bpf_cand_cache *cc;
		int i;

		mutex_lock(&cand_cache_mutex);
		cc = bpf_core_find_cands(ctx, relo->type_id);
		if (IS_ERR(cc)) {
			bpf_log(ctx->log, "target candidate search failed for %d\n",
				relo->type_id);
			err = PTR_ERR(cc);
			goto out;
		}
		if (cc->cnt) {
			cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL);
			if (!cands.cands) {
				err = -ENOMEM;
				goto out;
			}
		}
		for (i = 0; i < cc->cnt; i++) {
			bpf_log(ctx->log,
				"CO-RE relocating %s %s: found target candidate [%d]\n",
				btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
			cands.cands[i].btf = cc->cands[i].btf;
			cands.cands[i].id = cc->cands[i].id;
		}
		cands.len = cc->cnt;
		/* cand_cache_mutex needs to span the cache lookup and
		 * copy of btf pointer into bpf_core_cand_list,
		 * since module can be unloaded while bpf_core_calc_relo_insn
		 * is working with module's btf.
		 */
	}

	err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
				      &targ_res);
	if (err)
		goto out;

	err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
				  &targ_res);

out:
	kfree(specs);
	if (need_cands) {
		kfree(cands.cands);
		mutex_unlock(&cand_cache_mutex);
		if (ctx->log->level & BPF_LOG_LEVEL2)
			print_cand_cache(ctx->log);
	}
	return err;
}
