// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

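/* These function-pointer typedefs exist only so that they are emitted into
 * the module's BTF; bpf_testmod_test_btf_type_tag_user_1() below forces them
 * out via BTF_TYPE_EMIT() so selftests can look them up by name.
 */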
typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;

struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in bpf_testmod.ko BTF");

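/* Dummy functions with various struct-by-value argument layouts. They do no
 * real work; tracing selftests attach fentry/fexit programs to them to check
 * how struct arguments are passed, and the sum is stored in the global
 * bpf_testmod_test_struct_arg_result so the computation has a visible side
 * effect.
 */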
noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
			(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
			(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
	bpf_testmod_test_struct_arg_result = a->a;
	return bpf_testmod_test_struct_arg_result;
}

__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}

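/* Open-coded iterator kfuncs: bpf_iter_testmod_seq_new() initializes an
 * iterator that yields "value" cnt times, _next() returns a pointer to the
 * value or NULL when exhausted, and _destroy() resets it. They are flagged
 * below with KF_ITER_NEW/KF_ITER_NEXT/KF_ITER_DESTROY so iterator selftests
 * can exercise open-coded iteration over a module-defined type.
 */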
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	if (cnt < 0) {
		it->cnt = 0;
		return -EINVAL;
	}

	it->value = value;
	it->cnt = cnt;

	return 0;
}

__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}

struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* the primary goal of this test is to test LBR. Create a lot of
	 * branches in the function, so we can catch it easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

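/* Returns a different pointer for each "arg": user-space addresses, invalid
 * canonical and non-canonical addresses, and a valid kernel pointer (see the
 * per-case comments). Tracing programs dereference the return value to
 * exercise probe-read error handling, including the extable path.
 */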
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
	default: return NULL;
	}
}

noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

int bpf_testmod_fentry_ok;

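/* Read handler for the sysfs file created in bpf_testmod_init() ("bpf_testmod"
 * under /sys/kernel). It never returns real data: it calls the test functions
 * above so tracing programs have something to attach to, fires the
 * bpf_testmod_test_read tracepoint, and normally fails with -EIO. A read of
 * exactly 64 bytes is the magic value that exercises the writable tracepoint
 * and may return early with its value.
 */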
noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	int i = 1;

	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
			21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
			21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);

noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);

noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

__diag_pop();

static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};

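/* The iterator kfuncs are registered for BPF_PROG_TYPE_UNSPEC in
 * bpf_testmod_init(), which makes them available to every program type.
 */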
BTF_SET8_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_SET8_END(bpf_testmod_common_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &bpf_testmod_common_kfunc_ids,
};

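/* The bpf_kfunc_call_test*() kfuncs below back the kfunc_call selftests: they
 * cover scalar and pointer arguments, KF_ACQUIRE/KF_RELEASE reference
 * semantics, returned memory regions, and calls the verifier is expected to
 * reject.
 */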
__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next 2 kfuncs can't really be used for testing except to ensure that
 * the verifier rejects the call. Acquire functions must return struct
 * pointers, so these fail.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

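/* All of the kfuncs above are registered through this set for SCHED_CLS,
 * TRACING and SYSCALL programs in bpf_testmod_init() below; the KF_* flags
 * tell the verifier how each one may be called.
 */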
BTF_SET8_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_SET8_END(bpf_testmod_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &bpf_testmod_check_kfunc_ids,
};

extern int bpf_fentry_test1(int a);

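/* Module init: register the kfunc id sets and create the "bpf_testmod" sysfs
 * file that user-space selftests read and write to drive the hooks above.
 * The bpf_fentry_test1(0) call gives the module a call site into that
 * exported kernel test function (and fails init on an unexpected result).
 */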
static int bpf_testmod_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
	if (ret < 0)
		return ret;
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}

static void bpf_testmod_exit(void)
{
	return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}

module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");