// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/helper_access_var_len.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};
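/* sizeof(struct test_val) = 4 (index) + 11 * 4 (foo) = 48 bytes; the
 * map_hash_48b value below and the "value_size=48" verifier messages in
 * the map tests rely on this layout.
 */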

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");

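/* Each test below passes a variable-sized buffer to a helper that takes a
 * memory pointer plus an ARG_CONST_SIZE(_OR_ZERO) length.  The verifier has
 * to prove that the length register stays within the bounds of the
 * pointed-to region (stack window, map value or packet) and, for
 * unprivileged programs, that every byte read from the stack was
 * initialized.  (Summary of the checks exercised here, not an exhaustive
 * description of the verifier rules.)
 */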
SEC("tracepoint")
__description("helper access to variable memory: stack, bitwise AND + JMP, correct bounds")
__success
__naked void bitwise_and_jmp_correct_bounds(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += -64; \
	r0 = 0; \
	*(u64*)(r10 - 64) = r0; \
	*(u64*)(r10 - 56) = r0; \
	*(u64*)(r10 - 48) = r0; \
	*(u64*)(r10 - 40) = r0; \
	*(u64*)(r10 - 32) = r0; \
	*(u64*)(r10 - 24) = r0; \
	*(u64*)(r10 - 16) = r0; \
	*(u64*)(r10 - 8) = r0; \
	r2 = 16; \
	*(u64*)(r1 - 128) = r2; \
	r2 = *(u64*)(r1 - 128); \
	r2 &= 64; \
	r4 = 0; \
	if r4 >= r2 goto l0_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l0_%=:	r0 = 0; \
	exit; \
" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("socket")
__description("helper access to variable memory: stack, bitwise AND, zero included")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64")
__retval(0)
__naked void stack_bitwise_and_zero_included(void)
{
	asm volatile (" \
	/* set max stack size */ \
	r6 = 0; \
	*(u64*)(r10 - 128) = r6; \
	/* set r3 to a random value */ \
	call %[bpf_get_prandom_u32]; \
	r3 = r0; \
	/* use bitwise AND to limit r3 range to [0, 64] */ \
	r3 &= 64; \
	r1 = %[map_ringbuf] ll; \
	r2 = r10; \
	r2 += -64; \
	r4 = 0; \
	/* Call bpf_ringbuf_output(), it is one of a few helper functions with \
	 * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode. \
	 * For unpriv this should signal an error, because memory at &fp[-64] is \
	 * not initialized. \
	 */ \
	call %[bpf_ringbuf_output]; \
	exit; \
" :
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_ringbuf_output),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

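/* The "wrong max" tests load the size from the tracepoint context (r1 + 8),
 * so only the AND/JMP bounds established afterwards are known to the
 * verifier; a proven maximum of 65 does not fit the 64-byte stack window at
 * fp[-64], hence the expected "invalid indirect access ... size=65" message.
 */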
SEC("tracepoint")
__description("helper access to variable memory: stack, bitwise AND + JMP, wrong max")
__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
__naked void bitwise_and_jmp_wrong_max(void)
{
	asm volatile (" \
	r2 = *(u64*)(r1 + 8); \
	r1 = r10; \
	r1 += -64; \
	*(u64*)(r1 - 128) = r2; \
	r2 = *(u64*)(r1 - 128); \
	r2 &= 65; \
	r4 = 0; \
	if r4 >= r2 goto l0_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l0_%=:	r0 = 0; \
	exit; \
" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: stack, JMP, correct bounds")
__success
__naked void memory_stack_jmp_correct_bounds(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += -64; \
	r0 = 0; \
	*(u64*)(r10 - 64) = r0; \
	*(u64*)(r10 - 56) = r0; \
	*(u64*)(r10 - 48) = r0; \
	*(u64*)(r10 - 40) = r0; \
	*(u64*)(r10 - 32) = r0; \
	*(u64*)(r10 - 24) = r0; \
	*(u64*)(r10 - 16) = r0; \
	*(u64*)(r10 - 8) = r0; \
	r2 = 16; \
	*(u64*)(r1 - 128) = r2; \
	r2 = *(u64*)(r1 - 128); \
	if r2 > 64 goto l0_%=; \
	r4 = 0; \
	if r4 >= r2 goto l0_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l0_%=:	r0 = 0; \
	exit; \
" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: stack, JMP (signed), correct bounds")
__success
__naked void stack_jmp_signed_correct_bounds(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += -64; \
	r0 = 0; \
	*(u64*)(r10 - 64) = r0; \
	*(u64*)(r10 - 56) = r0; \
	*(u64*)(r10 - 48) = r0; \
	*(u64*)(r10 - 40) = r0; \
	*(u64*)(r10 - 32) = r0; \
	*(u64*)(r10 - 24) = r0; \
	*(u64*)(r10 - 16) = r0; \
	*(u64*)(r10 - 8) = r0; \
	r2 = 16; \
	*(u64*)(r1 - 128) = r2; \
	r2 = *(u64*)(r1 - 128); \
	if r2 s> 64 goto l0_%=; \
	r4 = 0; \
	if r4 s>= r2 goto l0_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l0_%=:	r0 = 0; \
	exit; \
" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: stack, JMP, bounds + offset")
__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
__naked void memory_stack_jmp_bounds_offset(void)
{
	asm volatile (" \
	r2 = *(u64*)(r1 + 8); \
	r1 = r10; \
	r1 += -64; \
	*(u64*)(r1 - 128) = r2; \
	r2 = *(u64*)(r1 - 128); \
	if r2 > 64 goto l0_%=; \
	r4 = 0; \
	if r4 >= r2 goto l0_%=; \
	r2 += 1; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l0_%=:	r0 = 0; \
	exit; \
" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: stack, JMP, wrong max")
__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
__naked void memory_stack_jmp_wrong_max(void)
{
	asm volatile (" \
	r2 = *(u64*)(r1 + 8); \
	r1 = r10; \
	r1 += -64; \
	*(u64*)(r1 - 128) = r2; \
	r2 = *(u64*)(r1 - 128); \
	if r2 > 65 goto l0_%=; \
	r4 = 0; \
	if r4 >= r2 goto l0_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l0_%=:	r0 = 0; \
	exit; \
" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: stack, JMP, no max check")
__failure
/* because max wasn't checked, signed min is negative */
__msg("R2 min value is negative, either use unsigned or 'var &= const'")
__naked void stack_jmp_no_max_check(void)
{
	asm volatile (" \
	r2 = *(u64*)(r1 + 8); \
	r1 = r10; \
	r1 += -64; \
	*(u64*)(r1 - 128) = r2; \
	r2 = *(u64*)(r1 - 128); \
	r4 = 0; \
	if r4 >= r2 goto l0_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l0_%=:	r0 = 0; \
	exit; \
" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("socket")
__description("helper access to variable memory: stack, JMP, no min check")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64")
__retval(0)
__naked void stack_jmp_no_min_check(void)
{
	asm volatile (" \
	/* set max stack size */ \
	r6 = 0; \
	*(u64*)(r10 - 128) = r6; \
	/* set r3 to a random value */ \
	call %[bpf_get_prandom_u32]; \
	r3 = r0; \
	/* use JMP to limit r3 range to [0, 64] */ \
	if r3 > 64 goto l0_%=; \
	r1 = %[map_ringbuf] ll; \
	r2 = r10; \
	r2 += -64; \
	r4 = 0; \
	/* Call bpf_ringbuf_output(), it is one of a few helper functions with \
	 * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode. \
	 * For unpriv this should signal an error, because memory at &fp[-64] is \
	 * not initialized. \
	 */ \
	call %[bpf_ringbuf_output]; \
l0_%=:	r0 = 0; \
	exit; \
" :
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_ringbuf_output),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

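/* With only "r2 s> 64" checked, the signed upper bound is known but the
 * signed lower bound is not, so r2 may still be negative when it reaches
 * the size argument; hence the expected "R2 min value is negative" error.
 */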
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP (signed), no min check")
__failure __msg("R2 min value is negative")
__naked void jmp_signed_no_min_check(void)
{
	asm volatile (" \
	r2 = *(u64*)(r1 + 8); \
	r1 = r10; \
	r1 += -64; \
	*(u64*)(r1 - 128) = r2; \
	r2 = *(u64*)(r1 - 128); \
	if r2 s> 64 goto l0_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
	r0 = 0; \
l0_%=:	exit; \
" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

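/* The next four tests apply the same size-bound checks to a 48-byte map
 * value (struct test_val) instead of the stack: sizeof(struct test_val) is
 * a valid upper bound, while sizeof(struct test_val) + 1 (49), or a bound
 * that exceeds the space remaining after r1 += 20, is rejected.
 */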
SEC("tracepoint")
__description("helper access to variable memory: map, JMP, correct bounds")
__success
__naked void memory_map_jmp_correct_bounds(void)
{
	asm volatile (" \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = %[sizeof_test_val]; \
	*(u64*)(r10 - 128) = r2; \
	r2 = *(u64*)(r10 - 128); \
	if r2 s> %[sizeof_test_val] goto l1_%=; \
	r4 = 0; \
	if r4 s>= r2 goto l1_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l1_%=:	r0 = 0; \
l0_%=:	exit; \
" :
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(sizeof_test_val, sizeof(struct test_val))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: map, JMP, wrong max")
__failure __msg("invalid access to map value, value_size=48 off=0 size=49")
__naked void memory_map_jmp_wrong_max(void)
{
	asm volatile (" \
	r6 = *(u64*)(r1 + 8); \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = r6; \
	*(u64*)(r10 - 128) = r2; \
	r2 = *(u64*)(r10 - 128); \
	if r2 s> %[__imm_0] goto l1_%=; \
	r4 = 0; \
	if r4 s>= r2 goto l1_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l1_%=:	r0 = 0; \
l0_%=:	exit; \
" :
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) + 1)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: map adjusted, JMP, correct bounds")
__success
__naked void map_adjusted_jmp_correct_bounds(void)
{
	asm volatile (" \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r1 += 20; \
	r2 = %[sizeof_test_val]; \
	*(u64*)(r10 - 128) = r2; \
	r2 = *(u64*)(r10 - 128); \
	if r2 s> %[__imm_0] goto l1_%=; \
	r4 = 0; \
	if r4 s>= r2 goto l1_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l1_%=:	r0 = 0; \
l0_%=:	exit; \
" :
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - 20),
	  __imm_const(sizeof_test_val, sizeof(struct test_val))
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: map adjusted, JMP, wrong max")
__failure __msg("R1 min value is outside of the allowed memory range")
__naked void map_adjusted_jmp_wrong_max(void)
{
	asm volatile (" \
	r6 = *(u64*)(r1 + 8); \
	r2 = r10; \
	r2 += -8; \
	r1 = 0; \
	*(u64*)(r2 + 0) = r1; \
	r1 = %[map_hash_48b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r1 += 20; \
	r2 = r6; \
	*(u64*)(r10 - 128) = r2; \
	r2 = *(u64*)(r10 - 128); \
	if r2 s> %[__imm_0] goto l1_%=; \
	r4 = 0; \
	if r4 s>= r2 goto l1_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l1_%=:	r0 = 0; \
l0_%=:	exit; \
" :
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - 19)
	: __clobber_all);
}

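/* bpf_csum_diff() is used below because its memory arguments are
 * ARG_PTR_TO_MEM_OR_NULL: a NULL pointer is accepted only when the paired
 * size is provably zero, while a non-NULL stack, map-value or packet
 * pointer also accepts a size that may be zero.
 */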
SEC("tc")
__description("helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_1(void)
{
	asm volatile (" \
	r1 = 0; \
	r2 = 0; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
	exit; \
" :
	: __imm(bpf_csum_diff)
	: __clobber_all);
}

SEC("tc")
__description("helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)")
__failure __msg("R1 type=scalar expected=fp")
__naked void ptr_to_mem_or_null_2(void)
{
	asm volatile (" \
	r2 = *(u32*)(r1 + 0); \
	r1 = 0; \
	*(u64*)(r10 - 128) = r2; \
	r2 = *(u64*)(r10 - 128); \
	r2 &= 64; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
	exit; \
" :
	: __imm(bpf_csum_diff)
	: __clobber_all);
}

SEC("tc")
__description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_3(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += -8; \
	r2 = 0; \
	*(u64*)(r1 + 0) = r2; \
	r2 &= 8; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
	exit; \
" :
	: __imm(bpf_csum_diff)
	: __clobber_all);
}

SEC("tc")
__description("helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_4(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = 0; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	exit; \
" :
	: __imm(bpf_csum_diff),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("tc")
__description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_5(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r2 = *(u64*)(r0 + 0); \
	if r2 > 8 goto l0_%=; \
	r1 = r10; \
	r1 += -8; \
	*(u64*)(r1 + 0) = r2; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	exit; \
" :
	: __imm(bpf_csum_diff),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("tc")
__description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_6(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = *(u64*)(r0 + 0); \
	if r2 > 8 goto l0_%=; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	exit; \
" :
	: __imm(bpf_csum_diff),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("tc")
__description("helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
/* csum_diff of 64-byte packet */
__flag(BPF_F_ANY_ALIGNMENT)
__naked void ptr_to_mem_or_null_7(void)
{
	asm volatile (" \
	r6 = *(u32*)(r1 + %[__sk_buff_data]); \
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
	r0 = r6; \
	r0 += 8; \
	if r0 > r3 goto l0_%=; \
	r1 = r6; \
	r2 = *(u64*)(r6 + 0); \
	if r2 > 8 goto l0_%=; \
	r3 = 0; \
	r4 = 0; \
	r5 = 0; \
	call %[bpf_csum_diff]; \
l0_%=:	exit; \
" :
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

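/* bpf_probe_read_kernel() does not take the _OR_NULL variant for its
 * destination buffer, so a NULL pointer is rejected even when the size is
 * zero; a valid stack or map-value pointer with a zero or possibly-zero
 * size is still accepted.
 */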
SEC("tracepoint")
__description("helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)")
__failure __msg("R1 type=scalar expected=fp")
__naked void ptr_to_mem_or_null_8(void)
{
	asm volatile (" \
	r1 = 0; \
	r2 = 0; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
	exit; \
" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)")
__failure __msg("R1 type=scalar expected=fp")
__naked void ptr_to_mem_or_null_9(void)
{
	asm volatile (" \
	r1 = 0; \
	r2 = 1; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
	exit; \
" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_10(void)
{
	asm volatile (" \
	r1 = r10; \
	r1 += -8; \
	r2 = 0; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
	exit; \
" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_11(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = 0; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l0_%=:	exit; \
" :
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_12(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r2 = *(u64*)(r0 + 0); \
	if r2 > 8 goto l0_%=; \
	r1 = r10; \
	r1 += -8; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l0_%=:	exit; \
" :
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_13(void)
{
	asm volatile (" \
	r1 = 0; \
	*(u64*)(r10 - 8) = r1; \
	r2 = r10; \
	r2 += -8; \
	r1 = %[map_hash_8b] ll; \
	call %[bpf_map_lookup_elem]; \
	if r0 == 0 goto l0_%=; \
	r1 = r0; \
	r2 = *(u64*)(r0 + 0); \
	if r2 > 8 goto l0_%=; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
l0_%=:	exit; \
" :
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

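/* The last two tests cover initialization of the variable-sized region:
 * "8 bytes leak" leaves fp[-32] uninitialized inside the 64-byte window at
 * fp[-64] and is rejected for unprivileged programs, while the final test
 * zeroes the whole window before the call and is accepted.
 */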
SEC("socket")
__description("helper access to variable memory: 8 bytes leak")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R2 off -64+32 size 64")
__retval(0)
__naked void variable_memory_8_bytes_leak(void)
{
	asm volatile (" \
	/* set max stack size */ \
	r6 = 0; \
	*(u64*)(r10 - 128) = r6; \
	/* set r3 to a random value */ \
	call %[bpf_get_prandom_u32]; \
	r3 = r0; \
	r1 = %[map_ringbuf] ll; \
	r2 = r10; \
	r2 += -64; \
	r0 = 0; \
	*(u64*)(r10 - 64) = r0; \
	*(u64*)(r10 - 56) = r0; \
	*(u64*)(r10 - 48) = r0; \
	*(u64*)(r10 - 40) = r0; \
	/* Note: fp[-32] left uninitialized */ \
	*(u64*)(r10 - 24) = r0; \
	*(u64*)(r10 - 16) = r0; \
	*(u64*)(r10 - 8) = r0; \
	/* Limit r3 range to [1, 64] */ \
	r3 &= 63; \
	r3 += 1; \
	r4 = 0; \
	/* Call bpf_ringbuf_output(), it is one of a few helper functions with \
	 * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode. \
	 * For unpriv this should signal an error, because memory region [1, 64] \
	 * at &fp[-64] is not fully initialized. \
	 */ \
	call %[bpf_ringbuf_output]; \
	r0 = 0; \
	exit; \
" :
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_ringbuf_output),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

SEC("tracepoint")
__description("helper access to variable memory: 8 bytes no leak (init memory)")
__success
__naked void bytes_no_leak_init_memory(void)
{
	asm volatile (" \
	r1 = r10; \
	r0 = 0; \
	r0 = 0; \
	*(u64*)(r10 - 64) = r0; \
	*(u64*)(r10 - 56) = r0; \
	*(u64*)(r10 - 48) = r0; \
	*(u64*)(r10 - 40) = r0; \
	*(u64*)(r10 - 32) = r0; \
	*(u64*)(r10 - 24) = r0; \
	*(u64*)(r10 - 16) = r0; \
	*(u64*)(r10 - 8) = r0; \
	r1 += -64; \
	r2 = 0; \
	r2 &= 32; \
	r2 += 32; \
	r3 = 0; \
	call %[bpf_probe_read_kernel]; \
	r1 = *(u64*)(r10 - 16); \
	exit; \
" :
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";