// SPDX-License-Identifier: GPL-2.0-only
/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/btf.h>

#include <bpf/btf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "cap_helpers.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "test_btf.h"
#include "../../../include/linux/filter.h"

#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif

#define MAX_INSNS	BPF_MAXINSNS
#define MAX_TEST_INSNS	1000000
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	23
#define MAX_TEST_RUNS	8
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

/* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
#define ADMIN_CAPS (1ULL << CAP_NET_ADMIN |	\
		    1ULL << CAP_PERFMON |	\
		    1ULL << CAP_BPF)
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
static int skips;
static bool verbose = false;

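/* Pairs a kernel function (kfunc) name with the index of the BPF call
 * instruction whose imm field gets patched with the kfunc's BTF ID
 * (see the fixup_kfunc_btf_id handling in do_test_fixup()).
 */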
struct kfunc_btf_id_pair {
	const char *kfunc;
	int insn_idx;
};

struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	struct bpf_insn *fill_insns;
	int fixup_map_hash_8b[MAX_FIXUPS];
	int fixup_map_hash_48b[MAX_FIXUPS];
	int fixup_map_hash_16b[MAX_FIXUPS];
	int fixup_map_array_48b[MAX_FIXUPS];
	int fixup_map_sockmap[MAX_FIXUPS];
	int fixup_map_sockhash[MAX_FIXUPS];
	int fixup_map_xskmap[MAX_FIXUPS];
	int fixup_map_stacktrace[MAX_FIXUPS];
	int fixup_prog1[MAX_FIXUPS];
	int fixup_prog2[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	int fixup_cgroup_storage[MAX_FIXUPS];
	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
	int fixup_map_spin_lock[MAX_FIXUPS];
	int fixup_map_array_ro[MAX_FIXUPS];
	int fixup_map_array_wo[MAX_FIXUPS];
	int fixup_map_array_small[MAX_FIXUPS];
	int fixup_sk_storage_map[MAX_FIXUPS];
	int fixup_map_event_output[MAX_FIXUPS];
	int fixup_map_reuseport_array[MAX_FIXUPS];
	int fixup_map_ringbuf[MAX_FIXUPS];
	int fixup_map_timer[MAX_FIXUPS];
	int fixup_map_kptr[MAX_FIXUPS];
	struct kfunc_btf_id_pair fixup_kfunc_btf_id[MAX_FIXUPS];
	/* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
	 * Can be a tab-separated sequence of expected strings (see the
	 * example entry following this struct definition). An empty string
	 * means no log verification.
	 */
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t insn_processed;
	int prog_len;
	enum {
		UNDEF,
		ACCEPT,
		REJECT,
		VERBOSE_ACCEPT,
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
	void (*fill_helper)(struct bpf_test *self);
	int runs;
#define bpf_testdata_struct_t					\
	struct {						\
		uint32_t retval, retval_unpriv;			\
		union {						\
			__u8 data[TEST_DATA_LEN];		\
			__u64 data64[TEST_DATA_LEN / 8];	\
		};						\
	}
	union {
		bpf_testdata_struct_t;
		bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
	};
	enum bpf_attach_type expected_attach_type;
	const char *kfunc;
};

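/* Illustrative (hypothetical) test entry showing the tab-separated errstr
 * format; each string must appear in the verifier log, in this order:
 *
 *	{
 *		.descr = "example",
 *		.insns = {
 *			BPF_MOV64_IMM(BPF_REG_0, 0),
 *			BPF_EXIT_INSN(),
 *		},
 *		.errstr = "first expected string\tsecond expected string",
 *		.result = REJECT,
 *	},
 */
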
/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct other_val {
	long long foo;
	long long bar;
};

static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
	/* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
	/* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
	unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, j, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		/* jump to error label */
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_push),
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}

	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_pop),
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}
	if (++k < 5)
		goto loop;

	for (; i < len - 3; i++)
		insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
	insn[len - 3] = BPF_JMP_A(1);
	/* error label */
	insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
	insn[len - 1] = BPF_EXIT_INSN();
	self->prog_len = len;
}

static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	/* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns,
	 * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
	 * to extend the error value of the inlined ld_abs sequence which then
	 * contains 7 insns. so, set the divisor to 7 so the testcase could
	 * work on all arches.
	 */
	unsigned int len = (1 << 15) / 7;
	int i = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	insn[i++] = BPF_LD_ABS(BPF_B, 0);
	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
	i++;
	while (i < len - 1)
		insn[i++] = BPF_LD_ABS(BPF_B, 1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
}

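/* Fill the program with BPF_LD_IMM64 loads of semi-random values XORed into
 * r0. On entry self->retval selects the approximate program length; on exit
 * it holds the expected result: the XOR of all values, folded into the low
 * 32 bits.
 */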
static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	uint64_t res = 0;
	int i = 0;

	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
	while (i < self->retval) {
		uint64_t val = bpf_semi_rand_get();
		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

		res ^= val;
		insn[i++] = tmp[0];
		insn[i++] = tmp[1];
		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	res ^= (res >> 32);
	self->retval = (uint32_t)res;
}

#define MAX_JMP_SEQ 8192

/* test the sequence of 8k jumps */
static void bpf_fill_scale1(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	/* test to check that the long sequence of jumps is acceptable */
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % 64 + 1));
	}
	/* is_state_visited() doesn't allocate state for pruning for every jump.
	 * Hence multiply jmps by 4 to accommodate that heuristic
	 */
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	self->retval = 42;
}

/* test the sequence of 8k jumps in inner most function (function depth 8) */
static void bpf_fill_scale2(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

#define FUNC_NEST 7
	for (k = 0; k < FUNC_NEST; k++) {
		insn[i++] = BPF_CALL_REL(1);
		insn[i++] = BPF_EXIT_INSN();
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	/* test to check that the long sequence of jumps is acceptable */
	k = 0;
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % (64 - 4 * FUNC_NEST) + 1));
	}
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	self->retval = 42;
}

static void bpf_fill_scale(struct bpf_test *self)
{
	switch (self->retval) {
	case 1:
		return bpf_fill_scale1(self);
	case 2:
		return bpf_fill_scale2(self);
	default:
		self->prog_len = 0;
		break;
	}
}

static int bpf_fill_torturous_jumps_insn_1(struct bpf_insn *insn)
{
	unsigned int len = 259, hlen = 128;
	int i;

	insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
	for (i = 1; i <= hlen; i++) {
		insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, hlen);
		insn[i + hlen] = BPF_JMP_A(hlen - i);
	}
	insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 1);
	insn[len - 1] = BPF_EXIT_INSN();

	return len;
}

static int bpf_fill_torturous_jumps_insn_2(struct bpf_insn *insn)
{
	unsigned int len = 4100, jmp_off = 2048;
	int i, j;

	insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
	for (i = 1; i <= jmp_off; i++) {
		insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, jmp_off);
	}
	insn[i++] = BPF_JMP_A(jmp_off);
	for (; i <= jmp_off * 2 + 1; i += 16) {
		for (j = 0; j < 16; j++) {
			insn[i + j] = BPF_JMP_A(16 - j - 1);
		}
	}

	insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 2);
	insn[len - 1] = BPF_EXIT_INSN();

	return len;
}

static void bpf_fill_torturous_jumps(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0;

	switch (self->retval) {
	case 1:
		self->prog_len = bpf_fill_torturous_jumps_insn_1(insn);
		return;
	case 2:
		self->prog_len = bpf_fill_torturous_jumps_insn_2(insn);
		return;
	case 3:
		/* main */
		insn[i++] = BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4);
		insn[i++] = BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 262);
		insn[i++] = BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_0, 3);
		insn[i++] = BPF_EXIT_INSN();

		/* subprog 1 */
		i += bpf_fill_torturous_jumps_insn_1(insn + i);

		/* subprog 2 */
		i += bpf_fill_torturous_jumps_insn_2(insn + i);

		self->prog_len = i;
		return;
	default:
		self->prog_len = 0;
		break;
	}
}

/* BPF_SK_LOOKUP contains 13 instructions; account for them when computing
 * map fixup instruction indices.
 */
#define BPF_SK_LOOKUP(func)						\
	/* struct bpf_sock_tuple tuple = {} */				\
	BPF_MOV64_IMM(BPF_REG_2, 0),					\
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
	/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */		\
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
	BPF_MOV64_IMM(BPF_REG_4, 0),					\
	BPF_MOV64_IMM(BPF_REG_5, 0),					\
	BPF_EMIT_CALL(BPF_FUNC_ ## func)

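/* Typical use (illustrative sketch): the macro expands in place inside a
 * test's .insns initializer, e.g.
 *
 *	BPF_SK_LOOKUP(sk_lookup_tcp),
 *
 * leaving the looked-up socket pointer (or NULL) in R0.
 */
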
/* BPF_DIRECT_PKT_R2 contains 7 instructions, it initializes the default return
 * value to 0 and does the necessary preparation for direct packet access
 * through r2. The allowed access range is 8 bytes.
 */
#define BPF_DIRECT_PKT_R2						\
	BPF_MOV64_IMM(BPF_REG_0, 0),					\
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data)),			\
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data_end)),		\
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),				\
	BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),			\
	BPF_EXIT_INSN()

/* BPF_RAND_UEXT_R7 contains 4 instructions, it initializes R7 to a random
 * positive u32 and zero-extends it to 64 bits.
 */
#define BPF_RAND_UEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33),				\
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)

/* BPF_RAND_SEXT_R7 contains 5 instructions, it initializes R7 to a random
 * negative u32 and sign-extends it to 64 bits.
 */
#define BPF_RAND_SEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000),			\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32),				\
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)

static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};

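/* Determine the length of a statically initialized test program by scanning
 * backwards for the last non-zero instruction.
 */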
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static bool skip_unsupported_map(enum bpf_map_type map_type)
{
	if (!libbpf_probe_bpf_map_type(map_type, NULL)) {
		printf("SKIP (unsupported map type %d)\n", map_type);
		skips++;
		return true;
	}
	return false;
}

static int __create_map(uint32_t type, uint32_t size_key,
			uint32_t size_value, uint32_t max_elem,
			uint32_t extra_flags)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int fd;

	opts.map_flags = (type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0) | extra_flags;
	fd = bpf_map_create(type, NULL, size_key, size_value, max_elem, &opts);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create hash map '%s'!\n", strerror(errno));
	}

	return fd;
}

static int create_map(uint32_t type, uint32_t size_key,
		      uint32_t size_value, uint32_t max_elem)
{
	return __create_map(type, size_key, size_value, max_elem, 0);
}

static void update_map(int fd, int index)
{
	struct test_val value = {
		.index = (6 + 1) * sizeof(int),
		.foo[6] = 0xabcdef12,
	};

	assert(!bpf_map_update_elem(fd, &index, &value, 0));
}

static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, ret),
		BPF_EXIT_INSN(),
	};

	return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}

static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
				  int idx, int ret)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_3, idx),
		BPF_LD_MAP_FD(BPF_REG_2, mfd),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_tail_call),
		BPF_MOV64_IMM(BPF_REG_0, ret),
		BPF_EXIT_INSN(),
	};

	return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}

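/* Build a BPF_MAP_TYPE_PROG_ARRAY with three dummy programs installed at keys
 * p1key, p2key and p3key for tail-call tests. Returns the map fd, or -1 on
 * error (or when the map type is unsupported and the test is skipped).
 */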
static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
			     int p1key, int p2key, int p3key)
{
	int mfd, p1fd, p2fd, p3fd;

	mfd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, NULL, sizeof(int),
			     sizeof(int), max_elem, NULL);
	if (mfd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
			return -1;
		printf("Failed to create prog array '%s'!\n", strerror(errno));
		return -1;
	}

	p1fd = create_prog_dummy_simple(prog_type, 42);
	p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
	p3fd = create_prog_dummy_simple(prog_type, 24);
	if (p1fd < 0 || p2fd < 0 || p3fd < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
err:
		close(mfd);
		mfd = -1;
	}
	close(p3fd);
	close(p2fd);
	close(p1fd);
	return mfd;
}

static int create_map_in_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int inner_map_fd, outer_map_fd;

	inner_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int),
				      sizeof(int), 1, NULL);
	if (inner_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
			return -1;
		printf("Failed to create array '%s'!\n", strerror(errno));
		return inner_map_fd;
	}

	opts.inner_map_fd = inner_map_fd;
	outer_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
				      sizeof(int), sizeof(int), 1, &opts);
	if (outer_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
			return -1;
		printf("Failed to create array of maps '%s'!\n",
		       strerror(errno));
	}

	close(inner_map_fd);

	return outer_map_fd;
}

static int create_cgroup_storage(bool percpu)
{
	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
		BPF_MAP_TYPE_CGROUP_STORAGE;
	int fd;

	fd = bpf_map_create(type, NULL, sizeof(struct bpf_cgroup_storage_key),
			    TEST_DATA_LEN, 0, NULL);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create cgroup storage '%s'!\n",
		       strerror(errno));
	}

	return fd;
}

/* struct bpf_spin_lock {
 *   int val;
 * };
 * struct val {
 *   int cnt;
 *   struct bpf_spin_lock l;
 * };
 * struct bpf_timer {
 *   __u64 :64;
 *   __u64 :64;
 * } __attribute__((aligned(8)));
 * struct timer {
 *   struct bpf_timer t;
 * };
 * struct btf_ptr {
 *   struct prog_test_ref_kfunc __kptr *ptr;
 *   struct prog_test_ref_kfunc __kptr_ref *ptr;
 *   struct prog_test_member __kptr_ref *ptr;
 * }
 */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t"
				  "\0btf_ptr\0prog_test_ref_kfunc\0ptr\0kptr\0kptr_ref"
				  "\0prog_test_member";
static __u32 btf_raw_types[] = {
	/* int */
	BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
	/* struct bpf_spin_lock */			/* [2] */
	BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
	BTF_MEMBER_ENC(15, 1, 0), /* int val; */
	/* struct val */				/* [3] */
	BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
	BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
	BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
	/* struct bpf_timer */				/* [4] */
	BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16),
	/* struct timer */				/* [5] */
	BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
	BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
	/* struct prog_test_ref_kfunc */		/* [6] */
	BTF_STRUCT_ENC(51, 0, 0),
	BTF_STRUCT_ENC(89, 0, 0),			/* [7] */
	/* type tag "kptr" */
	BTF_TYPE_TAG_ENC(75, 6),			/* [8] */
	/* type tag "kptr_ref" */
	BTF_TYPE_TAG_ENC(80, 6),			/* [9] */
	BTF_TYPE_TAG_ENC(80, 7),			/* [10] */
	BTF_PTR_ENC(8),					/* [11] */
	BTF_PTR_ENC(9),					/* [12] */
	BTF_PTR_ENC(10),				/* [13] */
	/* struct btf_ptr */				/* [14] */
	BTF_STRUCT_ENC(43, 3, 24),
	BTF_MEMBER_ENC(71, 11, 0), /* struct prog_test_ref_kfunc __kptr *ptr; */
	BTF_MEMBER_ENC(71, 12, 64), /* struct prog_test_ref_kfunc __kptr_ref *ptr; */
	BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr_ref *ptr; */
};

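/* Build an in-memory raw BTF blob (header + type section + string section)
 * from btf_raw_types/btf_str_sec above and load it, returning the BTF fd or
 * -1 on failure.
 */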
static int load_btf(void)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = sizeof(btf_raw_types),
		.str_off = sizeof(btf_raw_types),
		.str_len = sizeof(btf_str_sec),
	};
	void *ptr, *raw_btf;
	int btf_fd;

	ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
			       sizeof(btf_str_sec));

	memcpy(ptr, &hdr, sizeof(hdr));
	ptr += sizeof(hdr);
	memcpy(ptr, btf_raw_types, hdr.type_len);
	ptr += hdr.type_len;
	memcpy(ptr, btf_str_sec, hdr.str_len);
	ptr += hdr.str_len;

	btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, NULL);
	free(raw_btf);
	if (btf_fd < 0)
		return -1;
	return btf_fd;
}

static int create_map_spin_lock(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.btf_key_type_id = 1,
		.btf_value_type_id = 3,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;
	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 8, 1, &opts);
	if (fd < 0)
		printf("Failed to create map with spin_lock\n");
	return fd;
}

static int create_sk_storage_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_NO_PREALLOC,
		.btf_key_type_id = 1,
		.btf_value_type_id = 3,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;
	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "test_map", 4, 8, 0, &opts);
	close(opts.btf_fd);
	if (fd < 0)
		printf("Failed to create sk_storage_map\n");
	return fd;
}

static int create_map_timer(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.btf_key_type_id = 1,
		.btf_value_type_id = 5,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;

	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 16, 1, &opts);
	if (fd < 0)
		printf("Failed to create map with timer\n");
	return fd;
}

static int create_map_kptr(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.btf_key_type_id = 1,
		.btf_value_type_id = 14,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;

	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 24, 1, &opts);
	if (fd < 0)
		printf("Failed to create map with btf_id pointer\n");
	return fd;
}

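/* Verifier log buffer shared by all tests (UINT_MAX >> 8, roughly 16 MB). */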
static char bpf_vlog[UINT_MAX >> 8];

static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
			  struct bpf_insn *prog, int *map_fds)
{
	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
	int *fixup_map_array_48b = test->fixup_map_array_48b;
	int *fixup_map_sockmap = test->fixup_map_sockmap;
	int *fixup_map_sockhash = test->fixup_map_sockhash;
	int *fixup_map_xskmap = test->fixup_map_xskmap;
	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
	int *fixup_prog1 = test->fixup_prog1;
	int *fixup_prog2 = test->fixup_prog2;
	int *fixup_map_in_map = test->fixup_map_in_map;
	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
	int *fixup_map_spin_lock = test->fixup_map_spin_lock;
	int *fixup_map_array_ro = test->fixup_map_array_ro;
	int *fixup_map_array_wo = test->fixup_map_array_wo;
	int *fixup_map_array_small = test->fixup_map_array_small;
	int *fixup_sk_storage_map = test->fixup_sk_storage_map;
	int *fixup_map_event_output = test->fixup_map_event_output;
	int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
	int *fixup_map_ringbuf = test->fixup_map_ringbuf;
	int *fixup_map_timer = test->fixup_map_timer;
	int *fixup_map_kptr = test->fixup_map_kptr;
	struct kfunc_btf_id_pair *fixup_kfunc_btf_id = test->fixup_kfunc_btf_id;

	if (test->fill_helper) {
		test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
		test->fill_helper(test);
	}

	/* Allocating HTs with 1 elem is fine here, since we only test
	 * the verifier and do not do a runtime lookup, so the only thing
	 * that really matters is value size in this case.
	 */
	if (*fixup_map_hash_8b) {
		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(long long), 1);
		do {
			prog[*fixup_map_hash_8b].imm = map_fds[0];
			fixup_map_hash_8b++;
		} while (*fixup_map_hash_8b);
	}

	if (*fixup_map_hash_48b) {
		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct test_val), 1);
		do {
			prog[*fixup_map_hash_48b].imm = map_fds[1];
			fixup_map_hash_48b++;
		} while (*fixup_map_hash_48b);
	}

	if (*fixup_map_hash_16b) {
		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct other_val), 1);
		do {
			prog[*fixup_map_hash_16b].imm = map_fds[2];
			fixup_map_hash_16b++;
		} while (*fixup_map_hash_16b);
	}

	if (*fixup_map_array_48b) {
		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					sizeof(struct test_val), 1);
		update_map(map_fds[3], 0);
		do {
			prog[*fixup_map_array_48b].imm = map_fds[3];
			fixup_map_array_48b++;
		} while (*fixup_map_array_48b);
	}

	if (*fixup_prog1) {
		map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
		do {
			prog[*fixup_prog1].imm = map_fds[4];
			fixup_prog1++;
		} while (*fixup_prog1);
	}

	if (*fixup_prog2) {
		map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
		do {
			prog[*fixup_prog2].imm = map_fds[5];
			fixup_prog2++;
		} while (*fixup_prog2);
	}

	if (*fixup_map_in_map) {
		map_fds[6] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[6];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}

	if (*fixup_cgroup_storage) {
		map_fds[7] = create_cgroup_storage(false);
		do {
			prog[*fixup_cgroup_storage].imm = map_fds[7];
			fixup_cgroup_storage++;
		} while (*fixup_cgroup_storage);
	}

	if (*fixup_percpu_cgroup_storage) {
		map_fds[8] = create_cgroup_storage(true);
		do {
			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
			fixup_percpu_cgroup_storage++;
		} while (*fixup_percpu_cgroup_storage);
	}
	if (*fixup_map_sockmap) {
		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_sockmap].imm = map_fds[9];
			fixup_map_sockmap++;
		} while (*fixup_map_sockmap);
	}
	if (*fixup_map_sockhash) {
		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
					 sizeof(int), 1);
		do {
			prog[*fixup_map_sockhash].imm = map_fds[10];
			fixup_map_sockhash++;
		} while (*fixup_map_sockhash);
	}
	if (*fixup_map_xskmap) {
		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
					 sizeof(int), 1);
		do {
			prog[*fixup_map_xskmap].imm = map_fds[11];
			fixup_map_xskmap++;
		} while (*fixup_map_xskmap);
	}
	if (*fixup_map_stacktrace) {
		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
					 sizeof(u64), 1);
		do {
			prog[*fixup_map_stacktrace].imm = map_fds[12];
			fixup_map_stacktrace++;
		} while (*fixup_map_stacktrace);
	}
	if (*fixup_map_spin_lock) {
		map_fds[13] = create_map_spin_lock();
		do {
			prog[*fixup_map_spin_lock].imm = map_fds[13];
			fixup_map_spin_lock++;
		} while (*fixup_map_spin_lock);
	}
	if (*fixup_map_array_ro) {
		map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_RDONLY_PROG);
		update_map(map_fds[14], 0);
		do {
			prog[*fixup_map_array_ro].imm = map_fds[14];
			fixup_map_array_ro++;
		} while (*fixup_map_array_ro);
	}
	if (*fixup_map_array_wo) {
		map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_WRONLY_PROG);
		update_map(map_fds[15], 0);
		do {
			prog[*fixup_map_array_wo].imm = map_fds[15];
			fixup_map_array_wo++;
		} while (*fixup_map_array_wo);
	}
	if (*fixup_map_array_small) {
		map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   1, 1, 0);
		update_map(map_fds[16], 0);
		do {
			prog[*fixup_map_array_small].imm = map_fds[16];
			fixup_map_array_small++;
		} while (*fixup_map_array_small);
	}
	if (*fixup_sk_storage_map) {
		map_fds[17] = create_sk_storage_map();
		do {
			prog[*fixup_sk_storage_map].imm = map_fds[17];
			fixup_sk_storage_map++;
		} while (*fixup_sk_storage_map);
	}
	if (*fixup_map_event_output) {
		map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
					   sizeof(int), sizeof(int), 1, 0);
		do {
			prog[*fixup_map_event_output].imm = map_fds[18];
			fixup_map_event_output++;
		} while (*fixup_map_event_output);
	}
	if (*fixup_map_reuseport_array) {
		map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
					   sizeof(u32), sizeof(u64), 1, 0);
		do {
			prog[*fixup_map_reuseport_array].imm = map_fds[19];
			fixup_map_reuseport_array++;
		} while (*fixup_map_reuseport_array);
	}
	if (*fixup_map_ringbuf) {
		map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
					 0, 4096);
		do {
			prog[*fixup_map_ringbuf].imm = map_fds[20];
			fixup_map_ringbuf++;
		} while (*fixup_map_ringbuf);
	}
	if (*fixup_map_timer) {
		map_fds[21] = create_map_timer();
		do {
			prog[*fixup_map_timer].imm = map_fds[21];
			fixup_map_timer++;
		} while (*fixup_map_timer);
	}
	if (*fixup_map_kptr) {
		map_fds[22] = create_map_kptr();
		do {
			prog[*fixup_map_kptr].imm = map_fds[22];
			fixup_map_kptr++;
		} while (*fixup_map_kptr);
	}

	/* Patch in kfunc BTF IDs */
	if (fixup_kfunc_btf_id->kfunc) {
		struct btf *btf;
		int btf_id;

		do {
			btf_id = 0;
			btf = btf__load_vmlinux_btf();
			if (btf) {
				btf_id = btf__find_by_name_kind(btf,
								fixup_kfunc_btf_id->kfunc,
								BTF_KIND_FUNC);
				btf_id = btf_id < 0 ? 0 : btf_id;
			}
			btf__free(btf);
			prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id;
			fixup_kfunc_btf_id++;
		} while (fixup_kfunc_btf_id->kfunc);
	}
}

struct libcap {
	struct __user_cap_header_struct hdr;
	struct __user_cap_data_struct data[2];
};

static int set_admin(bool admin)
{
	int err;

	if (admin) {
		err = cap_enable_effective(ADMIN_CAPS, NULL);
		if (err)
			perror("cap_enable_effective(ADMIN_CAPS)");
	} else {
		err = cap_disable_effective(ADMIN_CAPS, NULL);
		if (err)
			perror("cap_disable_effective(ADMIN_CAPS)");
	}

	return err;
}

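/* Run the loaded program once over the test data via BPF_PROG_TEST_RUN and
 * compare its return value against expected_val. ENOTSUPP (and EPERM for
 * unprivileged runs) is not treated as a failure.
 */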
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
			    void *data, size_t size_data)
{
	__u8 tmp[TEST_DATA_LEN << 2];
	__u32 size_tmp = sizeof(tmp);
	int err, saved_errno;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = data,
		.data_size_in = size_data,
		.data_out = tmp,
		.data_size_out = size_tmp,
		.repeat = 1,
	);

	if (unpriv)
		set_admin(true);
	err = bpf_prog_test_run_opts(fd_prog, &topts);
	saved_errno = errno;

	if (unpriv)
		set_admin(false);

	if (err) {
		switch (saved_errno) {
		case ENOTSUPP:
			printf("Did not run the program (not supported) ");
			return 0;
		case EPERM:
			if (unpriv) {
				printf("Did not run the program (no permission) ");
				return 0;
			}
			/* fallthrough; */
		default:
			printf("FAIL: Unexpected bpf_prog_test_run error (%s) ",
			       strerror(saved_errno));
			return err;
		}
	}

	if (topts.retval != expected_val && expected_val != POINTER_VALUE) {
		printf("FAIL retval %d != %d ", topts.retval, expected_val);
		return 1;
	}

	return 0;
}

/* Returns true if every part of exp (tab-separated) appears in log, in order.
 *
 * If exp is an empty string, returns true.
 */
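/* For example (illustrative values), exp = "foo\tbar" requires "foo" to
 * appear somewhere in log and "bar" to appear somewhere after it.
 */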
static bool cmp_str_seq(const char *log, const char *exp)
{
	char needle[200];
	const char *p, *q;
	int len;

	do {
		if (!strlen(exp))
			break;
		p = strchr(exp, '\t');
		if (!p)
			p = exp + strlen(exp);

		len = p - exp;
		if (len >= sizeof(needle) || !len) {
			printf("FAIL\nTestcase bug\n");
			return false;
		}
		strncpy(needle, exp, len);
		needle[len] = 0;
		q = strstr(log, needle);
		if (!q) {
			printf("FAIL\nUnexpected verifier log!\n"
			       "EXP: %s\nRES:\n", needle);
			return false;
		}
		log = q + len;
		exp = p + 1;
	} while (*p);
	return true;
}

static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, alignment_prevented_execution;
	int prog_len, prog_type = test->prog_type;
	struct bpf_insn *prog = test->insns;
	LIBBPF_OPTS(bpf_prog_load_opts, opts);
	int run_errs, run_successes;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	int saved_errno;
	int fixup_skips;
	__u32 pflags;
	int i, err;

	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;

	if (!prog_type)
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	fixup_skips = skips;
	do_test_fixup(test, prog_type, prog, map_fds);
	if (test->fill_insns) {
		prog = test->fill_insns;
		prog_len = test->prog_len;
	} else {
		prog_len = probe_filter_length(prog);
	}
	/* If there were some map skips during fixup due to missing bpf
	 * features, skip this test.
	 */
	if (fixup_skips != skips)
		return;

	pflags = BPF_F_TEST_RND_HI32;
	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
		pflags |= BPF_F_STRICT_ALIGNMENT;
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		pflags |= BPF_F_ANY_ALIGNMENT;
	if (test->flags & ~3)
		pflags |= test->flags;

	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	opts.expected_attach_type = test->expected_attach_type;
	if (verbose)
		opts.log_level = 1;
	else if (expected_ret == VERBOSE_ACCEPT)
		opts.log_level = 2;
	else
		opts.log_level = 4;
	opts.prog_flags = pflags;

	if (prog_type == BPF_PROG_TYPE_TRACING && test->kfunc) {
		int attach_btf_id;

		attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
						opts.expected_attach_type);
		if (attach_btf_id < 0) {
			printf("FAIL\nFailed to find BTF ID for '%s'!\n",
			       test->kfunc);
			(*errors)++;
			return;
		}

		opts.attach_btf_id = attach_btf_id;
	}

	opts.log_buf = bpf_vlog;
	opts.log_size = sizeof(bpf_vlog);
	fd_prog = bpf_prog_load(prog_type, NULL, "GPL", prog, prog_len, &opts);
	saved_errno = errno;

	/* BPF_PROG_TYPE_TRACING requires more setup and
	 * bpf_probe_prog_type won't give correct answer
	 */
	if (fd_prog < 0 && prog_type != BPF_PROG_TYPE_TRACING &&
	    !libbpf_probe_bpf_prog_type(prog_type, NULL)) {
		printf("SKIP (unsupported program type %d)\n", prog_type);
		skips++;
		goto close_fds;
	}

	if (fd_prog < 0 && saved_errno == ENOTSUPP) {
		printf("SKIP (program uses an unsupported feature)\n");
		skips++;
		goto close_fds;
	}

	alignment_prevented_execution = 0;

	if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
		if (fd_prog < 0) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(saved_errno));
			goto fail_log;
		}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		if (fd_prog >= 0 &&
		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
			alignment_prevented_execution = 1;
#endif
		if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err)) {
			goto fail_log;
		}
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		if (!expected_err || !cmp_str_seq(bpf_vlog, expected_err)) {
			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
			       expected_err, bpf_vlog);
			goto fail_log;
		}
	}

	if (!unpriv && test->insn_processed) {
		uint32_t insn_processed;
		char *proc;

		proc = strstr(bpf_vlog, "processed ");
		insn_processed = atoi(proc + 10);
		if (test->insn_processed != insn_processed) {
			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
			       insn_processed, test->insn_processed);
			goto fail_log;
		}
	}

	if (verbose)
		printf(", verifier log:\n%s", bpf_vlog);

	run_errs = 0;
	run_successes = 0;
	if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
		uint32_t expected_val;
		int i;

		if (!test->runs)
			test->runs = 1;

		for (i = 0; i < test->runs; i++) {
			if (unpriv && test->retvals[i].retval_unpriv)
				expected_val = test->retvals[i].retval_unpriv;
			else
				expected_val = test->retvals[i].retval;

			err = do_prog_test_run(fd_prog, unpriv, expected_val,
					       test->retvals[i].data,
					       sizeof(test->retvals[i].data));
			if (err) {
				printf("(run %d/%d) ", i + 1, test->runs);
				run_errs++;
			} else {
				run_successes++;
			}
		}
	}

	if (!run_errs) {
		(*passes)++;
		if (run_successes > 1)
			printf("%d cases ", run_successes);
		printf("OK");
		if (alignment_prevented_execution)
			printf(" (NOTE: not executed due to unknown alignment)");
		printf("\n");
	} else {
		printf("\n");
		goto fail_log;
	}
close_fds:
	if (test->fill_insns)
		free(test->fill_insns);
	close(fd_prog);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}

static bool is_admin(void)
{
	__u64 caps;

	/* The test checks for the finer-grained capabilities CAP_NET_ADMIN,
	 * CAP_PERFMON, and CAP_BPF instead of CAP_SYS_ADMIN.
	 * Thus, disable CAP_SYS_ADMIN at the beginning.
	 */
	if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps)) {
		perror("cap_disable_effective(CAP_SYS_ADMIN)");
		return false;
	}

	return (caps & ADMIN_CAPS) == ADMIN_CAPS;
}

static void get_unpriv_disabled()
{
	char buf[2];
	FILE *fd;

	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
	if (!fd) {
		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
		unpriv_disabled = true;
		return;
	}
	if (fgets(buf, 2, fd) == buf && atoi(buf))
		unpriv_disabled = true;
	fclose(fd);
}

static bool test_as_unpriv(struct bpf_test *test)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Some architectures have strict alignment requirements. In
	 * that case, the BPF verifier detects if a program has
	 * unaligned accesses and rejects them. A user can pass
	 * BPF_F_ANY_ALIGNMENT to a program to override this
	 * check. That, however, will only work when a privileged user
	 * loads a program. An unprivileged user loading a program
	 * with this flag will be rejected prior to entering the
	 * verifier.
	 */
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		return false;
#endif
	return !test->prog_type ||
	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}

static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Program types that are not supported by non-root are
		 * skipped right away.
		 */
		if (test_as_unpriv(test) && unpriv_disabled) {
			printf("#%d/u %s SKIP\n", i, test->descr);
			skips++;
		} else if (test_as_unpriv(test)) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		if (unpriv) {
			printf("#%d/p %s SKIP\n", i, test->descr);
			skips++;
		} else {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
	       skips, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}

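/* Usage (as implemented by the argument parsing below):
 *	test_verifier [-v] [test_num]		run a single test
 *	test_verifier [-v] [from] [to]		run an inclusive range of tests
 * With no arguments, all tests are run.
 */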
int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();
	int arg = 1;

	if (argc > 1 && strcmp(argv[1], "-v") == 0) {
		arg++;
		verbose = true;
		argc--;
	}

	if (argc == 3) {
		unsigned int l = atoi(argv[arg]);
		unsigned int u = atoi(argv[arg + 1]);

		if (l < to && u < to) {
			from = l;
			to = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[arg]);

		if (t < to) {
			from = t;
			to = t + 1;
		}
	}

	get_unpriv_disabled();
	if (unpriv && unpriv_disabled) {
		printf("Cannot run as unprivileged user with sysctl %s.\n",
		       UNPRIV_SYSCTL);
		return EXIT_FAILURE;
	}

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	bpf_semi_rand_init();
	return do_test(unpriv, from, to);
}