// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#define _GNU_SOURCE
#include "test_progs.h"
#include "testing_helpers.h"
#include "cgroup_helpers.h"
#include <argp.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <execinfo.h> /* backtrace */
#include <linux/membarrier.h>
#include <sys/sysinfo.h> /* get_nprocs */
#include <netinet/in.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/un.h>

static bool verbose(void)
{
	return env.verbosity > VERBOSE_NONE;
}

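/* Redirect stdout/stderr into an open_memstream() buffer so that each
 * test's (or sub-test's) output can be captured and replayed later.
 * No-op when running verbosely in the main process, where output goes
 * straight to the console.
 */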
static void stdio_hijack_init(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	fflush(stdout);
	fflush(stderr);

	stdout = open_memstream(log_buf, log_cnt);
	if (!stdout) {
		stdout = env.stdout;
		perror("open_memstream");
		return;
	}

	if (env.subtest_state)
		env.subtest_state->stdout = stdout;
	else
		env.test_state->stdout = stdout;

	stderr = stdout;
#endif
}

static void stdio_hijack(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	env.stdout = stdout;
	env.stderr = stderr;

	stdio_hijack_init(log_buf, log_cnt);
#endif
}

static void stdio_restore_cleanup(void)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	fflush(stdout);

	if (env.subtest_state) {
		fclose(env.subtest_state->stdout);
		env.subtest_state->stdout = NULL;
		stdout = env.test_state->stdout;
		stderr = env.test_state->stdout;
	} else {
		fclose(env.test_state->stdout);
		env.test_state->stdout = NULL;
	}
#endif
}

static void stdio_restore(void)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	if (stdout == env.stdout)
		return;

	stdio_restore_cleanup();

	stdout = env.stdout;
	stderr = env.stderr;
#endif
}

/* Adapted from perf/util/string.c */
static bool glob_match(const char *str, const char *pat)
{
	while (*str && *pat && *pat != '*') {
		if (*str != *pat)
			return false;
		str++;
		pat++;
	}
	/* Check wild card */
	if (*pat == '*') {
		while (*pat == '*')
			pat++;
		if (!*pat) /* Tail wild card matches all */
			return true;
		while (*str)
			if (glob_match(str++, pat))
				return true;
	}
	return !*str && !*pat;
}

#define EXIT_NO_TEST		2
#define EXIT_ERR_SETUP_INFRA	3

/* defined in test_progs.h */
struct test_env env = {};

struct prog_test_def {
	const char *test_name;
	int test_num;
	void (*run_test)(void);
	void (*run_serial_test)(void);
	bool should_run;
	bool need_cgroup_cleanup;
};

/* Override C runtime library's usleep() implementation to ensure nanosleep()
 * is always called. Usleep is frequently used in selftests as a way to
 * trigger kprobe and tracepoints.
 */
int usleep(useconds_t usec)
{
	struct timespec ts = {
		.tv_sec = usec / 1000000,
		.tv_nsec = (usec % 1000000) * 1000,
	};

	return syscall(__NR_nanosleep, &ts, NULL);
}

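/* Decide whether a top-level test should run, based on the allow/deny
 * name globs (-t/-a/-b/-d) and the numeric test selector (-n).
 */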
static bool should_run(struct test_selector *sel, int num, const char *name)
{
	int i;

	for (i = 0; i < sel->blacklist.cnt; i++) {
		if (glob_match(name, sel->blacklist.tests[i].name) &&
		    !sel->blacklist.tests[i].subtest_cnt)
			return false;
	}

	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(name, sel->whitelist.tests[i].name))
			return true;
	}

	if (!sel->whitelist.cnt && !sel->num_set)
		return true;

	return num < sel->num_set_len && sel->num_set[num];
}

static bool should_run_subtest(struct test_selector *sel,
			       struct test_selector *subtest_sel,
			       int subtest_num,
			       const char *test_name,
			       const char *subtest_name)
{
	int i, j;

	for (i = 0; i < sel->blacklist.cnt; i++) {
		if (glob_match(test_name, sel->blacklist.tests[i].name)) {
			if (!sel->blacklist.tests[i].subtest_cnt)
				return false;

			for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       sel->blacklist.tests[i].subtests[j]))
					return false;
			}
		}
	}

	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(test_name, sel->whitelist.tests[i].name)) {
			if (!sel->whitelist.tests[i].subtest_cnt)
				return true;

			for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       sel->whitelist.tests[i].subtests[j]))
					return true;
			}
		}
	}

	if (!sel->whitelist.cnt && !subtest_sel->num_set)
		return true;

	return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
}

static char *test_result(bool failed, bool skipped)
{
	return failed ? "FAIL" : (skipped ? "SKIP" : "OK");
}

static void print_test_log(char *log_buf, size_t log_cnt)
{
	log_buf[log_cnt] = '\0';
	fprintf(env.stdout, "%s", log_buf);
	if (log_buf[log_cnt - 1] != '\n')
		fprintf(env.stdout, "\n");
}

#define TEST_NUM_WIDTH 7

static void print_test_name(int test_num, const char *test_name, char *result)
{
	fprintf(env.stdout, "#%-*d %s", TEST_NUM_WIDTH, test_num, test_name);

	if (result)
		fprintf(env.stdout, ":%s", result);

	fprintf(env.stdout, "\n");
}

static void print_subtest_name(int test_num, int subtest_num,
			       const char *test_name, char *subtest_name,
			       char *result)
{
	char test_num_str[TEST_NUM_WIDTH + 1];

	snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num);

	fprintf(env.stdout, "#%-*s %s/%s",
		TEST_NUM_WIDTH, test_num_str,
		test_name, subtest_name);

	if (result)
		fprintf(env.stdout, ":%s", result);

	fprintf(env.stdout, "\n");
}

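/* Print the captured log and result line for a finished test. With
 * skip_ok_subtests, logs of successful sub-tests are omitted;
 * par_exec_result indicates the result was produced by a parallel
 * worker rather than in-process.
 */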
static void dump_test_log(const struct prog_test_def *test,
			  const struct test_state *test_state,
			  bool skip_ok_subtests,
			  bool par_exec_result)
{
	bool test_failed = test_state->error_cnt > 0;
	bool force_log = test_state->force_log;
	bool print_test = verbose() || force_log || test_failed;
	int i;
	struct subtest_state *subtest_state;
	bool subtest_failed;
	bool subtest_filtered;
	bool print_subtest;

	/* we do not print anything in the worker thread */
	if (env.worker_id != -1)
		return;

	/* there is nothing to print when verbose log is used and execution
	 * is not in parallel mode
	 */
	if (verbose() && !par_exec_result)
		return;

	if (test_state->log_cnt && print_test)
		print_test_log(test_state->log_buf, test_state->log_cnt);

	for (i = 0; i < test_state->subtest_num; i++) {
		subtest_state = &test_state->subtest_states[i];
		subtest_failed = subtest_state->error_cnt;
		subtest_filtered = subtest_state->filtered;
		print_subtest = verbose() || force_log || subtest_failed;

		if ((skip_ok_subtests && !subtest_failed) || subtest_filtered)
			continue;

		if (subtest_state->log_cnt && print_subtest) {
			print_test_log(subtest_state->log_buf,
				       subtest_state->log_cnt);
		}

		print_subtest_name(test->test_num, i + 1,
				   test->test_name, subtest_state->name,
				   test_result(subtest_state->error_cnt,
					       subtest_state->skipped));
	}

	print_test_name(test->test_num, test->test_name,
			test_result(test_failed, test_state->skip_cnt));
}

static void stdio_restore(void);

/* A bunch of tests set custom affinity per-thread and/or per-process. Reset
 * it after each test/sub-test.
 */
static void reset_affinity(void)
{
	cpu_set_t cpuset;
	int i, err;

	CPU_ZERO(&cpuset);
	for (i = 0; i < env.nr_cpus; i++)
		CPU_SET(i, &cpuset);

	err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
	if (err < 0) {
		stdio_restore();
		fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	}
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
	if (err < 0) {
		stdio_restore();
		fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

static void save_netns(void)
{
	env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (env.saved_netns_fd == -1) {
		perror("open(/proc/self/ns/net)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

static void restore_netns(void)
{
	if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
		stdio_restore();
		perror("setns(CLONE_NEWNS)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

void test__end_subtest(void)
{
	struct prog_test_def *test = env.test;
	struct test_state *test_state = env.test_state;
	struct subtest_state *subtest_state = env.subtest_state;

	if (subtest_state->error_cnt) {
		test_state->error_cnt++;
	} else {
		if (!subtest_state->skipped)
			test_state->sub_succ_cnt++;
		else
			test_state->skip_cnt++;
	}

	if (verbose() && !env.workers)
		print_subtest_name(test->test_num, test_state->subtest_num,
				   test->test_name, subtest_state->name,
				   test_result(subtest_state->error_cnt,
					       subtest_state->skipped));

	stdio_restore_cleanup();
	env.subtest_state = NULL;
}

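/* Begin a new sub-test: finalize any previous one, allocate and name
 * its state, apply sub-test filters, and hijack stdio to capture its
 * log. Returns false if the sub-test is filtered out or setup failed.
 */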
bool test__start_subtest(const char *subtest_name)
{
	struct prog_test_def *test = env.test;
	struct test_state *state = env.test_state;
	struct subtest_state *subtest_state;
	size_t sub_state_size = sizeof(*subtest_state);

	if (env.subtest_state)
		test__end_subtest();

	state->subtest_num++;
	state->subtest_states =
		realloc(state->subtest_states,
			state->subtest_num * sub_state_size);
	if (!state->subtest_states) {
		fprintf(stderr, "Not enough memory to allocate subtest result\n");
		return false;
	}

	subtest_state = &state->subtest_states[state->subtest_num - 1];

	memset(subtest_state, 0, sub_state_size);

	if (!subtest_name || !subtest_name[0]) {
		fprintf(env.stderr,
			"Subtest #%d didn't provide sub-test name!\n",
			state->subtest_num);
		return false;
	}

	subtest_state->name = strdup(subtest_name);
	if (!subtest_state->name) {
		fprintf(env.stderr,
			"Subtest #%d: failed to copy subtest name!\n",
			state->subtest_num);
		return false;
	}

	if (!should_run_subtest(&env.test_selector,
				&env.subtest_selector,
				state->subtest_num,
				test->test_name,
				subtest_name)) {
		subtest_state->filtered = true;
		return false;
	}

	env.subtest_state = subtest_state;
	stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);

	return true;
}

void test__force_log(void)
{
	env.test_state->force_log = true;
}

void test__skip(void)
{
	if (env.subtest_state)
		env.subtest_state->skipped = true;
	else
		env.test_state->skip_cnt++;
}

void test__fail(void)
{
	if (env.subtest_state)
		env.subtest_state->error_cnt++;
	else
		env.test_state->error_cnt++;
}

int test__join_cgroup(const char *path)
{
	int fd;

	if (!env.test->need_cgroup_cleanup) {
		if (setup_cgroup_environment()) {
			fprintf(stderr,
				"#%d %s: Failed to setup cgroup environment\n",
				env.test->test_num, env.test->test_name);
			return -1;
		}

		env.test->need_cgroup_cleanup = true;
	}

	fd = create_and_get_cgroup(path);
	if (fd < 0) {
		fprintf(stderr,
			"#%d %s: Failed to create cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
		return fd;
	}

	if (join_cgroup(path)) {
		fprintf(stderr,
			"#%d %s: Failed to join cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
		return -1;
	}

	return fd;
}

int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
		test__fail();
		return -1;
	}
	return bpf_map__fd(map);
}

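/* Report whether the BPF JIT is enabled by peeking at
 * /proc/sys/net/core/bpf_jit_enable.
 */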
static bool is_jit_enabled(void)
{
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	bool enabled = false;
	int sysctl_fd;

	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	return enabled;
}

int compare_map_keys(int map1_fd, int map2_fd)
{
	__u32 key, next_key;
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];
	int err;

	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	if (err)
		return err;
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	if (err)
		return err;

	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
		if (err)
			return err;

		key = next_key;
	}
	if (errno != ENOENT)
		return -1;

	return 0;
}

int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
	__u32 key, next_key, *cur_key_p, *next_key_p;
	char *val_buf1, *val_buf2;
	int i, err = 0;

	val_buf1 = malloc(stack_trace_len);
	val_buf2 = malloc(stack_trace_len);
	cur_key_p = NULL;
	next_key_p = &key;
	while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
		err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
		if (err)
			goto out;
		err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
		if (err)
			goto out;
		for (i = 0; i < stack_trace_len; i++) {
			if (val_buf1[i] != val_buf2[i]) {
				err = -1;
				goto out;
			}
		}
		key = *next_key_p;
		cur_key_p = &key;
		next_key_p = &next_key;
	}
	if (errno != ENOENT)
		err = -1;

out:
	free(val_buf1);
	free(val_buf2);
	return err;
}

int extract_build_id(char *build_id, size_t size)
{
	FILE *fp;
	char *line = NULL;
	size_t len = 0;

	fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
	if (fp == NULL)
		return -1;

	if (getline(&line, &len, fp) == -1)
		goto err;
	pclose(fp);

	if (len > size)
		len = size;
	memcpy(build_id, line, len);
	build_id[len] = '\0';
	free(line);
	return 0;
err:
	pclose(fp);
	return -1;
}

static int finit_module(int fd, const char *param_values, int flags)
{
	return syscall(__NR_finit_module, fd, param_values, flags);
}

static int delete_module(const char *name, int flags)
{
	return syscall(__NR_delete_module, name, flags);
}

/*
 * Trigger synchronize_rcu() in kernel.
 */
int kern_sync_rcu(void)
{
	return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0);
}

static void unload_bpf_testmod(void)
{
	if (kern_sync_rcu())
		fprintf(env.stderr, "Failed to trigger kernel-side RCU sync!\n");
	if (delete_module("bpf_testmod", 0)) {
		if (errno == ENOENT) {
			if (verbose())
				fprintf(stdout, "bpf_testmod.ko is already unloaded.\n");
			return;
		}
		fprintf(env.stderr, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno);
		return;
	}
	if (verbose())
		fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n");
}

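/* Load bpf_testmod.ko from the current directory, unloading any stale
 * instance first. Tests that rely on the module are skipped if this
 * fails.
 */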
static int load_bpf_testmod(void)
{
	int fd;

	/* ensure previous instance of the module is unloaded */
	unload_bpf_testmod();

	if (verbose())
		fprintf(stdout, "Loading bpf_testmod.ko...\n");

	fd = open("bpf_testmod.ko", O_RDONLY);
	if (fd < 0) {
		fprintf(env.stderr, "Can't find bpf_testmod.ko kernel module: %d\n", -errno);
		return -ENOENT;
	}
	if (finit_module(fd, "", 0)) {
		fprintf(env.stderr, "Failed to load bpf_testmod.ko into the kernel: %d\n", -errno);
		close(fd);
		return -EINVAL;
	}
	close(fd);

	if (verbose())
		fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n");
	return 0;
}

/* extern declarations for test funcs */
#define DEFINE_TEST(name)				\
	extern void test_##name(void) __weak;		\
	extern void serial_test_##name(void) __weak;
#include <prog_tests/tests.h>
#undef DEFINE_TEST

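/* Table of all tests. Each entry references the weak test_<name>()
 * and/or serial_test_<name>() symbols declared above; exactly one of
 * the two must be defined per test (enforced in main()).
 */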
static struct prog_test_def prog_test_defs[] = {
#define DEFINE_TEST(name) {				\
	.test_name = #name,				\
	.run_test = &test_##name,			\
	.run_serial_test = &serial_test_##name,	\
},
#include <prog_tests/tests.h>
#undef DEFINE_TEST
};

static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);

static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];

const char *argp_program_version = "test_progs 0.1";
const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
static const char argp_program_doc[] = "BPF selftests test runner";

enum ARG_KEYS {
	ARG_TEST_NUM = 'n',
	ARG_TEST_NAME = 't',
	ARG_TEST_NAME_BLACKLIST = 'b',
	ARG_VERIFIER_STATS = 's',
	ARG_VERBOSE = 'v',
	ARG_GET_TEST_CNT = 'c',
	ARG_LIST_TEST_NAMES = 'l',
	ARG_TEST_NAME_GLOB_ALLOWLIST = 'a',
	ARG_TEST_NAME_GLOB_DENYLIST = 'd',
	ARG_NUM_WORKERS = 'j',
	ARG_DEBUG = -1,
};

static const struct argp_option opts[] = {
	{ "num", ARG_TEST_NUM, "NUM", 0,
	  "Run test number NUM only " },
	{ "name", ARG_TEST_NAME, "NAMES", 0,
	  "Run tests with names containing any string from NAMES list" },
	{ "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0,
	  "Don't run tests with names containing any string from NAMES list" },
	{ "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
	  "Output verifier statistics", },
	{ "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
	  "Verbose output (use -vv or -vvv for progressively verbose output)" },
	{ "count", ARG_GET_TEST_CNT, NULL, 0,
	  "Get number of selected top-level tests " },
	{ "list", ARG_LIST_TEST_NAMES, NULL, 0,
	  "List test names that would run (without running them) " },
	{ "allow", ARG_TEST_NAME_GLOB_ALLOWLIST, "NAMES", 0,
	  "Run tests with name matching the pattern (supports '*' wildcard)." },
	{ "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0,
	  "Don't run tests with name matching the pattern (supports '*' wildcard)." },
	{ "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
	  "Number of workers to run in parallel, default to number of cpus." },
	{ "debug", ARG_DEBUG, NULL, 0,
	  "print extra debug information for test_progs." },
	{},
};

static int libbpf_print_fn(enum libbpf_print_level level,
			   const char *format, va_list args)
{
	if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
		return 0;
	vfprintf(stdout, format, args);
	return 0;
}

static void free_test_filter_set(const struct test_filter_set *set)
{
	int i, j;

	if (!set)
		return;

	for (i = 0; i < set->cnt; i++) {
		free((void *)set->tests[i].name);
		for (j = 0; j < set->tests[i].subtest_cnt; j++)
			free((void *)set->tests[i].subtests[j]);

		free((void *)set->tests[i].subtests);
	}

	free((void *)set->tests);
}

static void free_test_selector(struct test_selector *test_selector)
{
	free_test_filter_set(&test_selector->blacklist);
	free_test_filter_set(&test_selector->whitelist);
	free(test_selector->num_set);
}

extern int extra_prog_load_log_flags;

static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	struct test_env *env = state->input;

	switch (key) {
	case ARG_TEST_NUM: {
		char *subtest_str = strchr(arg, '/');

		if (subtest_str) {
			*subtest_str = '\0';
			if (parse_num_list(subtest_str + 1,
					   &env->subtest_selector.num_set,
					   &env->subtest_selector.num_set_len)) {
				fprintf(stderr,
					"Failed to parse subtest numbers.\n");
				return -EINVAL;
			}
		}
		if (parse_num_list(arg, &env->test_selector.num_set,
				   &env->test_selector.num_set_len)) {
			fprintf(stderr, "Failed to parse test numbers.\n");
			return -EINVAL;
		}
		break;
	}
	case ARG_TEST_NAME_GLOB_ALLOWLIST:
	case ARG_TEST_NAME: {
		if (parse_test_list(arg,
				    &env->test_selector.whitelist,
				    key == ARG_TEST_NAME_GLOB_ALLOWLIST))
			return -ENOMEM;
		break;
	}
	case ARG_TEST_NAME_GLOB_DENYLIST:
	case ARG_TEST_NAME_BLACKLIST: {
		if (parse_test_list(arg,
				    &env->test_selector.blacklist,
				    key == ARG_TEST_NAME_GLOB_DENYLIST))
			return -ENOMEM;
		break;
	}
	case ARG_VERIFIER_STATS:
		env->verifier_stats = true;
		break;
	case ARG_VERBOSE:
		env->verbosity = VERBOSE_NORMAL;
		if (arg) {
			if (strcmp(arg, "v") == 0) {
				env->verbosity = VERBOSE_VERY;
				extra_prog_load_log_flags = 1;
			} else if (strcmp(arg, "vv") == 0) {
				env->verbosity = VERBOSE_SUPER;
				extra_prog_load_log_flags = 2;
			} else {
				fprintf(stderr,
					"Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n",
					arg);
				return -EINVAL;
			}
		}

		if (verbose()) {
			if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
				fprintf(stderr,
					"Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)",
					errno);
				return -EINVAL;
			}
		}

		break;
	case ARG_GET_TEST_CNT:
		env->get_test_cnt = true;
		break;
	case ARG_LIST_TEST_NAMES:
		env->list_test_names = true;
		break;
	case ARG_NUM_WORKERS:
		if (arg) {
			env->workers = atoi(arg);
			if (!env->workers) {
				fprintf(stderr, "Invalid number of worker: %s.", arg);
				return -EINVAL;
			}
		} else {
			env->workers = get_nprocs();
		}
		break;
	case ARG_DEBUG:
		env->debug = true;
		break;
	case ARGP_KEY_ARG:
		argp_usage(state);
		break;
	case ARGP_KEY_END:
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}
	return 0;
}

/*
 * Determine if test_progs is running as a "flavored" test runner and switch
 * into corresponding sub-directory to load correct BPF objects.
 *
 * This is done by looking at executable name. If it contains "-flavor"
 * suffix, then we are running as a flavored test runner.
 */
int cd_flavor_subdir(const char *exec_name)
{
	/* General form of argv[0] passed here is:
	 * some/path/to/test_progs[-flavor], where -flavor part is optional.
	 * First cut out "test_progs[-flavor]" part, then extract "flavor"
	 * part, if it's there.
	 */
	const char *flavor = strrchr(exec_name, '/');

	if (!flavor)
		flavor = exec_name;
	else
		flavor++;

	flavor = strrchr(flavor, '-');
	if (!flavor)
		return 0;
	flavor++;
	if (verbose())
		fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);

	return chdir(flavor);
}

int trigger_module_test_read(int read_sz)
{
	int fd, err;

	fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
	err = -errno;
	if (!ASSERT_GE(fd, 0, "testmod_file_open"))
		return err;

	read(fd, NULL, read_sz);
	close(fd);

	return 0;
}

int trigger_module_test_write(int write_sz)
{
	int fd, err;
	char *buf = malloc(write_sz);

	if (!buf)
		return -ENOMEM;

	memset(buf, 'a', write_sz);
	buf[write_sz - 1] = '\0';

	fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY);
	err = -errno;
	if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
		free(buf);
		return err;
	}

	write(fd, buf, write_sz);
	close(fd);
	free(buf);
	return 0;
}

int write_sysctl(const char *sysctl, const char *value)
{
	int fd, err, len;

	fd = open(sysctl, O_WRONLY);
	if (!ASSERT_NEQ(fd, -1, "open sysctl"))
		return -1;

	len = strlen(value);
	err = write(fd, value, len);
	close(fd);
	if (!ASSERT_EQ(err, len, "write sysctl"))
		return -1;

	return 0;
}

#define MAX_BACKTRACE_SZ 128
void crash_handler(int signum)
{
	void *bt[MAX_BACKTRACE_SZ];
	size_t sz;

	sz = backtrace(bt, ARRAY_SIZE(bt));

	if (env.test) {
		env.test_state->error_cnt++;
		dump_test_log(env.test, env.test_state, true, false);
	}
	if (env.stdout)
		stdio_restore();
	if (env.worker_id != -1)
		fprintf(stderr, "[%d]: ", env.worker_id);
	fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
	backtrace_symbols_fd(bt, sz, STDERR_FILENO);
}

static void sigint_handler(int signum)
{
	int i;

	for (i = 0; i < env.workers; i++)
		if (env.worker_socks[i] > 0)
			close(env.worker_socks[i]);
}

static int current_test_idx;
static pthread_mutex_t current_test_lock;
static pthread_mutex_t stdout_output_lock;

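/* Render a dispatcher/worker IPC message as a human-readable string
 * for --debug logging.
 */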
static inline const char *str_msg(const struct msg *msg, char *buf)
{
	switch (msg->type) {
	case MSG_DO_TEST:
		sprintf(buf, "MSG_DO_TEST %d", msg->do_test.num);
		break;
	case MSG_TEST_DONE:
		sprintf(buf, "MSG_TEST_DONE %d (log: %d)",
			msg->test_done.num,
			msg->test_done.have_log);
		break;
	case MSG_SUBTEST_DONE:
		sprintf(buf, "MSG_SUBTEST_DONE %d (log: %d)",
			msg->subtest_done.num,
			msg->subtest_done.have_log);
		break;
	case MSG_TEST_LOG:
		sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
			strlen(msg->test_log.log_buf),
			msg->test_log.is_last);
		break;
	case MSG_EXIT:
		sprintf(buf, "MSG_EXIT");
		break;
	default:
		sprintf(buf, "UNKNOWN");
		break;
	}

	return buf;
}

static int send_message(int sock, const struct msg *msg)
{
	char buf[256];

	if (env.debug)
		fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf));
	return send(sock, msg, sizeof(*msg), 0);
}

static int recv_message(int sock, struct msg *msg)
{
	int ret;
	char buf[256];

	memset(msg, 0, sizeof(*msg));
	ret = recv(sock, msg, sizeof(*msg), 0);
	if (ret >= 0) {
		if (env.debug)
			fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf));
	}
	return ret;
}

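/* Run a single test in the current process: hijack stdio, invoke the
 * test body, finalize any dangling sub-test, restore affinity, netns
 * and cgroup state, and dump the captured log.
 */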
static void run_one_test(int test_num)
{
	struct prog_test_def *test = &prog_test_defs[test_num];
	struct test_state *state = &test_states[test_num];

	env.test = test;
	env.test_state = state;

	stdio_hijack(&state->log_buf, &state->log_cnt);

	if (test->run_test)
		test->run_test();
	else if (test->run_serial_test)
		test->run_serial_test();

	/* ensure last sub-test is finalized properly */
	if (env.subtest_state)
		test__end_subtest();

	state->tested = true;

	if (verbose() && env.worker_id == -1)
		print_test_name(test_num + 1, test->test_name,
				test_result(state->error_cnt, state->skip_cnt));

	reset_affinity();
	restore_netns();
	if (test->need_cgroup_cleanup)
		cleanup_cgroup_environment();

	stdio_restore();

	dump_test_log(test, state, false, false);
}

struct dispatch_data {
	int worker_id;
	int sock_fd;
};

static int read_prog_test_msg(int sock_fd, struct msg *msg, enum msg_type type)
{
	if (recv_message(sock_fd, msg) < 0)
		return 1;

	if (msg->type != type) {
		printf("%s: unexpected message type %d. expected %d\n", __func__, msg->type, type);
		return 1;
	}

	return 0;
}

static int dispatch_thread_read_log(int sock_fd, char **log_buf, size_t *log_cnt)
{
	FILE *log_fp = NULL;
	int result = 0;

	log_fp = open_memstream(log_buf, log_cnt);
	if (!log_fp)
		return 1;

	while (true) {
		struct msg msg;

		if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_LOG)) {
			result = 1;
			goto out;
		}

		fprintf(log_fp, "%s", msg.test_log.log_buf);
		if (msg.test_log.is_last)
			break;
	}

out:
	fclose(log_fp);
	log_fp = NULL;
	return result;
}

static int dispatch_thread_send_subtests(int sock_fd, struct test_state *state)
{
	struct msg msg;
	struct subtest_state *subtest_state;
	int subtest_num = state->subtest_num;

	state->subtest_states = malloc(subtest_num * sizeof(*subtest_state));

	for (int i = 0; i < subtest_num; i++) {
		subtest_state = &state->subtest_states[i];

		memset(subtest_state, 0, sizeof(*subtest_state));

		if (read_prog_test_msg(sock_fd, &msg, MSG_SUBTEST_DONE))
			return 1;

		subtest_state->name = strdup(msg.subtest_done.name);
		subtest_state->error_cnt = msg.subtest_done.error_cnt;
		subtest_state->skipped = msg.subtest_done.skipped;
		subtest_state->filtered = msg.subtest_done.filtered;

		/* collect all logs */
		if (msg.subtest_done.have_log)
			if (dispatch_thread_read_log(sock_fd,
						     &subtest_state->log_buf,
						     &subtest_state->log_cnt))
				return 1;
	}

	return 0;
}

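/* Per-worker dispatcher thread running in the main process: grab the
 * next parallel (non-serial) test, ask the worker to run it over the
 * socket, then collect its result, sub-test states and logs.
 */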
static void *dispatch_thread(void *ctx)
{
	struct dispatch_data *data = ctx;
	int sock_fd;

	sock_fd = data->sock_fd;

	while (true) {
		int test_to_run = -1;
		struct prog_test_def *test;
		struct test_state *state;

		/* grab a test */
		{
			pthread_mutex_lock(&current_test_lock);

			if (current_test_idx >= prog_test_cnt) {
				pthread_mutex_unlock(&current_test_lock);
				goto done;
			}

			test = &prog_test_defs[current_test_idx];
			test_to_run = current_test_idx;
			current_test_idx++;

			pthread_mutex_unlock(&current_test_lock);
		}

		if (!test->should_run || test->run_serial_test)
			continue;

		/* run test through worker */
		{
			struct msg msg_do_test;

			memset(&msg_do_test, 0, sizeof(msg_do_test));
			msg_do_test.type = MSG_DO_TEST;
			msg_do_test.do_test.num = test_to_run;
			if (send_message(sock_fd, &msg_do_test) < 0) {
				perror("Fail to send command");
				goto done;
			}
			env.worker_current_test[data->worker_id] = test_to_run;
		}

		/* wait for test done */
		do {
			struct msg msg;

			if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_DONE))
				goto error;
			if (test_to_run != msg.test_done.num)
				goto error;

			state = &test_states[test_to_run];
			state->tested = true;
			state->error_cnt = msg.test_done.error_cnt;
			state->skip_cnt = msg.test_done.skip_cnt;
			state->sub_succ_cnt = msg.test_done.sub_succ_cnt;
			state->subtest_num = msg.test_done.subtest_num;

			/* collect all logs */
			if (msg.test_done.have_log) {
				if (dispatch_thread_read_log(sock_fd,
							     &state->log_buf,
							     &state->log_cnt))
					goto error;
			}

			/* collect all subtests and subtest logs */
			if (!state->subtest_num)
				break;

			if (dispatch_thread_send_subtests(sock_fd, state))
				goto error;
		} while (false);

		pthread_mutex_lock(&stdout_output_lock);
		dump_test_log(test, state, false, true);
		pthread_mutex_unlock(&stdout_output_lock);
	} /* while (true) */
error:
	if (env.debug)
		fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno));

done:
	{
		struct msg msg_exit;

		msg_exit.type = MSG_EXIT;
		if (send_message(sock_fd, &msg_exit) < 0) {
			if (env.debug)
				fprintf(stderr, "[%d]: send_message msg_exit: %s.\n",
					data->worker_id, strerror(errno));
		}
	}
	return NULL;
}

static void calculate_summary_and_print_errors(struct test_env *env)
{
	int i;
	int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;

	for (i = 0; i < prog_test_cnt; i++) {
		struct test_state *state = &test_states[i];

		if (!state->tested)
			continue;

		sub_succ_cnt += state->sub_succ_cnt;
		skip_cnt += state->skip_cnt;

		if (state->error_cnt)
			fail_cnt++;
		else
			succ_cnt++;
	}

	/*
	 * We only print the error logs summary when there are failed tests
	 * and verbose mode is not enabled. Otherwise, results may be
	 * inconsistent.
	 */
	if (!verbose() && fail_cnt) {
		printf("\nAll error logs:\n");

		/* print error logs again */
		for (i = 0; i < prog_test_cnt; i++) {
			struct prog_test_def *test = &prog_test_defs[i];
			struct test_state *state = &test_states[i];

			if (!state->tested || !state->error_cnt)
				continue;

			dump_test_log(test, state, true, true);
		}
	}

	printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
	       succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);

	env->succ_cnt = succ_cnt;
	env->sub_succ_cnt = sub_succ_cnt;
	env->fail_cnt = fail_cnt;
	env->skip_cnt = skip_cnt;
}

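/* Parallel-mode orchestration in the main process: start one
 * dispatcher thread per worker, wait for them to drain the parallel
 * tests, run serial tests locally, print the summary and reap the
 * worker processes.
 */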
static void server_main(void)
{
	pthread_t *dispatcher_threads;
	struct dispatch_data *data;
	struct sigaction sigact_int = {
		.sa_handler = sigint_handler,
		.sa_flags = SA_RESETHAND,
	};
	int i;

	sigaction(SIGINT, &sigact_int, NULL);

	dispatcher_threads = calloc(sizeof(pthread_t), env.workers);
	data = calloc(sizeof(struct dispatch_data), env.workers);

	env.worker_current_test = calloc(sizeof(int), env.workers);
	for (i = 0; i < env.workers; i++) {
		int rc;

		data[i].worker_id = i;
		data[i].sock_fd = env.worker_socks[i];
		rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]);
		if (rc < 0) {
			perror("Failed to launch dispatcher thread");
			exit(EXIT_ERR_SETUP_INFRA);
		}
	}

	/* wait for all dispatcher to finish */
	for (i = 0; i < env.workers; i++) {
		while (true) {
			int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL);

			if (!ret) {
				break;
			} else if (ret == EBUSY) {
				if (env.debug)
					fprintf(stderr, "Still waiting for thread %d (test %d).\n",
						i, env.worker_current_test[i] + 1);
				usleep(1000 * 1000);
				continue;
			} else {
				fprintf(stderr, "Unexpected error joining dispatcher thread: %d", ret);
				break;
			}
		}
	}
	free(dispatcher_threads);
	free(env.worker_current_test);
	free(data);

	/* run serial tests */
	save_netns();

	for (int i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		if (!test->should_run || !test->run_serial_test)
			continue;

		run_one_test(i);
	}

	/* generate summary */
	fflush(stderr);
	fflush(stdout);

	calculate_summary_and_print_errors(&env);

	/* reap all workers */
	for (i = 0; i < env.workers; i++) {
		int wstatus, pid;

		pid = waitpid(env.worker_pids[i], &wstatus, 0);
		if (pid != env.worker_pids[i])
			perror("Unable to reap worker");
	}
}

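/* Stream a captured log buffer back to the dispatcher in
 * MAX_LOG_TRUNK_SIZE chunks; the final chunk is flagged with is_last.
 */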
static void worker_main_send_log(int sock, char *log_buf, size_t log_cnt)
{
	char *src;
	size_t slen;

	src = log_buf;
	slen = log_cnt;
	while (slen) {
		struct msg msg_log;
		char *dest;
		size_t len;

		memset(&msg_log, 0, sizeof(msg_log));
		msg_log.type = MSG_TEST_LOG;
		dest = msg_log.test_log.log_buf;
		len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
		memcpy(dest, src, len);

		src += len;
		slen -= len;
		if (!slen)
			msg_log.test_log.is_last = true;

		assert(send_message(sock, &msg_log) >= 0);
	}
}

static void free_subtest_state(struct subtest_state *state)
{
	if (state->log_buf) {
		free(state->log_buf);
		state->log_buf = NULL;
		state->log_cnt = 0;
	}
	free(state->name);
	state->name = NULL;
}

static int worker_main_send_subtests(int sock, struct test_state *state)
{
	int i, result = 0;
	struct msg msg;
	struct subtest_state *subtest_state;

	memset(&msg, 0, sizeof(msg));
	msg.type = MSG_SUBTEST_DONE;

	for (i = 0; i < state->subtest_num; i++) {
		subtest_state = &state->subtest_states[i];

		msg.subtest_done.num = i;

		strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);

		msg.subtest_done.error_cnt = subtest_state->error_cnt;
		msg.subtest_done.skipped = subtest_state->skipped;
		msg.subtest_done.filtered = subtest_state->filtered;
		msg.subtest_done.have_log = false;

		if (verbose() || state->force_log || subtest_state->error_cnt) {
			if (subtest_state->log_cnt)
				msg.subtest_done.have_log = true;
		}

		if (send_message(sock, &msg) < 0) {
			perror("Fail to send message done");
			result = 1;
			goto out;
		}

		/* send logs */
		if (msg.subtest_done.have_log)
			worker_main_send_log(sock, subtest_state->log_buf, subtest_state->log_cnt);

		free_subtest_state(subtest_state);
		free(subtest_state->name);
	}

out:
	for (; i < state->subtest_num; i++)
		free_subtest_state(&state->subtest_states[i]);
	free(state->subtest_states);
	return result;
}

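/* Worker-process main loop: wait for MSG_DO_TEST commands, run the
 * requested test and send back MSG_TEST_DONE plus any sub-test states
 * and logs; exit on MSG_EXIT.
 */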
static int worker_main(int sock)
{
	save_netns();

	while (true) {
		/* receive command */
		struct msg msg;

		if (recv_message(sock, &msg) < 0)
			goto out;

		switch (msg.type) {
		case MSG_EXIT:
			if (env.debug)
				fprintf(stderr, "[%d]: worker exit.\n",
					env.worker_id);
			goto out;
		case MSG_DO_TEST: {
			int test_to_run = msg.do_test.num;
			struct prog_test_def *test = &prog_test_defs[test_to_run];
			struct test_state *state = &test_states[test_to_run];
			struct msg msg;

			if (env.debug)
				fprintf(stderr, "[%d]: #%d:%s running.\n",
					env.worker_id,
					test_to_run + 1,
					test->test_name);

			run_one_test(test_to_run);

			memset(&msg, 0, sizeof(msg));
			msg.type = MSG_TEST_DONE;
			msg.test_done.num = test_to_run;
			msg.test_done.error_cnt = state->error_cnt;
			msg.test_done.skip_cnt = state->skip_cnt;
			msg.test_done.sub_succ_cnt = state->sub_succ_cnt;
			msg.test_done.subtest_num = state->subtest_num;
			msg.test_done.have_log = false;

			if (verbose() || state->force_log || state->error_cnt) {
				if (state->log_cnt)
					msg.test_done.have_log = true;
			}
			if (send_message(sock, &msg) < 0) {
				perror("Fail to send message done");
				goto out;
			}

			/* send logs */
			if (msg.test_done.have_log)
				worker_main_send_log(sock, state->log_buf, state->log_cnt);

			if (state->log_buf) {
				free(state->log_buf);
				state->log_buf = NULL;
				state->log_cnt = 0;
			}

			if (state->subtest_num)
				if (worker_main_send_subtests(sock, state))
					goto out;

			if (env.debug)
				fprintf(stderr, "[%d]: #%d:%s done.\n",
					env.worker_id,
					test_to_run + 1,
					test->test_name);
			break;
		} /* case MSG_DO_TEST */
		default:
			if (env.debug)
				fprintf(stderr, "[%d]: unknown message.\n", env.worker_id);
			return -1;
		}
	}
out:
	return 0;
}

static void free_test_states(void)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(prog_test_defs); i++) {
		struct test_state *test_state = &test_states[i];

		for (j = 0; j < test_state->subtest_num; j++)
			free_subtest_state(&test_state->subtest_states[j]);

		free(test_state->subtest_states);
		free(test_state->log_buf);
		test_state->subtest_states = NULL;
		test_state->log_buf = NULL;
	}
}

int main(int argc, char **argv)
{
	static const struct argp argp = {
		.options = opts,
		.parser = parse_arg,
		.doc = argp_program_doc,
	};
	struct sigaction sigact = {
		.sa_handler = crash_handler,
		.sa_flags = SA_RESETHAND,
	};
	int err, i;

	sigaction(SIGSEGV, &sigact, NULL);

	err = argp_parse(&argp, argc, argv, 0, NULL, &env);
	if (err)
		return err;

	err = cd_flavor_subdir(argv[0]);
	if (err)
		return err;

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	libbpf_set_print(libbpf_print_fn);

	srand(time(NULL));

	env.jit_enabled = is_jit_enabled();
	env.nr_cpus = libbpf_num_possible_cpus();
	if (env.nr_cpus < 0) {
		fprintf(stderr, "Failed to get number of CPUs: %d!\n",
			env.nr_cpus);
		return -1;
	}

	env.stdout = stdout;
	env.stderr = stderr;

	env.has_testmod = true;
	if (!env.list_test_names && load_bpf_testmod()) {
		fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
		env.has_testmod = false;
	}

	/* initializing tests */
	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		test->test_num = i + 1;
		test->should_run = should_run(&env.test_selector,
					      test->test_num, test->test_name);

		if ((test->run_test == NULL && test->run_serial_test == NULL) ||
		    (test->run_test != NULL && test->run_serial_test != NULL)) {
			fprintf(stderr, "Test %d:%s must have either test_%s() or serial_test_%s() defined.\n",
				test->test_num, test->test_name, test->test_name, test->test_name);
			exit(EXIT_ERR_SETUP_INFRA);
		}
	}

	/* ignore workers if we are just listing */
	if (env.get_test_cnt || env.list_test_names)
		env.workers = 0;

	/* launch workers if requested */
	env.worker_id = -1; /* main process */
	if (env.workers) {
		env.worker_pids = calloc(sizeof(__pid_t), env.workers);
		env.worker_socks = calloc(sizeof(int), env.workers);
		if (env.debug)
			fprintf(stdout, "Launching %d workers.\n", env.workers);
		for (i = 0; i < env.workers; i++) {
			int sv[2];
			pid_t pid;

			if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) {
				perror("Fail to create worker socket");
				return -1;
			}
			pid = fork();
			if (pid < 0) {
				perror("Failed to fork worker");
				return -1;
			} else if (pid != 0) { /* main process */
				close(sv[1]);
				env.worker_pids[i] = pid;
				env.worker_socks[i] = sv[0];
			} else { /* inside each worker process */
				close(sv[0]);
				env.worker_id = i;
				return worker_main(sv[1]);
			}
		}

		if (env.worker_id == -1) {
			server_main();
			goto out;
		}
	}

	/* The rest of the main process */

	/* on single mode */
	save_netns();

	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		if (!test->should_run)
			continue;

		if (env.get_test_cnt) {
			env.succ_cnt++;
			continue;
		}

		if (env.list_test_names) {
			fprintf(env.stdout, "%s\n", test->test_name);
			env.succ_cnt++;
			continue;
		}

		run_one_test(i);
	}

	if (env.get_test_cnt) {
		printf("%d\n", env.succ_cnt);
		goto out;
	}

	if (env.list_test_names)
		goto out;

	calculate_summary_and_print_errors(&env);

	close(env.saved_netns_fd);
out:
	if (!env.list_test_names && env.has_testmod)
		unload_bpf_testmod();

	free_test_selector(&env.test_selector);
	free_test_selector(&env.subtest_selector);
	free_test_states();

	if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
		return EXIT_NO_TEST;

	return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}