1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Facebook
3 */
4 #define _GNU_SOURCE
5 #include "test_progs.h"
6 #include "testing_helpers.h"
7 #include "cgroup_helpers.h"
8 #include <argp.h>
9 #include <pthread.h>
10 #include <sched.h>
11 #include <signal.h>
12 #include <string.h>
13 #include <execinfo.h> /* backtrace */
14 #include <sys/sysinfo.h> /* get_nprocs */
15 #include <netinet/in.h>
16 #include <sys/select.h>
17 #include <sys/socket.h>
18 #include <sys/un.h>
19 #include <bpf/btf.h>
20 #include "json_writer.h"
21
22 static bool verbose(void)
23 {
24 return env.verbosity > VERBOSE_NONE;
25 }
26
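/* Log capture: while a test (or subtest) runs, stdout/stderr are redirected
 * into an in-memory stream created with open_memstream(), so everything the
 * test prints accumulates in the per-test log_buf/log_cnt. The original
 * streams are saved in env.stdout/env.stderr and put back by stdio_restore().
 * In verbose single-process mode nothing is hijacked and output goes straight
 * to the terminal.
 */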
27 static void stdio_hijack_init(char **log_buf, size_t *log_cnt)
28 {
29 #ifdef __GLIBC__
30 if (verbose() && env.worker_id == -1) {
31 /* nothing to do, output to stdout by default */
32 return;
33 }
34
35 fflush(stdout);
36 fflush(stderr);
37
38 stdout = open_memstream(log_buf, log_cnt);
39 if (!stdout) {
40 stdout = env.stdout;
41 perror("open_memstream");
42 return;
43 }
44
45 if (env.subtest_state)
46 env.subtest_state->stdout = stdout;
47 else
48 env.test_state->stdout = stdout;
49
50 stderr = stdout;
51 #endif
52 }
53
54 static void stdio_hijack(char **log_buf, size_t *log_cnt)
55 {
56 #ifdef __GLIBC__
57 if (verbose() && env.worker_id == -1) {
58 /* nothing to do, output to stdout by default */
59 return;
60 }
61
62 env.stdout = stdout;
63 env.stderr = stderr;
64
65 stdio_hijack_init(log_buf, log_cnt);
66 #endif
67 }
68
69 static void stdio_restore_cleanup(void)
70 {
71 #ifdef __GLIBC__
72 if (verbose() && env.worker_id == -1) {
73 /* nothing to do, output to stdout by default */
74 return;
75 }
76
77 fflush(stdout);
78
79 if (env.subtest_state) {
80 fclose(env.subtest_state->stdout);
81 env.subtest_state->stdout = NULL;
82 stdout = env.test_state->stdout;
83 stderr = env.test_state->stdout;
84 } else {
85 fclose(env.test_state->stdout);
86 env.test_state->stdout = NULL;
87 }
88 #endif
89 }
90
91 static void stdio_restore(void)
92 {
93 #ifdef __GLIBC__
94 if (verbose() && env.worker_id == -1) {
95 /* nothing to do, output to stdout by default */
96 return;
97 }
98
99 if (stdout == env.stdout)
100 return;
101
102 stdio_restore_cleanup();
103
104 stdout = env.stdout;
105 stderr = env.stderr;
106 #endif
107 }
108
109 /* Adapted from perf/util/string.c */
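/* Minimal glob matcher: only '*' is special (it matches any number of
 * characters, including none); every other character must match literally.
 * For example, glob_match("bpf_cookie", "bpf_*") is true, while '?' and
 * character classes are not supported.
 */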
110 static bool glob_match(const char *str, const char *pat)
111 {
112 while (*str && *pat && *pat != '*') {
113 if (*str != *pat)
114 return false;
115 str++;
116 pat++;
117 }
118 /* Check wild card */
119 if (*pat == '*') {
120 while (*pat == '*')
121 pat++;
122 if (!*pat) /* Tail wild card matches all */
123 return true;
124 while (*str)
125 if (glob_match(str++, pat))
126 return true;
127 }
128 return !*str && !*pat;
129 }
130
131 #define EXIT_NO_TEST 2
132 #define EXIT_ERR_SETUP_INFRA 3
133
134 /* defined in test_progs.h */
135 struct test_env env = {};
136
137 struct prog_test_def {
138 const char *test_name;
139 int test_num;
140 void (*run_test)(void);
141 void (*run_serial_test)(void);
142 bool should_run;
143 bool need_cgroup_cleanup;
144 };
145
146 /* Override C runtime library's usleep() implementation to ensure nanosleep()
147 * is always called. usleep() is frequently used in selftests as a way to
148 * trigger kprobes and tracepoints.
149 */
150 int usleep(useconds_t usec)
151 {
152 struct timespec ts = {
153 .tv_sec = usec / 1000000,
154 .tv_nsec = (usec % 1000000) * 1000,
155 };
156
157 return syscall(__NR_nanosleep, &ts, NULL);
158 }
159
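/* Test selection: a test runs if it is not denylisted (-d/-b) and either
 * matches an allowlist entry (-a/-t), or no allowlist/number filter was given,
 * or its number was selected with -n. Denylist entries that carry subtest
 * patterns do not exclude the whole test here; they are honored per-subtest
 * in should_run_subtest().
 */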
160 static bool should_run(struct test_selector *sel, int num, const char *name)
161 {
162 int i;
163
164 for (i = 0; i < sel->blacklist.cnt; i++) {
165 if (glob_match(name, sel->blacklist.tests[i].name) &&
166 !sel->blacklist.tests[i].subtest_cnt)
167 return false;
168 }
169
170 for (i = 0; i < sel->whitelist.cnt; i++) {
171 if (glob_match(name, sel->whitelist.tests[i].name))
172 return true;
173 }
174
175 if (!sel->whitelist.cnt && !sel->num_set)
176 return true;
177
178 return num < sel->num_set_len && sel->num_set[num];
179 }
180
181 static bool should_run_subtest(struct test_selector *sel,
182 struct test_selector *subtest_sel,
183 int subtest_num,
184 const char *test_name,
185 const char *subtest_name)
186 {
187 int i, j;
188
189 for (i = 0; i < sel->blacklist.cnt; i++) {
190 if (glob_match(test_name, sel->blacklist.tests[i].name)) {
191 if (!sel->blacklist.tests[i].subtest_cnt)
192 return false;
193
194 for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
195 if (glob_match(subtest_name,
196 sel->blacklist.tests[i].subtests[j]))
197 return false;
198 }
199 }
200 }
201
202 for (i = 0; i < sel->whitelist.cnt; i++) {
203 if (glob_match(test_name, sel->whitelist.tests[i].name)) {
204 if (!sel->whitelist.tests[i].subtest_cnt)
205 return true;
206
207 for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
208 if (glob_match(subtest_name,
209 sel->whitelist.tests[i].subtests[j]))
210 return true;
211 }
212 }
213 }
214
215 if (!sel->whitelist.cnt && !subtest_sel->num_set)
216 return true;
217
218 return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
219 }
220
221 static char *test_result(bool failed, bool skipped)
222 {
223 return failed ? "FAIL" : (skipped ? "SKIP" : "OK");
224 }
225
226 #define TEST_NUM_WIDTH 7
227
228 static void print_test_result(const struct prog_test_def *test, const struct test_state *test_state)
229 {
230 int skipped_cnt = test_state->skip_cnt;
231 int subtests_cnt = test_state->subtest_num;
232
233 fprintf(env.stdout, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name);
234 if (test_state->error_cnt)
235 fprintf(env.stdout, "FAIL");
236 else if (!skipped_cnt)
237 fprintf(env.stdout, "OK");
238 else if (skipped_cnt == subtests_cnt || !subtests_cnt)
239 fprintf(env.stdout, "SKIP");
240 else
241 fprintf(env.stdout, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt);
242
243 fprintf(env.stdout, "\n");
244 }
245
246 static void print_test_log(char *log_buf, size_t log_cnt)
247 {
248 log_buf[log_cnt] = '\0';
249 fprintf(env.stdout, "%s", log_buf);
250 if (log_buf[log_cnt - 1] != '\n')
251 fprintf(env.stdout, "\n");
252 }
253
254 static void print_subtest_name(int test_num, int subtest_num,
255 const char *test_name, char *subtest_name,
256 char *result)
257 {
258 char test_num_str[TEST_NUM_WIDTH + 1];
259
260 snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num);
261
262 fprintf(env.stdout, "#%-*s %s/%s",
263 TEST_NUM_WIDTH, test_num_str,
264 test_name, subtest_name);
265
266 if (result)
267 fprintf(env.stdout, ":%s", result);
268
269 fprintf(env.stdout, "\n");
270 }
271
272 static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
273 {
274 /* open_memstream (from stdio_hijack_init) ensures that log_buf is terminated by a
275 * null byte. Yet in parallel mode, log_buf will be NULL if there is no message.
276 */
277 if (log_cnt) {
278 jsonw_string_field(w, "message", log_buf);
279 } else {
280 jsonw_string_field(w, "message", "");
281 }
282 }
283
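/* Print the captured log and per-subtest results for one finished test and,
 * when w != NULL, append a matching object to the JSON report. Only the main
 * process prints; failed tests, force-logged tests and verbose runs get their
 * full log, while skip_ok_subtests lets the error-summary pass at the end of
 * the run re-print failures only.
 */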
284 static void dump_test_log(const struct prog_test_def *test,
285 const struct test_state *test_state,
286 bool skip_ok_subtests,
287 bool par_exec_result,
288 json_writer_t *w)
289 {
290 bool test_failed = test_state->error_cnt > 0;
291 bool force_log = test_state->force_log;
292 bool print_test = verbose() || force_log || test_failed;
293 int i;
294 struct subtest_state *subtest_state;
295 bool subtest_failed;
296 bool subtest_filtered;
297 bool print_subtest;
298
299 /* we do not print anything in the worker thread */
300 if (env.worker_id != -1)
301 return;
302
303 /* there is nothing to print when verbose log is used and execution
304 * is not in parallel mode
305 */
306 if (verbose() && !par_exec_result)
307 return;
308
309 if (test_state->log_cnt && print_test)
310 print_test_log(test_state->log_buf, test_state->log_cnt);
311
312 if (w && print_test) {
313 jsonw_start_object(w);
314 jsonw_string_field(w, "name", test->test_name);
315 jsonw_uint_field(w, "number", test->test_num);
316 jsonw_write_log_message(w, test_state->log_buf, test_state->log_cnt);
317 jsonw_bool_field(w, "failed", test_failed);
318 jsonw_name(w, "subtests");
319 jsonw_start_array(w);
320 }
321
322 for (i = 0; i < test_state->subtest_num; i++) {
323 subtest_state = &test_state->subtest_states[i];
324 subtest_failed = subtest_state->error_cnt;
325 subtest_filtered = subtest_state->filtered;
326 print_subtest = verbose() || force_log || subtest_failed;
327
328 if ((skip_ok_subtests && !subtest_failed) || subtest_filtered)
329 continue;
330
331 if (subtest_state->log_cnt && print_subtest) {
332 print_test_log(subtest_state->log_buf,
333 subtest_state->log_cnt);
334 }
335
336 print_subtest_name(test->test_num, i + 1,
337 test->test_name, subtest_state->name,
338 test_result(subtest_state->error_cnt,
339 subtest_state->skipped));
340
341 if (w && print_subtest) {
342 jsonw_start_object(w);
343 jsonw_string_field(w, "name", subtest_state->name);
344 jsonw_uint_field(w, "number", i+1);
345 jsonw_write_log_message(w, subtest_state->log_buf, subtest_state->log_cnt);
346 jsonw_bool_field(w, "failed", subtest_failed);
347 jsonw_end_object(w);
348 }
349 }
350
351 if (w && print_test) {
352 jsonw_end_array(w);
353 jsonw_end_object(w);
354 }
355
356 print_test_result(test, test_state);
357 }
358
359 static void stdio_restore(void);
360
361 /* A bunch of tests set custom affinity per-thread and/or per-process. Reset
362 * it after each test/sub-test.
363 */
364 static void reset_affinity(void)
365 {
366 cpu_set_t cpuset;
367 int i, err;
368
369 CPU_ZERO(&cpuset);
370 for (i = 0; i < env.nr_cpus; i++)
371 CPU_SET(i, &cpuset);
372
373 err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
374 if (err < 0) {
375 stdio_restore();
376 fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
377 exit(EXIT_ERR_SETUP_INFRA);
378 }
379 err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
380 if (err < 0) {
381 stdio_restore();
382 fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
383 exit(EXIT_ERR_SETUP_INFRA);
384 }
385 }
386
387 static void save_netns(void)
388 {
389 env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
390 if (env.saved_netns_fd == -1) {
391 perror("open(/proc/self/ns/net)");
392 exit(EXIT_ERR_SETUP_INFRA);
393 }
394 }
395
396 static void restore_netns(void)
397 {
398 if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
399 stdio_restore();
400 perror("setns(CLONE_NEWNS)");
401 exit(EXIT_ERR_SETUP_INFRA);
402 }
403 }
404
405 void test__end_subtest(void)
406 {
407 struct prog_test_def *test = env.test;
408 struct test_state *test_state = env.test_state;
409 struct subtest_state *subtest_state = env.subtest_state;
410
411 if (subtest_state->error_cnt) {
412 test_state->error_cnt++;
413 } else {
414 if (!subtest_state->skipped)
415 test_state->sub_succ_cnt++;
416 else
417 test_state->skip_cnt++;
418 }
419
420 if (verbose() && !env.workers)
421 print_subtest_name(test->test_num, test_state->subtest_num,
422 test->test_name, subtest_state->name,
423 test_result(subtest_state->error_cnt,
424 subtest_state->skipped));
425
426 stdio_restore_cleanup();
427 env.subtest_state = NULL;
428 }
429
430 bool test__start_subtest(const char *subtest_name)
431 {
432 struct prog_test_def *test = env.test;
433 struct test_state *state = env.test_state;
434 struct subtest_state *subtest_state;
435 size_t sub_state_size = sizeof(*subtest_state);
436
437 if (env.subtest_state)
438 test__end_subtest();
439
440 state->subtest_num++;
441 state->subtest_states =
442 realloc(state->subtest_states,
443 state->subtest_num * sub_state_size);
444 if (!state->subtest_states) {
445 fprintf(stderr, "Not enough memory to allocate subtest result\n");
446 return false;
447 }
448
449 subtest_state = &state->subtest_states[state->subtest_num - 1];
450
451 memset(subtest_state, 0, sub_state_size);
452
453 if (!subtest_name || !subtest_name[0]) {
454 fprintf(env.stderr,
455 "Subtest #%d didn't provide sub-test name!\n",
456 state->subtest_num);
457 return false;
458 }
459
460 subtest_state->name = strdup(subtest_name);
461 if (!subtest_state->name) {
462 fprintf(env.stderr,
463 "Subtest #%d: failed to copy subtest name!\n",
464 state->subtest_num);
465 return false;
466 }
467
468 if (!should_run_subtest(&env.test_selector,
469 &env.subtest_selector,
470 state->subtest_num,
471 test->test_name,
472 subtest_name)) {
473 subtest_state->filtered = true;
474 return false;
475 }
476
477 env.subtest_state = subtest_state;
478 stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);
479
480 return true;
481 }
482
483 void test__force_log(void)
484 {
485 env.test_state->force_log = true;
486 }
487
488 void test__skip(void)
489 {
490 if (env.subtest_state)
491 env.subtest_state->skipped = true;
492 else
493 env.test_state->skip_cnt++;
494 }
495
496 void test__fail(void)
497 {
498 if (env.subtest_state)
499 env.subtest_state->error_cnt++;
500 else
501 env.test_state->error_cnt++;
502 }
503
504 int test__join_cgroup(const char *path)
505 {
506 int fd;
507
508 if (!env.test->need_cgroup_cleanup) {
509 if (setup_cgroup_environment()) {
510 fprintf(stderr,
511 "#%d %s: Failed to setup cgroup environment\n",
512 env.test->test_num, env.test->test_name);
513 return -1;
514 }
515
516 env.test->need_cgroup_cleanup = true;
517 }
518
519 fd = create_and_get_cgroup(path);
520 if (fd < 0) {
521 fprintf(stderr,
522 "#%d %s: Failed to create cgroup '%s' (errno=%d)\n",
523 env.test->test_num, env.test->test_name, path, errno);
524 return fd;
525 }
526
527 if (join_cgroup(path)) {
528 fprintf(stderr,
529 "#%d %s: Failed to join cgroup '%s' (errno=%d)\n",
530 env.test->test_num, env.test->test_name, path, errno);
531 return -1;
532 }
533
534 return fd;
535 }
536
537 int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
538 {
539 struct bpf_map *map;
540
541 map = bpf_object__find_map_by_name(obj, name);
542 if (!map) {
543 fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
544 test__fail();
545 return -1;
546 }
547 return bpf_map__fd(map);
548 }
549
550 static bool is_jit_enabled(void)
551 {
552 const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
553 bool enabled = false;
554 int sysctl_fd;
555
556 sysctl_fd = open(jit_sysctl, O_RDONLY);
557 if (sysctl_fd != -1) {
558 char tmpc;
559
560 if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
561 enabled = (tmpc != '0');
562 close(sysctl_fd);
563 }
564
565 return enabled;
566 }
567
568 int compare_map_keys(int map1_fd, int map2_fd)
569 {
570 __u32 key, next_key;
571 char val_buf[PERF_MAX_STACK_DEPTH *
572 sizeof(struct bpf_stack_build_id)];
573 int err;
574
575 err = bpf_map_get_next_key(map1_fd, NULL, &key);
576 if (err)
577 return err;
578 err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
579 if (err)
580 return err;
581
582 while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
583 err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
584 if (err)
585 return err;
586
587 key = next_key;
588 }
589 if (errno != ENOENT)
590 return -1;
591
592 return 0;
593 }
594
595 int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
596 {
597 __u32 key, next_key, *cur_key_p, *next_key_p;
598 char *val_buf1, *val_buf2;
599 int i, err = 0;
600
601 val_buf1 = malloc(stack_trace_len);
602 val_buf2 = malloc(stack_trace_len);
603 cur_key_p = NULL;
604 next_key_p = &key;
605 while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
606 err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
607 if (err)
608 goto out;
609 err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
610 if (err)
611 goto out;
612 for (i = 0; i < stack_trace_len; i++) {
613 if (val_buf1[i] != val_buf2[i]) {
614 err = -1;
615 goto out;
616 }
617 }
618 key = *next_key_p;
619 cur_key_p = &key;
620 next_key_p = &next_key;
621 }
622 if (errno != ENOENT)
623 err = -1;
624
625 out:
626 free(val_buf1);
627 free(val_buf2);
628 return err;
629 }
630
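/* Test registration: prog_tests/tests.h is generated by the selftests build
 * from the prog_tests/*.c sources, with one DEFINE_TEST(name) line per test.
 * Including it twice below first declares weak test_<name>()/serial_test_<name>()
 * symbols and then builds the prog_test_defs[] table. For illustration,
 * DEFINE_TEST(foo) expands to:
 *
 *   extern void test_foo(void) __weak;
 *   extern void serial_test_foo(void) __weak;
 *
 * and to a { .test_name = "foo", .run_test = &test_foo, ... } array entry.
 */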
631 /* extern declarations for test funcs */
632 #define DEFINE_TEST(name) \
633 extern void test_##name(void) __weak; \
634 extern void serial_test_##name(void) __weak;
635 #include <prog_tests/tests.h>
636 #undef DEFINE_TEST
637
638 static struct prog_test_def prog_test_defs[] = {
639 #define DEFINE_TEST(name) { \
640 .test_name = #name, \
641 .run_test = &test_##name, \
642 .run_serial_test = &serial_test_##name, \
643 },
644 #include <prog_tests/tests.h>
645 #undef DEFINE_TEST
646 };
647
648 static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);
649
650 static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];
651
652 const char *argp_program_version = "test_progs 0.1";
653 const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
654 static const char argp_program_doc[] =
655 "BPF selftests test runner\v"
656 "Options accepting the NAMES parameter take either a comma-separated list\n"
657 "of test names, or a filename prefixed with @. The file contains one name\n"
658 "(or wildcard pattern) per line, and comments beginning with # are ignored.\n"
659 "\n"
660 "These options can be passed repeatedly to read multiple files.\n";
661
662 enum ARG_KEYS {
663 ARG_TEST_NUM = 'n',
664 ARG_TEST_NAME = 't',
665 ARG_TEST_NAME_BLACKLIST = 'b',
666 ARG_VERIFIER_STATS = 's',
667 ARG_VERBOSE = 'v',
668 ARG_GET_TEST_CNT = 'c',
669 ARG_LIST_TEST_NAMES = 'l',
670 ARG_TEST_NAME_GLOB_ALLOWLIST = 'a',
671 ARG_TEST_NAME_GLOB_DENYLIST = 'd',
672 ARG_NUM_WORKERS = 'j',
673 ARG_DEBUG = -1,
674 ARG_JSON_SUMMARY = 'J'
675 };
676
677 static const struct argp_option opts[] = {
678 { "num", ARG_TEST_NUM, "NUM", 0,
679 "Run test number NUM only " },
680 { "name", ARG_TEST_NAME, "NAMES", 0,
681 "Run tests with names containing any string from NAMES list" },
682 { "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0,
683 "Don't run tests with names containing any string from NAMES list" },
684 { "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
685 "Output verifier statistics", },
686 { "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
687 "Verbose output (use -vv or -vvv for progressively verbose output)" },
688 { "count", ARG_GET_TEST_CNT, NULL, 0,
689 "Get number of selected top-level tests " },
690 { "list", ARG_LIST_TEST_NAMES, NULL, 0,
691 "List test names that would run (without running them) " },
692 { "allow", ARG_TEST_NAME_GLOB_ALLOWLIST, "NAMES", 0,
693 "Run tests with name matching the pattern (supports '*' wildcard)." },
694 { "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0,
695 "Don't run tests with name matching the pattern (supports '*' wildcard)." },
696 { "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
697 "Number of workers to run in parallel, default to number of cpus." },
698 { "debug", ARG_DEBUG, NULL, 0,
699 "print extra debug information for test_progs." },
700 { "json-summary", ARG_JSON_SUMMARY, "FILE", 0, "Write report in json format to this file."},
701 {},
702 };
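
/* Example invocations (test names are illustrative):
 *
 *   ./test_progs -t btf              run tests whose name matches "btf"
 *   ./test_progs -n 5/1-3            run subtests 1-3 of test number 5
 *   ./test_progs -a 'xdp*' -d xdp_bonding -j   parallel run with glob filters
 *
 * In parallel mode (-j) the main process forks one worker per CPU (or per
 * requested worker count) and dispatches non-serial tests to them over
 * socketpairs; serial_test_*() tests still run in the main process.
 */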
703
704 static int libbpf_print_fn(enum libbpf_print_level level,
705 const char *format, va_list args)
706 {
707 if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
708 return 0;
709 vfprintf(stdout, format, args);
710 return 0;
711 }
712
713 static void free_test_filter_set(const struct test_filter_set *set)
714 {
715 int i, j;
716
717 if (!set)
718 return;
719
720 for (i = 0; i < set->cnt; i++) {
721 free((void *)set->tests[i].name);
722 for (j = 0; j < set->tests[i].subtest_cnt; j++)
723 free((void *)set->tests[i].subtests[j]);
724
725 free((void *)set->tests[i].subtests);
726 }
727
728 free((void *)set->tests);
729 }
730
731 static void free_test_selector(struct test_selector *test_selector)
732 {
733 free_test_filter_set(&test_selector->blacklist);
734 free_test_filter_set(&test_selector->whitelist);
735 free(test_selector->num_set);
736 }
737
738 extern int extra_prog_load_log_flags;
739
740 static error_t parse_arg(int key, char *arg, struct argp_state *state)
741 {
742 struct test_env *env = state->input;
743 int err = 0;
744
745 switch (key) {
746 case ARG_TEST_NUM: {
747 char *subtest_str = strchr(arg, '/');
748
749 if (subtest_str) {
750 *subtest_str = '\0';
751 if (parse_num_list(subtest_str + 1,
752 &env->subtest_selector.num_set,
753 &env->subtest_selector.num_set_len)) {
754 fprintf(stderr,
755 "Failed to parse subtest numbers.\n");
756 return -EINVAL;
757 }
758 }
759 if (parse_num_list(arg, &env->test_selector.num_set,
760 &env->test_selector.num_set_len)) {
761 fprintf(stderr, "Failed to parse test numbers.\n");
762 return -EINVAL;
763 }
764 break;
765 }
766 case ARG_TEST_NAME_GLOB_ALLOWLIST:
767 case ARG_TEST_NAME: {
768 if (arg[0] == '@')
769 err = parse_test_list_file(arg + 1,
770 &env->test_selector.whitelist,
771 key == ARG_TEST_NAME_GLOB_ALLOWLIST);
772 else
773 err = parse_test_list(arg,
774 &env->test_selector.whitelist,
775 key == ARG_TEST_NAME_GLOB_ALLOWLIST);
776
777 break;
778 }
779 case ARG_TEST_NAME_GLOB_DENYLIST:
780 case ARG_TEST_NAME_BLACKLIST: {
781 if (arg[0] == '@')
782 err = parse_test_list_file(arg + 1,
783 &env->test_selector.blacklist,
784 key == ARG_TEST_NAME_GLOB_DENYLIST);
785 else
786 err = parse_test_list(arg,
787 &env->test_selector.blacklist,
788 key == ARG_TEST_NAME_GLOB_DENYLIST);
789
790 break;
791 }
792 case ARG_VERIFIER_STATS:
793 env->verifier_stats = true;
794 break;
795 case ARG_VERBOSE:
796 env->verbosity = VERBOSE_NORMAL;
797 if (arg) {
798 if (strcmp(arg, "v") == 0) {
799 env->verbosity = VERBOSE_VERY;
800 extra_prog_load_log_flags = 1;
801 } else if (strcmp(arg, "vv") == 0) {
802 env->verbosity = VERBOSE_SUPER;
803 extra_prog_load_log_flags = 2;
804 } else {
805 fprintf(stderr,
806 "Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n",
807 arg);
808 return -EINVAL;
809 }
810 }
811
812 if (verbose()) {
813 if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
814 fprintf(stderr,
815 "Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)",
816 errno);
817 return -EINVAL;
818 }
819 }
820
821 break;
822 case ARG_GET_TEST_CNT:
823 env->get_test_cnt = true;
824 break;
825 case ARG_LIST_TEST_NAMES:
826 env->list_test_names = true;
827 break;
828 case ARG_NUM_WORKERS:
829 if (arg) {
830 env->workers = atoi(arg);
831 if (!env->workers) {
832 fprintf(stderr, "Invalid number of worker: %s.", arg);
833 return -EINVAL;
834 }
835 } else {
836 env->workers = get_nprocs();
837 }
838 break;
839 case ARG_DEBUG:
840 env->debug = true;
841 break;
842 case ARG_JSON_SUMMARY:
843 env->json = fopen(arg, "w");
844 if (env->json == NULL) {
845 perror("Failed to open json summary file");
846 return -errno;
847 }
848 break;
849 case ARGP_KEY_ARG:
850 argp_usage(state);
851 break;
852 case ARGP_KEY_END:
853 break;
854 default:
855 return ARGP_ERR_UNKNOWN;
856 }
857 return err;
858 }
859
860 /*
861 * Determine if test_progs is running as a "flavored" test runner and switch
862 * into corresponding sub-directory to load correct BPF objects.
863 *
864 * This is done by looking at executable name. If it contains "-flavor"
865 * suffix, then we are running as a flavored test runner.
866 */
867 int cd_flavor_subdir(const char *exec_name)
868 {
869 /* General form of argv[0] passed here is:
870 * some/path/to/test_progs[-flavor], where -flavor part is optional.
871 * First cut out "test_progs[-flavor]" part, then extract "flavor"
872 * part, if it's there.
873 */
874 const char *flavor = strrchr(exec_name, '/');
875
876 if (!flavor)
877 flavor = exec_name;
878 else
879 flavor++;
880
881 flavor = strrchr(flavor, '-');
882 if (!flavor)
883 return 0;
884 flavor++;
885 if (verbose())
886 fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);
887
888 return chdir(flavor);
889 }
890
891 int trigger_module_test_read(int read_sz)
892 {
893 int fd, err;
894
895 fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
896 err = -errno;
897 if (!ASSERT_GE(fd, 0, "testmod_file_open"))
898 return err;
899
900 read(fd, NULL, read_sz);
901 close(fd);
902
903 return 0;
904 }
905
906 int trigger_module_test_write(int write_sz)
907 {
908 int fd, err;
909 char *buf = malloc(write_sz);
910
911 if (!buf)
912 return -ENOMEM;
913
914 memset(buf, 'a', write_sz);
915 buf[write_sz-1] = '\0';
916
917 fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY);
918 err = -errno;
919 if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
920 free(buf);
921 return err;
922 }
923
924 write(fd, buf, write_sz);
925 close(fd);
926 free(buf);
927 return 0;
928 }
929
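/* Helper for tests: overwrite a sysctl file with a string value, e.g.
 * write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "2") (illustrative path).
 * Returns 0 on success, -1 (after a failed ASSERT) on open or short-write
 * errors.
 */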
930 int write_sysctl(const char *sysctl, const char *value)
931 {
932 int fd, err, len;
933
934 fd = open(sysctl, O_WRONLY);
935 if (!ASSERT_NEQ(fd, -1, "open sysctl"))
936 return -1;
937
938 len = strlen(value);
939 err = write(fd, value, len);
940 close(fd);
941 if (!ASSERT_EQ(err, len, "write sysctl"))
942 return -1;
943
944 return 0;
945 }
946
947 int get_bpf_max_tramp_links_from(struct btf *btf)
948 {
949 const struct btf_enum *e;
950 const struct btf_type *t;
951 __u32 i, type_cnt;
952 const char *name;
953 __u16 j, vlen;
954
955 for (i = 1, type_cnt = btf__type_cnt(btf); i < type_cnt; i++) {
956 t = btf__type_by_id(btf, i);
957 if (!t || !btf_is_enum(t) || t->name_off)
958 continue;
959 e = btf_enum(t);
960 for (j = 0, vlen = btf_vlen(t); j < vlen; j++, e++) {
961 name = btf__str_by_offset(btf, e->name_off);
962 if (name && !strcmp(name, "BPF_MAX_TRAMP_LINKS"))
963 return e->val;
964 }
965 }
966
967 return -1;
968 }
969
970 int get_bpf_max_tramp_links(void)
971 {
972 struct btf *vmlinux_btf;
973 int ret;
974
975 vmlinux_btf = btf__load_vmlinux_btf();
976 if (!ASSERT_OK_PTR(vmlinux_btf, "vmlinux btf"))
977 return -1;
978 ret = get_bpf_max_tramp_links_from(vmlinux_btf);
979 btf__free(vmlinux_btf);
980
981 return ret;
982 }
983
984 #define MAX_BACKTRACE_SZ 128
985 void crash_handler(int signum)
986 {
987 void *bt[MAX_BACKTRACE_SZ];
988 size_t sz;
989
990 sz = backtrace(bt, ARRAY_SIZE(bt));
991
992 if (env.stdout)
993 stdio_restore();
994 if (env.test) {
995 env.test_state->error_cnt++;
996 dump_test_log(env.test, env.test_state, true, false, NULL);
997 }
998 if (env.worker_id != -1)
999 fprintf(stderr, "[%d]: ", env.worker_id);
1000 fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
1001 backtrace_symbols_fd(bt, sz, STDERR_FILENO);
1002 }
1003
1004 static void sigint_handler(int signum)
1005 {
1006 int i;
1007
1008 for (i = 0; i < env.workers; i++)
1009 if (env.worker_socks[i] > 0)
1010 close(env.worker_socks[i]);
1011 }
1012
1013 static int current_test_idx;
1014 static pthread_mutex_t current_test_lock;
1015 static pthread_mutex_t stdout_output_lock;
1016
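/* Parallel-mode plumbing: the dispatcher threads in the main process and the
 * forked workers exchange fixed-size struct msg packets (MSG_DO_TEST,
 * MSG_TEST_DONE, MSG_SUBTEST_DONE, MSG_TEST_LOG, MSG_EXIT) over SOCK_SEQPACKET
 * socketpairs. current_test_lock serializes picking the next test index and
 * stdout_output_lock serializes result printing.
 */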
1017 static inline const char *str_msg(const struct msg *msg, char *buf)
1018 {
1019 switch (msg->type) {
1020 case MSG_DO_TEST:
1021 sprintf(buf, "MSG_DO_TEST %d", msg->do_test.num);
1022 break;
1023 case MSG_TEST_DONE:
1024 sprintf(buf, "MSG_TEST_DONE %d (log: %d)",
1025 msg->test_done.num,
1026 msg->test_done.have_log);
1027 break;
1028 case MSG_SUBTEST_DONE:
1029 sprintf(buf, "MSG_SUBTEST_DONE %d (log: %d)",
1030 msg->subtest_done.num,
1031 msg->subtest_done.have_log);
1032 break;
1033 case MSG_TEST_LOG:
1034 sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
1035 strlen(msg->test_log.log_buf),
1036 msg->test_log.is_last);
1037 break;
1038 case MSG_EXIT:
1039 sprintf(buf, "MSG_EXIT");
1040 break;
1041 default:
1042 sprintf(buf, "UNKNOWN");
1043 break;
1044 }
1045
1046 return buf;
1047 }
1048
1049 static int send_message(int sock, const struct msg *msg)
1050 {
1051 char buf[256];
1052
1053 if (env.debug)
1054 fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf));
1055 return send(sock, msg, sizeof(*msg), 0);
1056 }
1057
1058 static int recv_message(int sock, struct msg *msg)
1059 {
1060 int ret;
1061 char buf[256];
1062
1063 memset(msg, 0, sizeof(*msg));
1064 ret = recv(sock, msg, sizeof(*msg), 0);
1065 if (ret >= 0) {
1066 if (env.debug)
1067 fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf));
1068 }
1069 return ret;
1070 }
1071
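/* Run a single test in the current process: hijack stdio for log capture,
 * invoke its (serial_)test_*() body, finalize any dangling subtest, then
 * reset CPU affinity, restore the saved netns, clean up cgroups if the test
 * joined one, restore stdio and dump the result.
 */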
1072 static void run_one_test(int test_num)
1073 {
1074 struct prog_test_def *test = &prog_test_defs[test_num];
1075 struct test_state *state = &test_states[test_num];
1076
1077 env.test = test;
1078 env.test_state = state;
1079
1080 stdio_hijack(&state->log_buf, &state->log_cnt);
1081
1082 if (test->run_test)
1083 test->run_test();
1084 else if (test->run_serial_test)
1085 test->run_serial_test();
1086
1087 /* ensure last sub-test is finalized properly */
1088 if (env.subtest_state)
1089 test__end_subtest();
1090
1091 state->tested = true;
1092
1093 if (verbose() && env.worker_id == -1)
1094 print_test_result(test, state);
1095
1096 reset_affinity();
1097 restore_netns();
1098 if (test->need_cgroup_cleanup)
1099 cleanup_cgroup_environment();
1100
1101 stdio_restore();
1102
1103 dump_test_log(test, state, false, false, NULL);
1104 }
1105
1106 struct dispatch_data {
1107 int worker_id;
1108 int sock_fd;
1109 };
1110
1111 static int read_prog_test_msg(int sock_fd, struct msg *msg, enum msg_type type)
1112 {
1113 if (recv_message(sock_fd, msg) < 0)
1114 return 1;
1115
1116 if (msg->type != type) {
1117 printf("%s: unexpected message type %d. expected %d\n", __func__, msg->type, type);
1118 return 1;
1119 }
1120
1121 return 0;
1122 }
1123
1124 static int dispatch_thread_read_log(int sock_fd, char **log_buf, size_t *log_cnt)
1125 {
1126 FILE *log_fp = NULL;
1127 int result = 0;
1128
1129 log_fp = open_memstream(log_buf, log_cnt);
1130 if (!log_fp)
1131 return 1;
1132
1133 while (true) {
1134 struct msg msg;
1135
1136 if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_LOG)) {
1137 result = 1;
1138 goto out;
1139 }
1140
1141 fprintf(log_fp, "%s", msg.test_log.log_buf);
1142 if (msg.test_log.is_last)
1143 break;
1144 }
1145
1146 out:
1147 fclose(log_fp);
1148 log_fp = NULL;
1149 return result;
1150 }
1151
1152 static int dispatch_thread_send_subtests(int sock_fd, struct test_state *state)
1153 {
1154 struct msg msg;
1155 struct subtest_state *subtest_state;
1156 int subtest_num = state->subtest_num;
1157
1158 state->subtest_states = malloc(subtest_num * sizeof(*subtest_state));
1159
1160 for (int i = 0; i < subtest_num; i++) {
1161 subtest_state = &state->subtest_states[i];
1162
1163 memset(subtest_state, 0, sizeof(*subtest_state));
1164
1165 if (read_prog_test_msg(sock_fd, &msg, MSG_SUBTEST_DONE))
1166 return 1;
1167
1168 subtest_state->name = strdup(msg.subtest_done.name);
1169 subtest_state->error_cnt = msg.subtest_done.error_cnt;
1170 subtest_state->skipped = msg.subtest_done.skipped;
1171 subtest_state->filtered = msg.subtest_done.filtered;
1172
1173 /* collect all logs */
1174 if (msg.subtest_done.have_log)
1175 if (dispatch_thread_read_log(sock_fd,
1176 &subtest_state->log_buf,
1177 &subtest_state->log_cnt))
1178 return 1;
1179 }
1180
1181 return 0;
1182 }
1183
1184 static void *dispatch_thread(void *ctx)
1185 {
1186 struct dispatch_data *data = ctx;
1187 int sock_fd;
1188
1189 sock_fd = data->sock_fd;
1190
1191 while (true) {
1192 int test_to_run = -1;
1193 struct prog_test_def *test;
1194 struct test_state *state;
1195
1196 /* grab a test */
1197 {
1198 pthread_mutex_lock(&current_test_lock);
1199
1200 if (current_test_idx >= prog_test_cnt) {
1201 pthread_mutex_unlock(&current_test_lock);
1202 goto done;
1203 }
1204
1205 test = &prog_test_defs[current_test_idx];
1206 test_to_run = current_test_idx;
1207 current_test_idx++;
1208
1209 pthread_mutex_unlock(&current_test_lock);
1210 }
1211
1212 if (!test->should_run || test->run_serial_test)
1213 continue;
1214
1215 /* run test through worker */
1216 {
1217 struct msg msg_do_test;
1218
1219 memset(&msg_do_test, 0, sizeof(msg_do_test));
1220 msg_do_test.type = MSG_DO_TEST;
1221 msg_do_test.do_test.num = test_to_run;
1222 if (send_message(sock_fd, &msg_do_test) < 0) {
1223 perror("Fail to send command");
1224 goto done;
1225 }
1226 env.worker_current_test[data->worker_id] = test_to_run;
1227 }
1228
1229 /* wait for test done */
1230 do {
1231 struct msg msg;
1232
1233 if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_DONE))
1234 goto error;
1235 if (test_to_run != msg.test_done.num)
1236 goto error;
1237
1238 state = &test_states[test_to_run];
1239 state->tested = true;
1240 state->error_cnt = msg.test_done.error_cnt;
1241 state->skip_cnt = msg.test_done.skip_cnt;
1242 state->sub_succ_cnt = msg.test_done.sub_succ_cnt;
1243 state->subtest_num = msg.test_done.subtest_num;
1244
1245 /* collect all logs */
1246 if (msg.test_done.have_log) {
1247 if (dispatch_thread_read_log(sock_fd,
1248 &state->log_buf,
1249 &state->log_cnt))
1250 goto error;
1251 }
1252
1253 /* collect all subtests and subtest logs */
1254 if (!state->subtest_num)
1255 break;
1256
1257 if (dispatch_thread_send_subtests(sock_fd, state))
1258 goto error;
1259 } while (false);
1260
1261 pthread_mutex_lock(&stdout_output_lock);
1262 dump_test_log(test, state, false, true, NULL);
1263 pthread_mutex_unlock(&stdout_output_lock);
1264 } /* while (true) */
1265 error:
1266 if (env.debug)
1267 fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno));
1268
1269 done:
1270 {
1271 struct msg msg_exit;
1272
1273 msg_exit.type = MSG_EXIT;
1274 if (send_message(sock_fd, &msg_exit) < 0) {
1275 if (env.debug)
1276 fprintf(stderr, "[%d]: send_message msg_exit: %s.\n",
1277 data->worker_id, strerror(errno));
1278 }
1279 }
1280 return NULL;
1281 }
1282
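/* Aggregate per-test results into the final "Summary: ..." line and, when
 * --json-summary was given, emit the JSON report. When not in verbose mode,
 * the logs of failed tests are re-printed here under "All error logs:" so
 * they appear together at the end of the run.
 */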
1283 static void calculate_summary_and_print_errors(struct test_env *env)
1284 {
1285 int i;
1286 int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;
1287 json_writer_t *w = NULL;
1288
1289 for (i = 0; i < prog_test_cnt; i++) {
1290 struct test_state *state = &test_states[i];
1291
1292 if (!state->tested)
1293 continue;
1294
1295 sub_succ_cnt += state->sub_succ_cnt;
1296 skip_cnt += state->skip_cnt;
1297
1298 if (state->error_cnt)
1299 fail_cnt++;
1300 else
1301 succ_cnt++;
1302 }
1303
1304 if (env->json) {
1305 w = jsonw_new(env->json);
1306 if (!w)
1307 fprintf(env->stderr, "Failed to create new JSON stream.");
1308 }
1309
1310 if (w) {
1311 jsonw_start_object(w);
1312 jsonw_uint_field(w, "success", succ_cnt);
1313 jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
1314 jsonw_uint_field(w, "skipped", skip_cnt);
1315 jsonw_uint_field(w, "failed", fail_cnt);
1316 jsonw_name(w, "results");
1317 jsonw_start_array(w);
1318 }
1319
1320 /*
1321 * We only print error logs summary when there are failed tests and
1322 * verbose mode is not enabled. Otherwise, results may be inconsistent.
1323 *
1324 */
1325 if (!verbose() && fail_cnt) {
1326 printf("\nAll error logs:\n");
1327
1328 /* print error logs again */
1329 for (i = 0; i < prog_test_cnt; i++) {
1330 struct prog_test_def *test = &prog_test_defs[i];
1331 struct test_state *state = &test_states[i];
1332
1333 if (!state->tested || !state->error_cnt)
1334 continue;
1335
1336 dump_test_log(test, state, true, true, w);
1337 }
1338 }
1339
1340 if (w) {
1341 jsonw_end_array(w);
1342 jsonw_end_object(w);
1343 jsonw_destroy(&w);
1344 }
1345
1346 if (env->json)
1347 fclose(env->json);
1348
1349 printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
1350 succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);
1351
1352 env->succ_cnt = succ_cnt;
1353 env->sub_succ_cnt = sub_succ_cnt;
1354 env->fail_cnt = fail_cnt;
1355 env->skip_cnt = skip_cnt;
1356 }
1357
1358 static void server_main(void)
1359 {
1360 pthread_t *dispatcher_threads;
1361 struct dispatch_data *data;
1362 struct sigaction sigact_int = {
1363 .sa_handler = sigint_handler,
1364 .sa_flags = SA_RESETHAND,
1365 };
1366 int i;
1367
1368 sigaction(SIGINT, &sigact_int, NULL);
1369
1370 dispatcher_threads = calloc(sizeof(pthread_t), env.workers);
1371 data = calloc(sizeof(struct dispatch_data), env.workers);
1372
1373 env.worker_current_test = calloc(sizeof(int), env.workers);
1374 for (i = 0; i < env.workers; i++) {
1375 int rc;
1376
1377 data[i].worker_id = i;
1378 data[i].sock_fd = env.worker_socks[i];
1379 rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]);
1380 if (rc < 0) {
1381 perror("Failed to launch dispatcher thread");
1382 exit(EXIT_ERR_SETUP_INFRA);
1383 }
1384 }
1385
1386 /* wait for all dispatchers to finish */
1387 for (i = 0; i < env.workers; i++) {
1388 while (true) {
1389 int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL);
1390
1391 if (!ret) {
1392 break;
1393 } else if (ret == EBUSY) {
1394 if (env.debug)
1395 fprintf(stderr, "Still waiting for thread %d (test %d).\n",
1396 i, env.worker_current_test[i] + 1);
1397 usleep(1000 * 1000);
1398 continue;
1399 } else {
1400 fprintf(stderr, "Unexpected error joining dispatcher thread: %d", ret);
1401 break;
1402 }
1403 }
1404 }
1405 free(dispatcher_threads);
1406 free(env.worker_current_test);
1407 free(data);
1408
1409 /* run serial tests */
1410 save_netns();
1411
1412 for (int i = 0; i < prog_test_cnt; i++) {
1413 struct prog_test_def *test = &prog_test_defs[i];
1414
1415 if (!test->should_run || !test->run_serial_test)
1416 continue;
1417
1418 run_one_test(i);
1419 }
1420
1421 /* generate summary */
1422 fflush(stderr);
1423 fflush(stdout);
1424
1425 calculate_summary_and_print_errors(&env);
1426
1427 /* reap all workers */
1428 for (i = 0; i < env.workers; i++) {
1429 int wstatus, pid;
1430
1431 pid = waitpid(env.worker_pids[i], &wstatus, 0);
1432 if (pid != env.worker_pids[i])
1433 perror("Unable to reap worker");
1434 }
1435 }
1436
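/* Stream a captured log back to the dispatcher in MAX_LOG_TRUNK_SIZE-sized
 * MSG_TEST_LOG chunks; is_last marks the final chunk so the receiving side
 * knows when to stop reading (see dispatch_thread_read_log()).
 */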
1437 static void worker_main_send_log(int sock, char *log_buf, size_t log_cnt)
1438 {
1439 char *src;
1440 size_t slen;
1441
1442 src = log_buf;
1443 slen = log_cnt;
1444 while (slen) {
1445 struct msg msg_log;
1446 char *dest;
1447 size_t len;
1448
1449 memset(&msg_log, 0, sizeof(msg_log));
1450 msg_log.type = MSG_TEST_LOG;
1451 dest = msg_log.test_log.log_buf;
1452 len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
1453 memcpy(dest, src, len);
1454
1455 src += len;
1456 slen -= len;
1457 if (!slen)
1458 msg_log.test_log.is_last = true;
1459
1460 assert(send_message(sock, &msg_log) >= 0);
1461 }
1462 }
1463
1464 static void free_subtest_state(struct subtest_state *state)
1465 {
1466 if (state->log_buf) {
1467 free(state->log_buf);
1468 state->log_buf = NULL;
1469 state->log_cnt = 0;
1470 }
1471 free(state->name);
1472 state->name = NULL;
1473 }
1474
1475 static int worker_main_send_subtests(int sock, struct test_state *state)
1476 {
1477 int i, result = 0;
1478 struct msg msg;
1479 struct subtest_state *subtest_state;
1480
1481 memset(&msg, 0, sizeof(msg));
1482 msg.type = MSG_SUBTEST_DONE;
1483
1484 for (i = 0; i < state->subtest_num; i++) {
1485 subtest_state = &state->subtest_states[i];
1486
1487 msg.subtest_done.num = i;
1488
1489 strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);
1490
1491 msg.subtest_done.error_cnt = subtest_state->error_cnt;
1492 msg.subtest_done.skipped = subtest_state->skipped;
1493 msg.subtest_done.filtered = subtest_state->filtered;
1494 msg.subtest_done.have_log = false;
1495
1496 if (verbose() || state->force_log || subtest_state->error_cnt) {
1497 if (subtest_state->log_cnt)
1498 msg.subtest_done.have_log = true;
1499 }
1500
1501 if (send_message(sock, &msg) < 0) {
1502 perror("Fail to send message done");
1503 result = 1;
1504 goto out;
1505 }
1506
1507 /* send logs */
1508 if (msg.subtest_done.have_log)
1509 worker_main_send_log(sock, subtest_state->log_buf, subtest_state->log_cnt);
1510
1511 free_subtest_state(subtest_state);
1512 free(subtest_state->name);
1513 }
1514
1515 out:
1516 for (; i < state->subtest_num; i++)
1517 free_subtest_state(&state->subtest_states[i]);
1518 free(state->subtest_states);
1519 return result;
1520 }
1521
1522 static int worker_main(int sock)
1523 {
1524 save_netns();
1525
1526 while (true) {
1527 /* receive command */
1528 struct msg msg;
1529
1530 if (recv_message(sock, &msg) < 0)
1531 goto out;
1532
1533 switch (msg.type) {
1534 case MSG_EXIT:
1535 if (env.debug)
1536 fprintf(stderr, "[%d]: worker exit.\n",
1537 env.worker_id);
1538 goto out;
1539 case MSG_DO_TEST: {
1540 int test_to_run = msg.do_test.num;
1541 struct prog_test_def *test = &prog_test_defs[test_to_run];
1542 struct test_state *state = &test_states[test_to_run];
1543 struct msg msg;
1544
1545 if (env.debug)
1546 fprintf(stderr, "[%d]: #%d:%s running.\n",
1547 env.worker_id,
1548 test_to_run + 1,
1549 test->test_name);
1550
1551 run_one_test(test_to_run);
1552
1553 memset(&msg, 0, sizeof(msg));
1554 msg.type = MSG_TEST_DONE;
1555 msg.test_done.num = test_to_run;
1556 msg.test_done.error_cnt = state->error_cnt;
1557 msg.test_done.skip_cnt = state->skip_cnt;
1558 msg.test_done.sub_succ_cnt = state->sub_succ_cnt;
1559 msg.test_done.subtest_num = state->subtest_num;
1560 msg.test_done.have_log = false;
1561
1562 if (verbose() || state->force_log || state->error_cnt) {
1563 if (state->log_cnt)
1564 msg.test_done.have_log = true;
1565 }
1566 if (send_message(sock, &msg) < 0) {
1567 perror("Fail to send message done");
1568 goto out;
1569 }
1570
1571 /* send logs */
1572 if (msg.test_done.have_log)
1573 worker_main_send_log(sock, state->log_buf, state->log_cnt);
1574
1575 if (state->log_buf) {
1576 free(state->log_buf);
1577 state->log_buf = NULL;
1578 state->log_cnt = 0;
1579 }
1580
1581 if (state->subtest_num)
1582 if (worker_main_send_subtests(sock, state))
1583 goto out;
1584
1585 if (env.debug)
1586 fprintf(stderr, "[%d]: #%d:%s done.\n",
1587 env.worker_id,
1588 test_to_run + 1,
1589 test->test_name);
1590 break;
1591 } /* case MSG_DO_TEST */
1592 default:
1593 if (env.debug)
1594 fprintf(stderr, "[%d]: unknown message.\n", env.worker_id);
1595 return -1;
1596 }
1597 }
1598 out:
1599 return 0;
1600 }
1601
1602 static void free_test_states(void)
1603 {
1604 int i, j;
1605
1606 for (i = 0; i < ARRAY_SIZE(prog_test_defs); i++) {
1607 struct test_state *test_state = &test_states[i];
1608
1609 for (j = 0; j < test_state->subtest_num; j++)
1610 free_subtest_state(&test_state->subtest_states[j]);
1611
1612 free(test_state->subtest_states);
1613 free(test_state->log_buf);
1614 test_state->subtest_states = NULL;
1615 test_state->log_buf = NULL;
1616 }
1617 }
1618
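/* Entry point: parse arguments, switch into the flavor subdirectory when the
 * binary is a flavored runner (e.g. test_progs-no_alu32), load bpf_testmod,
 * then either fork workers and act as the dispatcher (-j) or run all selected
 * tests sequentially in this process before printing the summary.
 */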
1619 int main(int argc, char **argv)
1620 {
1621 static const struct argp argp = {
1622 .options = opts,
1623 .parser = parse_arg,
1624 .doc = argp_program_doc,
1625 };
1626 struct sigaction sigact = {
1627 .sa_handler = crash_handler,
1628 .sa_flags = SA_RESETHAND,
1629 };
1630 int err, i;
1631
1632 sigaction(SIGSEGV, &sigact, NULL);
1633
1634 err = argp_parse(&argp, argc, argv, 0, NULL, &env);
1635 if (err)
1636 return err;
1637
1638 err = cd_flavor_subdir(argv[0]);
1639 if (err)
1640 return err;
1641
1642 /* Use libbpf 1.0 API mode */
1643 libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
1644 libbpf_set_print(libbpf_print_fn);
1645
1646 srand(time(NULL));
1647
1648 env.jit_enabled = is_jit_enabled();
1649 env.nr_cpus = libbpf_num_possible_cpus();
1650 if (env.nr_cpus < 0) {
1651 fprintf(stderr, "Failed to get number of CPUs: %d!\n",
1652 env.nr_cpus);
1653 return -1;
1654 }
1655
1656 env.stdout = stdout;
1657 env.stderr = stderr;
1658
1659 env.has_testmod = true;
1660 if (!env.list_test_names) {
1661 /* ensure previous instance of the module is unloaded */
1662 unload_bpf_testmod(verbose());
1663
1664 if (load_bpf_testmod(verbose())) {
1665 fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
1666 env.has_testmod = false;
1667 }
1668 }
1669
1670 /* initializing tests */
1671 for (i = 0; i < prog_test_cnt; i++) {
1672 struct prog_test_def *test = &prog_test_defs[i];
1673
1674 test->test_num = i + 1;
1675 test->should_run = should_run(&env.test_selector,
1676 test->test_num, test->test_name);
1677
1678 if ((test->run_test == NULL && test->run_serial_test == NULL) ||
1679 (test->run_test != NULL && test->run_serial_test != NULL)) {
1680 fprintf(stderr, "Test %d:%s must have either test_%s() or serial_test_%sl() defined.\n",
1681 test->test_num, test->test_name, test->test_name, test->test_name);
1682 exit(EXIT_ERR_SETUP_INFRA);
1683 }
1684 }
1685
1686 /* ignore workers if we are just listing */
1687 if (env.get_test_cnt || env.list_test_names)
1688 env.workers = 0;
1689
1690 /* launch workers if requested */
1691 env.worker_id = -1; /* main process */
1692 if (env.workers) {
1693 env.worker_pids = calloc(sizeof(__pid_t), env.workers);
1694 env.worker_socks = calloc(sizeof(int), env.workers);
1695 if (env.debug)
1696 fprintf(stdout, "Launching %d workers.\n", env.workers);
1697 for (i = 0; i < env.workers; i++) {
1698 int sv[2];
1699 pid_t pid;
1700
1701 if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) {
1702 perror("Fail to create worker socket");
1703 return -1;
1704 }
1705 pid = fork();
1706 if (pid < 0) {
1707 perror("Failed to fork worker");
1708 return -1;
1709 } else if (pid != 0) { /* main process */
1710 close(sv[1]);
1711 env.worker_pids[i] = pid;
1712 env.worker_socks[i] = sv[0];
1713 } else { /* inside each worker process */
1714 close(sv[0]);
1715 env.worker_id = i;
1716 return worker_main(sv[1]);
1717 }
1718 }
1719
1720 if (env.worker_id == -1) {
1721 server_main();
1722 goto out;
1723 }
1724 }
1725
1726 /* The rest of the main process */
1727
1728 /* on single mode */
1729 save_netns();
1730
1731 for (i = 0; i < prog_test_cnt; i++) {
1732 struct prog_test_def *test = &prog_test_defs[i];
1733
1734 if (!test->should_run)
1735 continue;
1736
1737 if (env.get_test_cnt) {
1738 env.succ_cnt++;
1739 continue;
1740 }
1741
1742 if (env.list_test_names) {
1743 fprintf(env.stdout, "%s\n", test->test_name);
1744 env.succ_cnt++;
1745 continue;
1746 }
1747
1748 run_one_test(i);
1749 }
1750
1751 if (env.get_test_cnt) {
1752 printf("%d\n", env.succ_cnt);
1753 goto out;
1754 }
1755
1756 if (env.list_test_names)
1757 goto out;
1758
1759 calculate_summary_and_print_errors(&env);
1760
1761 close(env.saved_netns_fd);
1762 out:
1763 if (!env.list_test_names && env.has_testmod)
1764 unload_bpf_testmod(verbose());
1765
1766 free_test_selector(&env.test_selector);
1767 free_test_selector(&env.subtest_selector);
1768 free_test_states();
1769
1770 if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
1771 return EXIT_NO_TEST;
1772
1773 return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
1774 }
1775