// SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1
/*
 * kmod stress test driver
 *
 * Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/*
 * This driver provides an interface to trigger and test the kernel's
 * module loader through a series of configurations and a few triggers.
 * To test this driver use the following script as root:
 *
 * tools/testing/selftests/kmod/kmod.sh --help
 */
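
/*
 * Besides the selftest script, the interface can also be poked manually
 * through sysfs. A rough sketch (the device index and exact sysfs path may
 * differ on your system; /sys/devices/virtual/misc/test_kmod0 is just the
 * typical location for the first registered device):
 *
 *   echo "xfs" > /sys/devices/virtual/misc/test_kmod0/config_test_fs
 *   echo 2     > /sys/devices/virtual/misc/test_kmod0/config_test_case
 *   echo 8     > /sys/devices/virtual/misc/test_kmod0/config_num_threads
 *   echo 1     > /sys/devices/virtual/misc/test_kmod0/trigger_config
 *   cat          /sys/devices/virtual/misc/test_kmod0/test_result
 */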

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/printk.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/device.h>

#define TEST_START_NUM_THREADS	50
#define TEST_START_DRIVER	"test_module"
#define TEST_START_TEST_FS	"xfs"
#define TEST_START_TEST_CASE	TEST_KMOD_DRIVER

static bool force_init_test = false;
module_param(force_init_test, bool_enable_only, 0644);
MODULE_PARM_DESC(force_init_test,
		 "Force kicking a test immediately after driver loads");

/*
 * For device allocation / registration
 */
static DEFINE_MUTEX(reg_dev_mutex);
static LIST_HEAD(reg_test_devs);

/*
 * num_test_devs represents the ID that will be used for the *next*
 * test device we allow to be created.
 */
static int num_test_devs;

/**
 * enum kmod_test_case - linker table test case
 * @TEST_KMOD_DRIVER: stress tests request_module()
 * @TEST_KMOD_FS_TYPE: stress tests get_fs_type()
 *
 * If you add a test case, please be sure to review whether you need
 * to set @need_mod_put for your test case.
 */
enum kmod_test_case {
	__TEST_KMOD_INVALID = 0,

	TEST_KMOD_DRIVER,
	TEST_KMOD_FS_TYPE,

	__TEST_KMOD_MAX,
};

struct test_config {
	char *test_driver;
	char *test_fs;
	unsigned int num_threads;
	enum kmod_test_case test_case;
	int test_result;
};

struct kmod_test_device;

/**
 * struct kmod_test_device_info - thread info
 *
 * @ret_sync: return value of request_module(), the sync request, for
 *	@TEST_KMOD_DRIVER
 * @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE
 * @task_sync: kthread running the request for this thread, NULL once the
 *	thread has completed its work or if it was never started
 * @thread_idx: thread ID
 * @test_dev: test device the test is being performed under
 * @need_mod_put: Some tests (get_fs_type() is one) require putting the module
 *	(module_put(fs_sync->owner)) when done, otherwise you will not be able
 *	to unload the respective modules and re-test. We use this to keep
 *	accounting of when we need this and to help out in case we need to
 *	error out and deal with module_put() on error.
 */
struct kmod_test_device_info {
	int ret_sync;
	struct file_system_type *fs_sync;
	struct task_struct *task_sync;
	unsigned int thread_idx;
	struct kmod_test_device *test_dev;
	bool need_mod_put;
};

/**
 * struct kmod_test_device - test device to help test kmod
 *
 * @dev_idx: unique ID for test device
 * @config: configuration for the test
 * @misc_dev: we use a misc device under the hood
 * @dev: pointer to misc_dev's own struct device
 * @config_mutex: protects configuration of test
 * @trigger_mutex: the test trigger can only be fired once at a time
 * @thread_mutex: protects the @done count and the @info for each thread
 * @done: number of threads which have completed or failed
 * @test_is_oom: when we run out of memory, use this to halt moving forward
 * @kthreads_done: completion used to signal when all work is done
 * @list: needed to be part of the reg_test_devs
 * @info: array of info for each thread
 */
struct kmod_test_device {
	int dev_idx;
	struct test_config config;
	struct miscdevice misc_dev;
	struct device *dev;
	struct mutex config_mutex;
	struct mutex trigger_mutex;
	struct mutex thread_mutex;

	unsigned int done;

	bool test_is_oom;
	struct completion kthreads_done;
	struct list_head list;

	struct kmod_test_device_info *info;
};

static const char *test_case_str(enum kmod_test_case test_case)
{
	switch (test_case) {
	case TEST_KMOD_DRIVER:
		return "TEST_KMOD_DRIVER";
	case TEST_KMOD_FS_TYPE:
		return "TEST_KMOD_FS_TYPE";
	default:
		return "invalid";
	}
}

static struct miscdevice *dev_to_misc_dev(struct device *dev)
{
	return dev_get_drvdata(dev);
}

static struct kmod_test_device *misc_dev_to_test_dev(struct miscdevice *misc_dev)
{
	return container_of(misc_dev, struct kmod_test_device, misc_dev);
}

static struct kmod_test_device *dev_to_test_dev(struct device *dev)
{
	struct miscdevice *misc_dev;

	misc_dev = dev_to_misc_dev(dev);

	return misc_dev_to_test_dev(misc_dev);
}

/* Must run with thread_mutex held */
static void kmod_test_done_check(struct kmod_test_device *test_dev,
				 unsigned int idx)
{
	struct test_config *config = &test_dev->config;

	test_dev->done++;
	dev_dbg(test_dev->dev, "Done thread count: %u\n", test_dev->done);

	if (test_dev->done == config->num_threads) {
		dev_info(test_dev->dev, "Done: %u threads have all run now\n",
			 test_dev->done);
		dev_info(test_dev->dev, "Last thread to run: %u\n", idx);
		complete(&test_dev->kthreads_done);
	}
}

static void test_kmod_put_module(struct kmod_test_device_info *info)
{
	struct kmod_test_device *test_dev = info->test_dev;
	struct test_config *config = &test_dev->config;

	if (!info->need_mod_put)
		return;

	switch (config->test_case) {
	case TEST_KMOD_DRIVER:
		break;
	case TEST_KMOD_FS_TYPE:
		if (info->fs_sync && info->fs_sync->owner)
			module_put(info->fs_sync->owner);
		break;
	default:
		BUG();
	}

	info->need_mod_put = false;
}

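/*
 * Per-thread worker. Issues a single request as per the configured test
 * case, then drops any module reference taken and marks itself done under
 * thread_mutex.
 */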
static int run_request(void *data)
{
	struct kmod_test_device_info *info = data;
	struct kmod_test_device *test_dev = info->test_dev;
	struct test_config *config = &test_dev->config;

	switch (config->test_case) {
	case TEST_KMOD_DRIVER:
		info->ret_sync = request_module("%s", config->test_driver);
		break;
	case TEST_KMOD_FS_TYPE:
		info->fs_sync = get_fs_type(config->test_fs);
		info->need_mod_put = true;
		break;
	default:
		/* __trigger_config_run() already checked for test sanity */
		BUG();
		return -EINVAL;
	}

	dev_dbg(test_dev->dev, "Ran thread %u\n", info->thread_idx);

	test_kmod_put_module(info);

	mutex_lock(&test_dev->thread_mutex);
	info->task_sync = NULL;
	kmod_test_done_check(test_dev, info->thread_idx);
	mutex_unlock(&test_dev->thread_mutex);

	return 0;
}

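/*
 * Collect the result for a single thread: the request_module() return value
 * for @TEST_KMOD_DRIVER, or -EINVAL if get_fs_type() came back NULL for
 * @TEST_KMOD_FS_TYPE. The per-thread status is also logged.
 */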
static int tally_work_test(struct kmod_test_device_info *info)
{
	struct kmod_test_device *test_dev = info->test_dev;
	struct test_config *config = &test_dev->config;
	int err_ret = 0;

	switch (config->test_case) {
	case TEST_KMOD_DRIVER:
		/*
		 * Only capture errors; if one is found, that is
		 * enough for now.
		 */
		if (info->ret_sync != 0)
			err_ret = info->ret_sync;
		dev_info(test_dev->dev,
			 "Sync thread %u return status: %d\n",
			 info->thread_idx, info->ret_sync);
		break;
	case TEST_KMOD_FS_TYPE:
		/* For now we make this simple */
		if (!info->fs_sync)
			err_ret = -EINVAL;
		dev_info(test_dev->dev, "Sync thread %u fs: %s\n",
			 info->thread_idx, info->fs_sync ? config->test_fs :
			 "NULL");
		break;
	default:
		BUG();
	}

	return err_ret;
}

/*
 * XXX: add result option to display if all errors did not match.
 * For now we just keep any error code if one was found.
 *
 * If this ran it means *all* tasks were created fine and we
 * are now just collecting results.
 *
 * Only propagate errors, do not override with a subsequent success case.
 */
static void tally_up_work(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	struct kmod_test_device_info *info;
	unsigned int idx;
	int err_ret = 0;
	int ret = 0;

	mutex_lock(&test_dev->thread_mutex);

	dev_info(test_dev->dev, "Results:\n");

	for (idx = 0; idx < config->num_threads; idx++) {
		info = &test_dev->info[idx];
		ret = tally_work_test(info);
		if (ret)
			err_ret = ret;
	}

	/*
	 * Note: request_module() returns 256 for a module not found even
	 * though modprobe itself returns 1.
	 */
	config->test_result = err_ret;

	mutex_unlock(&test_dev->thread_mutex);
}

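/*
 * Kick off the kthread for thread slot @idx. On failure the device is
 * flagged as out of memory and -ENOMEM is recorded as that thread's result.
 */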
static int try_one_request(struct kmod_test_device *test_dev, unsigned int idx)
{
	struct kmod_test_device_info *info = &test_dev->info[idx];
	int fail_ret = -ENOMEM;

	mutex_lock(&test_dev->thread_mutex);

	info->thread_idx = idx;
	info->test_dev = test_dev;
	info->task_sync = kthread_run(run_request, info, "%s-%u",
				      KBUILD_MODNAME, idx);

	if (!info->task_sync || IS_ERR(info->task_sync)) {
		test_dev->test_is_oom = true;
		dev_err(test_dev->dev, "Setting up thread %u failed\n", idx);
		info->task_sync = NULL;
		goto err_out;
	}

	dev_dbg(test_dev->dev, "Kicked off thread %u\n", idx);

	mutex_unlock(&test_dev->thread_mutex);

	return 0;

err_out:
	info->ret_sync = fail_ret;
	mutex_unlock(&test_dev->thread_mutex);

	return fail_ret;
}

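/*
 * Stop any threads that are still running and, as a sanity check, put any
 * module reference a thread took but did not get to drop itself.
 */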
static void test_dev_kmod_stop_tests(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	struct kmod_test_device_info *info;
	unsigned int i;

	dev_info(test_dev->dev, "Ending request_module() tests\n");

	mutex_lock(&test_dev->thread_mutex);

	for (i = 0; i < config->num_threads; i++) {
		info = &test_dev->info[i];
		if (info->task_sync && !IS_ERR(info->task_sync)) {
			dev_info(test_dev->dev,
				 "Stopping still-running thread %u\n", i);
			kthread_stop(info->task_sync);
		}

		/*
		 * info->task_sync is well protected, it can only be
		 * NULL or a pointer to a struct. If it's NULL we either
		 * never ran, or we did and we completed the work. Completed
		 * tasks *always* put the module for us. This is a sanity
		 * check -- just in case.
		 */
		if (info->task_sync && info->need_mod_put)
			test_kmod_put_module(info);
	}

	mutex_unlock(&test_dev->thread_mutex);
}

/*
 * Only wait if we did not run into any errors during all of our thread
 * set up. If we ran into any issues we stop the threads and just bail out
 * with an error to the trigger. This also means we don't need any tally
 * work for any threads which failed to start.
 */
static int try_requests(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	unsigned int idx;
	int ret;
	bool any_error = false;

	for (idx = 0; idx < config->num_threads; idx++) {
		if (test_dev->test_is_oom) {
			any_error = true;
			break;
		}

		ret = try_one_request(test_dev, idx);
		if (ret) {
			any_error = true;
			break;
		}
	}

	if (!any_error) {
		test_dev->test_is_oom = false;
		dev_info(test_dev->dev,
			 "No errors were found while initializing threads\n");
		wait_for_completion(&test_dev->kthreads_done);
		tally_up_work(test_dev);
	} else {
		test_dev->test_is_oom = true;
		dev_info(test_dev->dev,
			 "At least one thread failed to start, stop all work\n");
		test_dev_kmod_stop_tests(test_dev);
		return -ENOMEM;
	}

	return 0;
}

static int run_test_driver(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;

	dev_info(test_dev->dev, "Test case: %s (%u)\n",
		 test_case_str(config->test_case),
		 config->test_case);
	dev_info(test_dev->dev, "Test driver to load: %s\n",
		 config->test_driver);
	dev_info(test_dev->dev, "Number of threads to run: %u\n",
		 config->num_threads);
	dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n",
		 config->num_threads - 1);

	return try_requests(test_dev);
}

static int run_test_fs_type(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;

	dev_info(test_dev->dev, "Test case: %s (%u)\n",
		 test_case_str(config->test_case),
		 config->test_case);
	dev_info(test_dev->dev, "Test filesystem to load: %s\n",
		 config->test_fs);
	dev_info(test_dev->dev, "Number of threads to run: %u\n",
		 config->num_threads);
	dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n",
		 config->num_threads - 1);

	return try_requests(test_dev);
}

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;
	int len = 0;

	mutex_lock(&test_dev->config_mutex);

	len += snprintf(buf, PAGE_SIZE,
			"Custom trigger configuration for: %s\n",
			dev_name(dev));

	len += snprintf(buf+len, PAGE_SIZE - len,
			"Number of threads:\t%u\n",
			config->num_threads);

	len += snprintf(buf+len, PAGE_SIZE - len,
			"Test_case:\t%s (%u)\n",
			test_case_str(config->test_case),
			config->test_case);

	if (config->test_driver)
		len += snprintf(buf+len, PAGE_SIZE - len,
				"driver:\t%s\n",
				config->test_driver);
	else
		len += snprintf(buf+len, PAGE_SIZE - len,
				"driver:\tEMPTY\n");

	if (config->test_fs)
		len += snprintf(buf+len, PAGE_SIZE - len,
				"fs:\t%s\n",
				config->test_fs);
	else
		len += snprintf(buf+len, PAGE_SIZE - len,
				"fs:\tEMPTY\n");

	mutex_unlock(&test_dev->config_mutex);

	return len;
}
static DEVICE_ATTR_RO(config);

/*
 * This ensures we don't allow kicking threads through if our configuration
 * is faulty.
 */
static int __trigger_config_run(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;

	test_dev->done = 0;

	switch (config->test_case) {
	case TEST_KMOD_DRIVER:
		return run_test_driver(test_dev);
	case TEST_KMOD_FS_TYPE:
		return run_test_fs_type(test_dev);
	default:
		dev_warn(test_dev->dev,
			 "Invalid test case requested: %u\n",
			 config->test_case);
		return -EINVAL;
	}
}

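/*
 * Run the configured test case with both the trigger and config mutexes
 * held, so a trigger cannot race with configuration changes or another
 * trigger.
 */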
static int trigger_config_run(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	int ret;

	mutex_lock(&test_dev->trigger_mutex);
	mutex_lock(&test_dev->config_mutex);

	ret = __trigger_config_run(test_dev);
	if (ret < 0)
		goto out;
	dev_info(test_dev->dev, "General test result: %d\n",
		 config->test_result);

	/*
	 * We must return 0 after a trigger event unless something went
	 * wrong with the setup of the test. If the test setup went fine
	 * then userspace must just check the result of config->test_result.
	 * One issue with relying on the return value of a call in the kernel
	 * is that if the kernel returns a positive value, this trigger will
	 * not return that value to userspace; it would be lost.
	 *
	 * By not relying on capturing the return value of the tests we run
	 * through the trigger, it also allows us to run tests with set -e
	 * and only fail when something went wrong with the driver upon
	 * trigger requests.
	 */
	ret = 0;

out:
	mutex_unlock(&test_dev->config_mutex);
	mutex_unlock(&test_dev->trigger_mutex);

	return ret;
}

static ssize_t
trigger_config_store(struct device *dev,
		     struct device_attribute *attr,
		     const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	int ret;

	if (test_dev->test_is_oom)
		return -ENOMEM;

	/*
	 * For all intents and purposes we don't care what userspace wrote
	 * to this trigger, we care only that we were triggered. We treat
	 * the return value only for capturing issues with the test setup.
	 * At this point all the test variables should have been allocated
	 * so typically this should never fail.
	 */
	ret = trigger_config_run(test_dev);
	if (unlikely(ret < 0))
		goto out;

	/*
	 * Note: any return > 0 will be treated as success
	 * and the error value will not be available to userspace.
	 * Do not rely on trying to send a test's return value to
	 * userspace, as positive return errors will be lost.
	 */
	if (WARN_ON(ret > 0))
		return -EINVAL;

	ret = count;
out:
	return ret;
}
static DEVICE_ATTR_WO(trigger_config);

/*
 * XXX: move to kstrncpy() once merged.
 *
 * Users should use kfree_const() when freeing these.
 */
static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
{
	*dst = kstrndup(name, count, gfp);
	if (!*dst)
		return -ENOSPC;
	return count;
}

static int config_copy_test_driver_name(struct test_config *config,
					 const char *name,
					 size_t count)
{
	return __kstrncpy(&config->test_driver, name, count, GFP_KERNEL);
}

static int config_copy_test_fs(struct test_config *config, const char *name,
			       size_t count)
{
	return __kstrncpy(&config->test_fs, name, count, GFP_KERNEL);
}

static void __kmod_config_free(struct test_config *config)
{
	if (!config)
		return;

	kfree_const(config->test_driver);
	config->test_driver = NULL;

	kfree_const(config->test_fs);
	config->test_fs = NULL;
}

static void kmod_config_free(struct kmod_test_device *test_dev)
{
	struct test_config *config;

	if (!test_dev)
		return;

	config = &test_dev->config;

	mutex_lock(&test_dev->config_mutex);
	__kmod_config_free(config);
	mutex_unlock(&test_dev->config_mutex);
}

static ssize_t config_test_driver_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;
	int copied;

	mutex_lock(&test_dev->config_mutex);

	kfree_const(config->test_driver);
	config->test_driver = NULL;

	copied = config_copy_test_driver_name(config, buf, count);
	mutex_unlock(&test_dev->config_mutex);

	return copied;
}

/*
 * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
 */
static ssize_t config_test_show_str(struct mutex *config_mutex,
				    char *dst,
				    char *src)
{
	int len;

	mutex_lock(config_mutex);
	len = snprintf(dst, PAGE_SIZE, "%s\n", src);
	mutex_unlock(config_mutex);

	return len;
}

static ssize_t config_test_driver_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return config_test_show_str(&test_dev->config_mutex, buf,
				    config->test_driver);
}
static DEVICE_ATTR_RW(config_test_driver);

static ssize_t config_test_fs_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;
	int copied;

	mutex_lock(&test_dev->config_mutex);

	kfree_const(config->test_fs);
	config->test_fs = NULL;

	copied = config_copy_test_fs(config, buf, count);
	mutex_unlock(&test_dev->config_mutex);

	return copied;
}

static ssize_t config_test_fs_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return config_test_show_str(&test_dev->config_mutex, buf,
				    config->test_fs);
}
static DEVICE_ATTR_RW(config_test_fs);

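/*
 * Helper used when the test is kicked from within the kernel (see
 * force_init_test): sets the driver or filesystem name for @test_case
 * and then fires the trigger.
 */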
static int trigger_config_run_type(struct kmod_test_device *test_dev,
				   enum kmod_test_case test_case,
				   const char *test_str)
{
	int copied = 0;
	struct test_config *config = &test_dev->config;

	mutex_lock(&test_dev->config_mutex);

	switch (test_case) {
	case TEST_KMOD_DRIVER:
		kfree_const(config->test_driver);
		config->test_driver = NULL;
		copied = config_copy_test_driver_name(config, test_str,
						      strlen(test_str));
		break;
	case TEST_KMOD_FS_TYPE:
		kfree_const(config->test_fs);
		config->test_fs = NULL;
		copied = config_copy_test_fs(config, test_str,
					     strlen(test_str));
		break;
	default:
		mutex_unlock(&test_dev->config_mutex);
		return -EINVAL;
	}

	config->test_case = test_case;

	mutex_unlock(&test_dev->config_mutex);

	if (copied <= 0 || copied != strlen(test_str)) {
		test_dev->test_is_oom = true;
		return -ENOMEM;
	}

	test_dev->test_is_oom = false;

	return trigger_config_run(test_dev);
}

static void free_test_dev_info(struct kmod_test_device *test_dev)
{
	vfree(test_dev->info);
	test_dev->info = NULL;
}

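/*
 * Resize the per-thread info array to match the currently configured
 * number of threads. Any previous array is freed first.
 */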
static int kmod_config_sync_info(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;

	free_test_dev_info(test_dev);
	test_dev->info =
		vzalloc(array_size(sizeof(struct kmod_test_device_info),
				   config->num_threads));
	if (!test_dev->info)
		return -ENOMEM;

	return 0;
}

/*
 * Old kernels may not have this; keep this fallback if you want to port
 * this code to test on older kernels.
 */
#ifdef get_kmod_umh_limit
static unsigned int kmod_init_test_thread_limit(void)
{
	return get_kmod_umh_limit();
}
#else
static unsigned int kmod_init_test_thread_limit(void)
{
	return TEST_START_NUM_THREADS;
}
#endif

static int __kmod_config_init(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	int ret = -ENOMEM, copied;

	__kmod_config_free(config);

	copied = config_copy_test_driver_name(config, TEST_START_DRIVER,
					      strlen(TEST_START_DRIVER));
	if (copied != strlen(TEST_START_DRIVER))
		goto err_out;

	copied = config_copy_test_fs(config, TEST_START_TEST_FS,
				     strlen(TEST_START_TEST_FS));
	if (copied != strlen(TEST_START_TEST_FS))
		goto err_out;

	config->num_threads = kmod_init_test_thread_limit();
	config->test_result = 0;
	config->test_case = TEST_START_TEST_CASE;

	ret = kmod_config_sync_info(test_dev);
	if (ret)
		goto err_out;

	test_dev->test_is_oom = false;

	return 0;

err_out:
	test_dev->test_is_oom = true;
	WARN_ON(test_dev->test_is_oom);

	__kmod_config_free(config);

	return ret;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	int ret;

	mutex_lock(&test_dev->trigger_mutex);
	mutex_lock(&test_dev->config_mutex);

	ret = __kmod_config_init(test_dev);
	if (ret < 0) {
		ret = -ENOMEM;
		dev_err(dev, "could not alloc settings for config trigger: %d\n",
			ret);
		goto out;
	}

	dev_info(dev, "reset\n");
	ret = count;

out:
	mutex_unlock(&test_dev->config_mutex);
	mutex_unlock(&test_dev->trigger_mutex);

	return ret;
}
static DEVICE_ATTR_WO(reset);

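/*
 * Update an unsigned int configuration value from a sysfs write and call
 * @test_sync so dependent state can be resynced; if that fails the old
 * value is restored. The full write size is returned on success.
 */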
static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
					    const char *buf, size_t size,
					    unsigned int *config,
					    int (*test_sync)(struct kmod_test_device *test_dev))
{
	int ret;
	unsigned int val;
	unsigned int old_val;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&test_dev->config_mutex);

	old_val = *config;
	*config = val;

	ret = test_sync(test_dev);
	if (ret) {
		*config = old_val;

		ret = test_sync(test_dev);
		WARN_ON(ret);

		mutex_unlock(&test_dev->config_mutex);
		return -EINVAL;
	}

	mutex_unlock(&test_dev->config_mutex);
	/* Always return full write size even if we didn't consume all */
	return size;
}

static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
					     const char *buf, size_t size,
					     unsigned int *config,
					     unsigned int min,
					     unsigned int max)
{
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val < min || val > max)
		return -EINVAL;

	mutex_lock(&test_dev->config_mutex);
	*config = val;
	mutex_unlock(&test_dev->config_mutex);

	/* Always return full write size even if we didn't consume all */
	return size;
}

static int test_dev_config_update_int(struct kmod_test_device *test_dev,
				      const char *buf, size_t size,
				      int *config)
{
	int val;
	int ret;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&test_dev->config_mutex);
	*config = val;
	mutex_unlock(&test_dev->config_mutex);
	/* Always return full write size even if we didn't consume all */
	return size;
}

static ssize_t test_dev_config_show_int(struct kmod_test_device *test_dev,
					char *buf,
					int config)
{
	int val;

	mutex_lock(&test_dev->config_mutex);
	val = config;
	mutex_unlock(&test_dev->config_mutex);

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t test_dev_config_show_uint(struct kmod_test_device *test_dev,
					 char *buf,
					 unsigned int config)
{
	unsigned int val;

	mutex_lock(&test_dev->config_mutex);
	val = config;
	mutex_unlock(&test_dev->config_mutex);

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t test_result_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return test_dev_config_update_int(test_dev, buf, count,
					  &config->test_result);
}

static ssize_t config_num_threads_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return test_dev_config_update_uint_sync(test_dev, buf, count,
						&config->num_threads,
						kmod_config_sync_info);
}

static ssize_t config_num_threads_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return test_dev_config_show_uint(test_dev, buf, config->num_threads);
}
static DEVICE_ATTR_RW(config_num_threads);

static ssize_t config_test_case_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return test_dev_config_update_uint_range(test_dev, buf, count,
						 &config->test_case,
						 __TEST_KMOD_INVALID + 1,
						 __TEST_KMOD_MAX - 1);
}

static ssize_t config_test_case_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return test_dev_config_show_uint(test_dev, buf, config->test_case);
}
static DEVICE_ATTR_RW(config_test_case);

static ssize_t test_result_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return test_dev_config_show_int(test_dev, buf, config->test_result);
}
static DEVICE_ATTR_RW(test_result);

#define TEST_KMOD_DEV_ATTR(name)		&dev_attr_##name.attr

static struct attribute *test_dev_attrs[] = {
	TEST_KMOD_DEV_ATTR(trigger_config),
	TEST_KMOD_DEV_ATTR(config),
	TEST_KMOD_DEV_ATTR(reset),

	TEST_KMOD_DEV_ATTR(config_test_driver),
	TEST_KMOD_DEV_ATTR(config_test_fs),
	TEST_KMOD_DEV_ATTR(config_num_threads),
	TEST_KMOD_DEV_ATTR(config_test_case),
	TEST_KMOD_DEV_ATTR(test_result),

	NULL,
};

ATTRIBUTE_GROUPS(test_dev);

static int kmod_config_init(struct kmod_test_device *test_dev)
{
	int ret;

	mutex_lock(&test_dev->config_mutex);
	ret = __kmod_config_init(test_dev);
	mutex_unlock(&test_dev->config_mutex);

	return ret;
}

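/*
 * Allocate and initialize one test device, including its default config
 * and the backing misc device name. The misc device is not registered
 * here; that is the caller's job.
 */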
static struct kmod_test_device *alloc_test_dev_kmod(int idx)
{
	int ret;
	struct kmod_test_device *test_dev;
	struct miscdevice *misc_dev;

	test_dev = vzalloc(sizeof(struct kmod_test_device));
	if (!test_dev)
		goto err_out;

	mutex_init(&test_dev->config_mutex);
	mutex_init(&test_dev->trigger_mutex);
	mutex_init(&test_dev->thread_mutex);

	init_completion(&test_dev->kthreads_done);

	ret = kmod_config_init(test_dev);
	if (ret < 0) {
		pr_err("Cannot alloc kmod_config_init()\n");
		goto err_out_free;
	}

	test_dev->dev_idx = idx;
	misc_dev = &test_dev->misc_dev;

	misc_dev->minor = MISC_DYNAMIC_MINOR;
	misc_dev->name = kasprintf(GFP_KERNEL, "test_kmod%d", idx);
	if (!misc_dev->name) {
		pr_err("Cannot alloc misc_dev->name\n");
		goto err_out_free_config;
	}
	misc_dev->groups = test_dev_groups;

	return test_dev;

err_out_free_config:
	free_test_dev_info(test_dev);
	kmod_config_free(test_dev);
err_out_free:
	vfree(test_dev);
	test_dev = NULL;
err_out:
	return NULL;
}

static void free_test_dev_kmod(struct kmod_test_device *test_dev)
{
	if (test_dev) {
		kfree_const(test_dev->misc_dev.name);
		test_dev->misc_dev.name = NULL;
		free_test_dev_info(test_dev);
		kmod_config_free(test_dev);
		vfree(test_dev);
		test_dev = NULL;
	}
}

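/*
 * Allocate a new test device, register its misc device and add it to the
 * global list of registered test devices, all under reg_dev_mutex.
 */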
static struct kmod_test_device *register_test_dev_kmod(void)
{
	struct kmod_test_device *test_dev = NULL;
	int ret;

	mutex_lock(&reg_dev_mutex);

	/* int should suffice for number of devices, test for wrap */
	if (num_test_devs + 1 == INT_MAX) {
		pr_err("reached limit of number of test devices\n");
		goto out;
	}

	test_dev = alloc_test_dev_kmod(num_test_devs);
	if (!test_dev)
		goto out;

	ret = misc_register(&test_dev->misc_dev);
	if (ret) {
		pr_err("could not register misc device: %d\n", ret);
		free_test_dev_kmod(test_dev);
		test_dev = NULL;
		goto out;
	}

	test_dev->dev = test_dev->misc_dev.this_device;
	list_add_tail(&test_dev->list, &reg_test_devs);
	dev_info(test_dev->dev, "interface ready\n");

	num_test_devs++;

out:
	mutex_unlock(&reg_dev_mutex);

	return test_dev;
}

static int __init test_kmod_init(void)
{
	struct kmod_test_device *test_dev;
	int ret;

	test_dev = register_test_dev_kmod();
	if (!test_dev) {
		pr_err("Cannot add first test kmod device\n");
		return -ENODEV;
	}

	/*
	 * With some work we might be able to gracefully enable
	 * testing with this driver built-in, for now this seems
	 * rather risky. For those willing to try have at it,
	 * and enable the below. Good luck! If that works, try
	 * lowering the init level for more fun.
	 */
	if (force_init_test) {
		ret = trigger_config_run_type(test_dev,
					      TEST_KMOD_DRIVER, "tun");
		if (WARN_ON(ret))
			return ret;
		ret = trigger_config_run_type(test_dev,
					      TEST_KMOD_FS_TYPE, "btrfs");
		if (WARN_ON(ret))
			return ret;
	}

	return 0;
}
late_initcall(test_kmod_init);

static void unregister_test_dev_kmod(struct kmod_test_device *test_dev)
{
	mutex_lock(&test_dev->trigger_mutex);
	mutex_lock(&test_dev->config_mutex);

	test_dev_kmod_stop_tests(test_dev);

	dev_info(test_dev->dev, "removing interface\n");
	misc_deregister(&test_dev->misc_dev);

	mutex_unlock(&test_dev->config_mutex);
	mutex_unlock(&test_dev->trigger_mutex);

	free_test_dev_kmod(test_dev);
}

static void __exit test_kmod_exit(void)
{
	struct kmod_test_device *test_dev, *tmp;

	mutex_lock(&reg_dev_mutex);
	list_for_each_entry_safe(test_dev, tmp, &reg_test_devs, list) {
		list_del(&test_dev->list);
		unregister_test_dev_kmod(test_dev);
	}
	mutex_unlock(&reg_dev_mutex);
}
module_exit(test_kmod_exit);

MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>");
MODULE_LICENSE("GPL");