// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, rt_boost, 2,
	      "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
#define MAX_NESTED_LOCKS 8

static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	void (*exit)(void);
	int (*nested_lock)(int tid, u32 lockset);
	int (*writelock)(int tid);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(int tid);
	void (*nested_unlock)(int tid, u32 lockset);
	int (*readlock)(int tid);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(int tid);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	bool init_called;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}
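
/*
 * A note on the delay arithmetic used here and in the other *_delay()
 * functions below: a test of the form
 *
 *	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
 *		mdelay(longdelay_ms);
 *
 * fires on average once per nrealwriters_stress * 2000 * longdelay_ms
 * calls.  For example (numbers purely illustrative), with the default
 * long_hold of 100 ms and four writers, each writer takes the long delay
 * roughly once per 800,000 acquisitions, so the aggregate time spent in
 * long holds is independent of both the writer count and the value of
 * long_hold.
 */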

static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
	/* BUGGY, do not use in real life!!! */
}

static void __torture_rt_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = rt_boost_factor;

	if (!rt_task(current)) {
		/*
		 * Boost priority once every rt_boost_factor *
		 * nrealwriters_stress operations, on average. When the task
		 * tries to take the lock, the rtmutex will account for the
		 * new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for an average of
		 * 2 * rt_boost_factor * nrealwriters_stress operations, then
		 * be restored to its original prio, and so forth.
		 *
		 * When @trsp is NULL, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}
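
/*
 * Worked example (numbers purely illustrative): with the default
 * rt_boost_factor of 50 and, say, four writers, a non-RT writer upgrades
 * itself to SCHED_FIFO about once every 200 lock operations, and an
 * already-boosted writer drops back to SCHED_NORMAL about once every 400,
 * so each boost spans a few hundred operations on average.
 */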

static void torture_rt_boost(struct torture_random_state *trsp)
{
	if (rt_boost != 2)
		return;

	__torture_rt_boost(trsp);
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;
	unsigned long j;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * longdelay_ms))) {
		j = jiffies;
		mdelay(longdelay_ms);
		pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
	}
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};

static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);

static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	raw_spin_lock(&torture_raw_spinlock);
	return 0;
}

static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock(&torture_raw_spinlock);
}

static struct lock_torture_ops raw_spin_lock_ops = {
	.writelock	= torture_raw_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_raw_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "raw_spin_lock"
};

static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops raw_spin_lock_irq_ops = {
	.writelock	= torture_raw_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_raw_spin_lock_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "raw_spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);
static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];

static void torture_mutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__mutex_init(&torture_nested_mutexes[i], __func__,
			     &nested_mutex_keys[i]);
}

static int torture_mutex_nested_lock(int tid __maybe_unused,
				     u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			mutex_lock(&torture_nested_mutexes[i]);
	return 0;
}
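
/*
 * Each bit of the randomly chosen lockset selects one of the nested
 * mutexes, always acquired in ascending index order so that lockdep sees
 * a consistent hierarchy (torture_mutex_nested_unlock() below releases
 * them in the reverse order).  For example, lockset bits 0b0101 with
 * nested_locks >= 3 take locks 0 and 2, in that order.
 */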

static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static void torture_mutex_nested_unlock(int tid __maybe_unused,
					u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			mutex_unlock(&torture_nested_mutexes[i]);
}

static struct lock_torture_ops mutex_lock_ops = {
	.init		= torture_mutex_init,
	.nested_lock	= torture_mutex_nested_lock,
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_mutex_unlock,
	.nested_unlock	= torture_mutex_nested_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};

#include <linux/ww_mutex.h>
/*
 * The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid a lockdep problem.  The ww_mutex_init()
 * function is called at initialization time to ensure that.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;

static void torture_ww_mutex_init(void)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}

static void torture_ww_mutex_exit(void)
{
	kfree(ww_acquire_ctxs);
}

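/*
 * Take all three ww_mutexes under a single ww_acquire_ctx.  A -EDEADLK
 * return from ww_mutex_lock() means this context must back off to avoid
 * deadlock: release everything acquired so far, sleep on the contended
 * mutex via ww_mutex_lock_slow(), move that mutex to the head of the
 * list, and resume acquiring from there.
 */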
static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}

	return 0;
}

static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.init		= torture_ww_mutex_init,
	.exit		= torture_ww_mutex_exit,
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];

static void torture_rtmutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
				&nested_rtmutex_keys[i]);
}

static int torture_rtmutex_nested_lock(int tid __maybe_unused,
				       u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			rt_mutex_lock(&torture_nested_rtmutexes[i]);
	return 0;
}

static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
{
	if (!rt_boost)
		return;

	__torture_rt_boost(trsp);
}

static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
					  u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			rt_mutex_unlock(&torture_nested_rtmutexes[i]);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.init		= torture_rtmutex_init,
	.nested_lock	= torture_rtmutex_nested_lock,
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost	= torture_rt_boost_rtmutex,
	.writeunlock	= torture_rtmutex_unlock,
	.nested_unlock	= torture_rtmutex_nested_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.exit		= torture_percpu_rwsem_exit,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
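	/* This kthread's index is its offset into the writer-stats array. */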
	int tid = lwsp - cxt.lwsa;
	DEFINE_TORTURE_RANDOM(rand);
	u32 lockset_mask;
	bool skip_main_lock;

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	if (!rt_task(current))
		set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		lockset_mask = torture_random(&rand);
		/*
		 * When using nested_locks, we want to occasionally
		 * skip the main lock so we can avoid always serializing
		 * the lock chains on that central lock. By skipping the
		 * main lock occasionally, we can create different
		 * contention patterns (allowing for multiple disjoint
		 * blocked trees).
		 */
		skip_main_lock = (nested_locks &&
				  !(torture_random(&rand) % 100));

		cxt.cur_ops->task_boost(&rand);
		if (cxt.cur_ops->nested_lock)
			cxt.cur_ops->nested_lock(tid, lockset_mask);

		if (!skip_main_lock) {
			cxt.cur_ops->writelock(tid);
			if (WARN_ON_ONCE(lock_is_write_held))
				lwsp->n_lock_fail++;
			lock_is_write_held = true;
			if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
				lwsp->n_lock_fail++; /* rare, but... */

			lwsp->n_lock_acquired++;

			cxt.cur_ops->write_delay(&rand);

			lock_is_write_held = false;
			WRITE_ONCE(last_lock_release, jiffies);
			cxt.cur_ops->writeunlock(tid);
		}
		if (cxt.cur_ops->nested_unlock)
			cxt.cur_ops->nested_unlock(tid, lockset_mask);

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
	page += sprintf(page,
			"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
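
/*
 * The resulting line looks something like the following, with the numbers
 * invented purely for illustration:
 *
 *	Writes: Total: 93746064 Max/Min: 255/73 Fail: 0
 *
 * "???" follows Max/Min when the maximum exceeds twice the minimum while
 * CPU hotplugging is disabled (possible starvation), and a trailing "!!!"
 * flags any lock-acquisition failures.
 */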

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d nested_locks=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress,
		 nested_locks, stat_interval, verbose, shuffle_interval,
		 stutter, shutdown_secs, onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However, cxt.cur_ops->init() may have been invoked, so in
	 * addition to performing the underlying torture-specific cleanups,
	 * cur_ops->exit() will be invoked if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&raw_spin_lock_ops, &raw_spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default, distribute the number of readers and
			 * writers evenly.  We still run the same total number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/* cap nested_locks to MAX_NESTED_LOCKS */
	if (nested_locks > MAX_NESTED_LOCKS)
		nested_locks = MAX_NESTED_LOCKS;

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or even let the user choose the
	 * policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
						     writer_tasks[i],
						     writer_fifo ? sched_set_fifo : NULL);
		if (torture_init_error(firsterr))
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);