1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * lib/locking-selftest.c
4 *
5 * Testsuite for various locking APIs: spinlocks, rwlocks,
6 * mutexes and rw-semaphores.
7 *
8 * It checks for both false positives and false negatives.
9 *
10 * Started by Ingo Molnar:
11 *
12 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
13 */
14 #include <linux/rwsem.h>
15 #include <linux/mutex.h>
16 #include <linux/ww_mutex.h>
17 #include <linux/sched.h>
18 #include <linux/sched/mm.h>
19 #include <linux/delay.h>
20 #include <linux/lockdep.h>
21 #include <linux/spinlock.h>
22 #include <linux/kallsyms.h>
23 #include <linux/interrupt.h>
24 #include <linux/debug_locks.h>
25 #include <linux/irqflags.h>
26 #include <linux/rtmutex.h>
27 #include <linux/local_lock.h>
28
29 #ifdef CONFIG_PREEMPT_RT
30 # define NON_RT(...)
31 #else
32 # define NON_RT(...) __VA_ARGS__
33 #endif
34
35 /*
36 * Change this to 1 if you want to see the failure printouts:
37 */
38 static unsigned int debug_locks_verbose;
39 unsigned int force_read_lock_recursive;
40
41 static DEFINE_WD_CLASS(ww_lockdep);
42
43 static int __init setup_debug_locks_verbose(char *str)
44 {
45 get_option(&str, &debug_locks_verbose);
46
47 return 1;
48 }
49
50 __setup("debug_locks_verbose=", setup_debug_locks_verbose);
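/*
 * debug_locks_verbose is a bitmask of the LOCKTYPE_* values below; e.g.
 * booting with debug_locks_verbose=0x2 lets the rwlock testcases print
 * their lockdep output instead of running silently.
 */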
51
52 #define FAILURE 0
53 #define SUCCESS 1
54
55 #define LOCKTYPE_SPIN 0x1
56 #define LOCKTYPE_RWLOCK 0x2
57 #define LOCKTYPE_MUTEX 0x4
58 #define LOCKTYPE_RWSEM 0x8
59 #define LOCKTYPE_WW 0x10
60 #define LOCKTYPE_RTMUTEX 0x20
61 #define LOCKTYPE_LL 0x40
62 #define LOCKTYPE_SPECIAL 0x80
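/*
 * Each testcase passes one (or more) of these bits to dotest() as its
 * lockclass_mask; the mask is matched against debug_locks_verbose to
 * decide whether the test runs with lockdep output suppressed.
 */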
63
64 static struct ww_acquire_ctx t, t2;
65 static struct ww_mutex o, o2, o3;
66
67 /*
68 * Normal standalone locks, for the circular and irq-context
69 * dependency tests:
70 */
71 static DEFINE_SPINLOCK(lock_A);
72 static DEFINE_SPINLOCK(lock_B);
73 static DEFINE_SPINLOCK(lock_C);
74 static DEFINE_SPINLOCK(lock_D);
75
76 static DEFINE_RAW_SPINLOCK(raw_lock_A);
77 static DEFINE_RAW_SPINLOCK(raw_lock_B);
78
79 static DEFINE_RWLOCK(rwlock_A);
80 static DEFINE_RWLOCK(rwlock_B);
81 static DEFINE_RWLOCK(rwlock_C);
82 static DEFINE_RWLOCK(rwlock_D);
83
84 static DEFINE_MUTEX(mutex_A);
85 static DEFINE_MUTEX(mutex_B);
86 static DEFINE_MUTEX(mutex_C);
87 static DEFINE_MUTEX(mutex_D);
88
89 static DECLARE_RWSEM(rwsem_A);
90 static DECLARE_RWSEM(rwsem_B);
91 static DECLARE_RWSEM(rwsem_C);
92 static DECLARE_RWSEM(rwsem_D);
93
94 #ifdef CONFIG_RT_MUTEXES
95
96 static DEFINE_RT_MUTEX(rtmutex_A);
97 static DEFINE_RT_MUTEX(rtmutex_B);
98 static DEFINE_RT_MUTEX(rtmutex_C);
99 static DEFINE_RT_MUTEX(rtmutex_D);
100
101 #endif
102
103 /*
104 * Locks that we initialize dynamically as well so that
105 * e.g. X1 and X2 become two instances of the same class,
106 * but X* and Y* are different classes. We do this so that
107 * we do not trigger a real lockup:
108 */
109 static DEFINE_SPINLOCK(lock_X1);
110 static DEFINE_SPINLOCK(lock_X2);
111 static DEFINE_SPINLOCK(lock_Y1);
112 static DEFINE_SPINLOCK(lock_Y2);
113 static DEFINE_SPINLOCK(lock_Z1);
114 static DEFINE_SPINLOCK(lock_Z2);
115
116 static DEFINE_RWLOCK(rwlock_X1);
117 static DEFINE_RWLOCK(rwlock_X2);
118 static DEFINE_RWLOCK(rwlock_Y1);
119 static DEFINE_RWLOCK(rwlock_Y2);
120 static DEFINE_RWLOCK(rwlock_Z1);
121 static DEFINE_RWLOCK(rwlock_Z2);
122
123 static DEFINE_MUTEX(mutex_X1);
124 static DEFINE_MUTEX(mutex_X2);
125 static DEFINE_MUTEX(mutex_Y1);
126 static DEFINE_MUTEX(mutex_Y2);
127 static DEFINE_MUTEX(mutex_Z1);
128 static DEFINE_MUTEX(mutex_Z2);
129
130 static DECLARE_RWSEM(rwsem_X1);
131 static DECLARE_RWSEM(rwsem_X2);
132 static DECLARE_RWSEM(rwsem_Y1);
133 static DECLARE_RWSEM(rwsem_Y2);
134 static DECLARE_RWSEM(rwsem_Z1);
135 static DECLARE_RWSEM(rwsem_Z2);
136
137 #ifdef CONFIG_RT_MUTEXES
138
139 static DEFINE_RT_MUTEX(rtmutex_X1);
140 static DEFINE_RT_MUTEX(rtmutex_X2);
141 static DEFINE_RT_MUTEX(rtmutex_Y1);
142 static DEFINE_RT_MUTEX(rtmutex_Y2);
143 static DEFINE_RT_MUTEX(rtmutex_Z1);
144 static DEFINE_RT_MUTEX(rtmutex_Z2);
145
146 #endif
147
148 static DEFINE_PER_CPU(local_lock_t, local_A);
149
150 /*
151 * non-inlined runtime initializers, to let separate locks share
152 * the same lock-class:
153 */
154 #define INIT_CLASS_FUNC(class) \
155 static noinline void \
156 init_class_##class(spinlock_t *lock, rwlock_t *rwlock, \
157 struct mutex *mutex, struct rw_semaphore *rwsem)\
158 { \
159 spin_lock_init(lock); \
160 rwlock_init(rwlock); \
161 mutex_init(mutex); \
162 init_rwsem(rwsem); \
163 }
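/*
 * spin_lock_init() & friends embed a static lock_class_key at their call
 * site, so keeping each init_class_X/Y/Z() out of line gives every class a
 * single key: two locks initialized through the same function share a
 * class, while locks initialized through different functions do not.
 */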
164
165 INIT_CLASS_FUNC(X)
166 INIT_CLASS_FUNC(Y)
167 INIT_CLASS_FUNC(Z)
168
169 static void init_shared_classes(void)
170 {
171 #ifdef CONFIG_RT_MUTEXES
172 static struct lock_class_key rt_X, rt_Y, rt_Z;
173
174 __rt_mutex_init(&rtmutex_X1, __func__, &rt_X);
175 __rt_mutex_init(&rtmutex_X2, __func__, &rt_X);
176 __rt_mutex_init(&rtmutex_Y1, __func__, &rt_Y);
177 __rt_mutex_init(&rtmutex_Y2, __func__, &rt_Y);
178 __rt_mutex_init(&rtmutex_Z1, __func__, &rt_Z);
179 __rt_mutex_init(&rtmutex_Z2, __func__, &rt_Z);
180 #endif
181
182 init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1);
183 init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2);
184
185 init_class_Y(&lock_Y1, &rwlock_Y1, &mutex_Y1, &rwsem_Y1);
186 init_class_Y(&lock_Y2, &rwlock_Y2, &mutex_Y2, &rwsem_Y2);
187
188 init_class_Z(&lock_Z1, &rwlock_Z1, &mutex_Z1, &rwsem_Z1);
189 init_class_Z(&lock_Z2, &rwlock_Z2, &mutex_Z2, &rwsem_Z2);
190 }
191
192 /*
193 * For spinlocks and rwlocks we also do hardirq-safe / softirq-safe tests.
194 * The following functions use a lock from a simulated hardirq/softirq
195 * context, causing the locks to be marked as hardirq-safe/softirq-safe:
196 */
197
198 #define HARDIRQ_DISABLE local_irq_disable
199 #define HARDIRQ_ENABLE local_irq_enable
200
201 #define HARDIRQ_ENTER() \
202 local_irq_disable(); \
203 __irq_enter(); \
204 lockdep_hardirq_threaded(); \
205 WARN_ON(!in_irq());
206
207 #define HARDIRQ_EXIT() \
208 __irq_exit(); \
209 local_irq_enable();
210
211 #define SOFTIRQ_DISABLE local_bh_disable
212 #define SOFTIRQ_ENABLE local_bh_enable
213
214 #define SOFTIRQ_ENTER() \
215 local_bh_disable(); \
216 local_irq_disable(); \
217 lockdep_softirq_enter(); \
218 WARN_ON(!in_softirq());
219
220 #define SOFTIRQ_EXIT() \
221 lockdep_softirq_exit(); \
222 local_irq_enable(); \
223 local_bh_enable();
224
225 /*
226 * Shortcuts for lock/unlock API variants, to keep
227 * the testcases compact:
228 */
229 #define L(x) spin_lock(&lock_##x)
230 #define U(x) spin_unlock(&lock_##x)
231 #define LU(x) L(x); U(x)
232 #define SI(x) spin_lock_init(&lock_##x)
233
234 #define WL(x) write_lock(&rwlock_##x)
235 #define WU(x) write_unlock(&rwlock_##x)
236 #define WLU(x) WL(x); WU(x)
237
238 #define RL(x) read_lock(&rwlock_##x)
239 #define RU(x) read_unlock(&rwlock_##x)
240 #define RLU(x) RL(x); RU(x)
241 #define RWI(x) rwlock_init(&rwlock_##x)
242
243 #define ML(x) mutex_lock(&mutex_##x)
244 #define MU(x) mutex_unlock(&mutex_##x)
245 #define MI(x) mutex_init(&mutex_##x)
246
247 #define RTL(x) rt_mutex_lock(&rtmutex_##x)
248 #define RTU(x) rt_mutex_unlock(&rtmutex_##x)
249 #define RTI(x) rt_mutex_init(&rtmutex_##x)
250
251 #define WSL(x) down_write(&rwsem_##x)
252 #define WSU(x) up_write(&rwsem_##x)
253
254 #define RSL(x) down_read(&rwsem_##x)
255 #define RSU(x) up_read(&rwsem_##x)
256 #define RWSI(x) init_rwsem(&rwsem_##x)
257
258 #ifndef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
259 #define WWAI(x) ww_acquire_init(x, &ww_lockdep)
260 #else
261 #define WWAI(x) do { ww_acquire_init(x, &ww_lockdep); (x)->deadlock_inject_countdown = ~0U; } while (0)
262 #endif
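/*
 * With CONFIG_DEBUG_WW_MUTEX_SLOWPATH, ww_mutex_lock() artificially injects
 * -EDEADLK once deadlock_inject_countdown reaches zero; setting it to ~0U
 * here keeps that injection from firing so the selftests behave
 * deterministically.
 */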
263 #define WWAD(x) ww_acquire_done(x)
264 #define WWAF(x) ww_acquire_fini(x)
265
266 #define WWL(x, c) ww_mutex_lock(x, c)
267 #define WWT(x) ww_mutex_trylock(x, NULL)
268 #define WWL1(x) ww_mutex_lock(x, NULL)
269 #define WWU(x) ww_mutex_unlock(x)
270
271
272 #define LOCK_UNLOCK_2(x,y) LOCK(x); LOCK(y); UNLOCK(y); UNLOCK(x)
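/*
 * LOCK_UNLOCK_2(A, B) nests B inside A, which is enough for lockdep to
 * record an A -> B dependency even though both locks are dropped again
 * immediately.
 */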
273
274 /*
275 * Generate different permutations of the same testcase, using
276 * the same basic lock-dependency/state events:
277 */
278
279 #define GENERATE_TESTCASE(name) \
280 \
281 static void name(void) { E(); }
282
283 #define GENERATE_PERMUTATIONS_2_EVENTS(name) \
284 \
285 static void name##_12(void) { E1(); E2(); } \
286 static void name##_21(void) { E2(); E1(); }
287
288 #define GENERATE_PERMUTATIONS_3_EVENTS(name) \
289 \
290 static void name##_123(void) { E1(); E2(); E3(); } \
291 static void name##_132(void) { E1(); E3(); E2(); } \
292 static void name##_213(void) { E2(); E1(); E3(); } \
293 static void name##_231(void) { E2(); E3(); E1(); } \
294 static void name##_312(void) { E3(); E1(); E2(); } \
295 static void name##_321(void) { E3(); E2(); E1(); }
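/*
 * Example: GENERATE_PERMUTATIONS_2_EVENTS(foo) expands to foo_12() and
 * foo_21(), running E1()/E2() in both orders. The LOCK()/UNLOCK() used by
 * the E*() bodies are (re)defined by the locking-selftest-*.h headers
 * included right before each GENERATE_*() invocation, which is how one
 * scenario is stamped out for every lock type.
 */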
296
297 /*
298 * AA deadlock:
299 */
300
301 #define E() \
302 \
303 LOCK(X1); \
304 LOCK(X2); /* this one should fail */
305
306 /*
307 * 6 testcases:
308 */
309 #include "locking-selftest-spin.h"
310 GENERATE_TESTCASE(AA_spin)
311 #include "locking-selftest-wlock.h"
312 GENERATE_TESTCASE(AA_wlock)
313 #include "locking-selftest-rlock.h"
314 GENERATE_TESTCASE(AA_rlock)
315 #include "locking-selftest-mutex.h"
316 GENERATE_TESTCASE(AA_mutex)
317 #include "locking-selftest-wsem.h"
318 GENERATE_TESTCASE(AA_wsem)
319 #include "locking-selftest-rsem.h"
320 GENERATE_TESTCASE(AA_rsem)
321
322 #ifdef CONFIG_RT_MUTEXES
323 #include "locking-selftest-rtmutex.h"
324 GENERATE_TESTCASE(AA_rtmutex);
325 #endif
326
327 #undef E
328
329 /*
330 * Special case for read-locking: read locks are
331 * allowed to recurse on the same lock class:
332 */
333 static void rlock_AA1(void)
334 {
335 RL(X1);
336 RL(X1); // this one should NOT fail
337 }
338
339 static void rlock_AA1B(void)
340 {
341 RL(X1);
342 RL(X2); // this one should NOT fail
343 }
344
345 static void rsem_AA1(void)
346 {
347 RSL(X1);
348 RSL(X1); // this one should fail
349 }
350
351 static void rsem_AA1B(void)
352 {
353 RSL(X1);
354 RSL(X2); // this one should fail
355 }
356 /*
357 * The mixing of read and write locks is not allowed:
358 */
359 static void rlock_AA2(void)
360 {
361 RL(X1);
362 WL(X2); // this one should fail
363 }
364
365 static void rsem_AA2(void)
366 {
367 RSL(X1);
368 WSL(X2); // this one should fail
369 }
370
371 static void rlock_AA3(void)
372 {
373 WL(X1);
374 RL(X2); // this one should fail
375 }
376
377 static void rsem_AA3(void)
378 {
379 WSL(X1);
380 RSL(X2); // this one should fail
381 }
382
383 /*
384 * read_lock(A)
385 * spin_lock(B)
386 * spin_lock(B)
387 * write_lock(A)
388 */
389 static void rlock_ABBA1(void)
390 {
391 RL(X1);
392 L(Y1);
393 U(Y1);
394 RU(X1);
395
396 L(Y1);
397 WL(X1);
398 WU(X1);
399 U(Y1); // should fail
400 }
401
402 static void rwsem_ABBA1(void)
403 {
404 RSL(X1);
405 ML(Y1);
406 MU(Y1);
407 RSU(X1);
408
409 ML(Y1);
410 WSL(X1);
411 WSU(X1);
412 MU(Y1); // should fail
413 }
414
415 /*
416 * read_lock(A)
417 * spin_lock(B)
418 * spin_lock(B)
419 * write_lock(A)
420 *
421 * This test case checks whether the chain cache prevents us from
422 * detecting a read-lock/write-lock deadlock: if the chain cache doesn't
423 * distinguish read and write locks, the following case may happen:
424 *
425 * { read_lock(A)->lock(B) dependency exists }
426 *
427 * P0:
428 * lock(B);
429 * read_lock(A);
430 *
431 * { Not a deadlock, B -> A is added in the chain cache }
432 *
433 * P1:
434 * lock(B);
435 * write_lock(A);
436 *
437 * { B->A found in chain cache, not reported as a deadlock }
438 *
439 */
440 static void rlock_chaincache_ABBA1(void)
441 {
442 RL(X1);
443 L(Y1);
444 U(Y1);
445 RU(X1);
446
447 L(Y1);
448 RL(X1);
449 RU(X1);
450 U(Y1);
451
452 L(Y1);
453 WL(X1);
454 WU(X1);
455 U(Y1); // should fail
456 }
457
458 /*
459 * read_lock(A)
460 * spin_lock(B)
461 * spin_lock(B)
462 * read_lock(A)
463 */
464 static void rlock_ABBA2(void)
465 {
466 RL(X1);
467 L(Y1);
468 U(Y1);
469 RU(X1);
470
471 L(Y1);
472 RL(X1);
473 RU(X1);
474 U(Y1); // should NOT fail
475 }
476
477 static void rwsem_ABBA2(void)
478 {
479 RSL(X1);
480 ML(Y1);
481 MU(Y1);
482 RSU(X1);
483
484 ML(Y1);
485 RSL(X1);
486 RSU(X1);
487 MU(Y1); // should fail
488 }
489
490
491 /*
492 * write_lock(A)
493 * spin_lock(B)
494 * spin_lock(B)
495 * write_lock(A)
496 */
497 static void rlock_ABBA3(void)
498 {
499 WL(X1);
500 L(Y1);
501 U(Y1);
502 WU(X1);
503
504 L(Y1);
505 WL(X1);
506 WU(X1);
507 U(Y1); // should fail
508 }
509
510 static void rwsem_ABBA3(void)
511 {
512 WSL(X1);
513 ML(Y1);
514 MU(Y1);
515 WSU(X1);
516
517 ML(Y1);
518 WSL(X1);
519 WSU(X1);
520 MU(Y1); // should fail
521 }
522
523 /*
524 * ABBA deadlock:
525 */
526
527 #define E() \
528 \
529 LOCK_UNLOCK_2(A, B); \
530 LOCK_UNLOCK_2(B, A); /* fail */
531
532 /*
533 * 6 testcases:
534 */
535 #include "locking-selftest-spin.h"
536 GENERATE_TESTCASE(ABBA_spin)
537 #include "locking-selftest-wlock.h"
538 GENERATE_TESTCASE(ABBA_wlock)
539 #include "locking-selftest-rlock.h"
540 GENERATE_TESTCASE(ABBA_rlock)
541 #include "locking-selftest-mutex.h"
542 GENERATE_TESTCASE(ABBA_mutex)
543 #include "locking-selftest-wsem.h"
544 GENERATE_TESTCASE(ABBA_wsem)
545 #include "locking-selftest-rsem.h"
546 GENERATE_TESTCASE(ABBA_rsem)
547
548 #ifdef CONFIG_RT_MUTEXES
549 #include "locking-selftest-rtmutex.h"
550 GENERATE_TESTCASE(ABBA_rtmutex);
551 #endif
552
553 #undef E
554
555 /*
556 * AB BC CA deadlock:
557 */
558
559 #define E() \
560 \
561 LOCK_UNLOCK_2(A, B); \
562 LOCK_UNLOCK_2(B, C); \
563 LOCK_UNLOCK_2(C, A); /* fail */
564
565 /*
566 * 6 testcases:
567 */
568 #include "locking-selftest-spin.h"
569 GENERATE_TESTCASE(ABBCCA_spin)
570 #include "locking-selftest-wlock.h"
571 GENERATE_TESTCASE(ABBCCA_wlock)
572 #include "locking-selftest-rlock.h"
573 GENERATE_TESTCASE(ABBCCA_rlock)
574 #include "locking-selftest-mutex.h"
575 GENERATE_TESTCASE(ABBCCA_mutex)
576 #include "locking-selftest-wsem.h"
577 GENERATE_TESTCASE(ABBCCA_wsem)
578 #include "locking-selftest-rsem.h"
579 GENERATE_TESTCASE(ABBCCA_rsem)
580
581 #ifdef CONFIG_RT_MUTEXES
582 #include "locking-selftest-rtmutex.h"
583 GENERATE_TESTCASE(ABBCCA_rtmutex);
584 #endif
585
586 #undef E
587
588 /*
589 * AB CA BC deadlock:
590 */
591
592 #define E() \
593 \
594 LOCK_UNLOCK_2(A, B); \
595 LOCK_UNLOCK_2(C, A); \
596 LOCK_UNLOCK_2(B, C); /* fail */
597
598 /*
599 * 6 testcases:
600 */
601 #include "locking-selftest-spin.h"
602 GENERATE_TESTCASE(ABCABC_spin)
603 #include "locking-selftest-wlock.h"
604 GENERATE_TESTCASE(ABCABC_wlock)
605 #include "locking-selftest-rlock.h"
606 GENERATE_TESTCASE(ABCABC_rlock)
607 #include "locking-selftest-mutex.h"
608 GENERATE_TESTCASE(ABCABC_mutex)
609 #include "locking-selftest-wsem.h"
610 GENERATE_TESTCASE(ABCABC_wsem)
611 #include "locking-selftest-rsem.h"
612 GENERATE_TESTCASE(ABCABC_rsem)
613
614 #ifdef CONFIG_RT_MUTEXES
615 #include "locking-selftest-rtmutex.h"
616 GENERATE_TESTCASE(ABCABC_rtmutex);
617 #endif
618
619 #undef E
620
621 /*
622 * AB BC CD DA deadlock:
623 */
624
625 #define E() \
626 \
627 LOCK_UNLOCK_2(A, B); \
628 LOCK_UNLOCK_2(B, C); \
629 LOCK_UNLOCK_2(C, D); \
630 LOCK_UNLOCK_2(D, A); /* fail */
631
632 /*
633 * 6 testcases:
634 */
635 #include "locking-selftest-spin.h"
636 GENERATE_TESTCASE(ABBCCDDA_spin)
637 #include "locking-selftest-wlock.h"
638 GENERATE_TESTCASE(ABBCCDDA_wlock)
639 #include "locking-selftest-rlock.h"
640 GENERATE_TESTCASE(ABBCCDDA_rlock)
641 #include "locking-selftest-mutex.h"
642 GENERATE_TESTCASE(ABBCCDDA_mutex)
643 #include "locking-selftest-wsem.h"
644 GENERATE_TESTCASE(ABBCCDDA_wsem)
645 #include "locking-selftest-rsem.h"
646 GENERATE_TESTCASE(ABBCCDDA_rsem)
647
648 #ifdef CONFIG_RT_MUTEXES
649 #include "locking-selftest-rtmutex.h"
650 GENERATE_TESTCASE(ABBCCDDA_rtmutex);
651 #endif
652
653 #undef E
654
655 /*
656 * AB CD BD DA deadlock:
657 */
658 #define E() \
659 \
660 LOCK_UNLOCK_2(A, B); \
661 LOCK_UNLOCK_2(C, D); \
662 LOCK_UNLOCK_2(B, D); \
663 LOCK_UNLOCK_2(D, A); /* fail */
664
665 /*
666 * 6 testcases:
667 */
668 #include "locking-selftest-spin.h"
669 GENERATE_TESTCASE(ABCDBDDA_spin)
670 #include "locking-selftest-wlock.h"
671 GENERATE_TESTCASE(ABCDBDDA_wlock)
672 #include "locking-selftest-rlock.h"
673 GENERATE_TESTCASE(ABCDBDDA_rlock)
674 #include "locking-selftest-mutex.h"
675 GENERATE_TESTCASE(ABCDBDDA_mutex)
676 #include "locking-selftest-wsem.h"
677 GENERATE_TESTCASE(ABCDBDDA_wsem)
678 #include "locking-selftest-rsem.h"
679 GENERATE_TESTCASE(ABCDBDDA_rsem)
680
681 #ifdef CONFIG_RT_MUTEXES
682 #include "locking-selftest-rtmutex.h"
683 GENERATE_TESTCASE(ABCDBDDA_rtmutex);
684 #endif
685
686 #undef E
687
688 /*
689 * AB CD BC DA deadlock:
690 */
691 #define E() \
692 \
693 LOCK_UNLOCK_2(A, B); \
694 LOCK_UNLOCK_2(C, D); \
695 LOCK_UNLOCK_2(B, C); \
696 LOCK_UNLOCK_2(D, A); /* fail */
697
698 /*
699 * 6 testcases:
700 */
701 #include "locking-selftest-spin.h"
702 GENERATE_TESTCASE(ABCDBCDA_spin)
703 #include "locking-selftest-wlock.h"
704 GENERATE_TESTCASE(ABCDBCDA_wlock)
705 #include "locking-selftest-rlock.h"
706 GENERATE_TESTCASE(ABCDBCDA_rlock)
707 #include "locking-selftest-mutex.h"
708 GENERATE_TESTCASE(ABCDBCDA_mutex)
709 #include "locking-selftest-wsem.h"
710 GENERATE_TESTCASE(ABCDBCDA_wsem)
711 #include "locking-selftest-rsem.h"
712 GENERATE_TESTCASE(ABCDBCDA_rsem)
713
714 #ifdef CONFIG_RT_MUTEXES
715 #include "locking-selftest-rtmutex.h"
716 GENERATE_TESTCASE(ABCDBCDA_rtmutex);
717 #endif
718
719 #undef E
720
721 #ifdef CONFIG_PREEMPT_RT
722 # define RT_PREPARE_DBL_UNLOCK() { migrate_disable(); rcu_read_lock(); }
723 #else
724 # define RT_PREPARE_DBL_UNLOCK()
725 #endif
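/*
 * On PREEMPT_RT the spinlock unlock path re-enables migration and drops an
 * RCU read lock, so the deliberate double unlock below would otherwise
 * underflow those counts; RT_PREPARE_DBL_UNLOCK() bumps them up front and
 * dotest() restores them afterwards.
 */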
726 /*
727 * Double unlock:
728 */
729 #define E() \
730 \
731 LOCK(A); \
732 RT_PREPARE_DBL_UNLOCK(); \
733 UNLOCK(A); \
734 UNLOCK(A); /* fail */
735
736 /*
737 * 6 testcases:
738 */
739 #include "locking-selftest-spin.h"
740 GENERATE_TESTCASE(double_unlock_spin)
741 #include "locking-selftest-wlock.h"
742 GENERATE_TESTCASE(double_unlock_wlock)
743 #include "locking-selftest-rlock.h"
744 GENERATE_TESTCASE(double_unlock_rlock)
745 #include "locking-selftest-mutex.h"
746 GENERATE_TESTCASE(double_unlock_mutex)
747 #include "locking-selftest-wsem.h"
748 GENERATE_TESTCASE(double_unlock_wsem)
749 #include "locking-selftest-rsem.h"
750 GENERATE_TESTCASE(double_unlock_rsem)
751
752 #ifdef CONFIG_RT_MUTEXES
753 #include "locking-selftest-rtmutex.h"
754 GENERATE_TESTCASE(double_unlock_rtmutex);
755 #endif
756
757 #undef E
758
759 /*
760 * initializing a held lock:
761 */
762 #define E() \
763 \
764 LOCK(A); \
765 INIT(A); /* fail */
766
767 /*
768 * 6 testcases:
769 */
770 #include "locking-selftest-spin.h"
771 GENERATE_TESTCASE(init_held_spin)
772 #include "locking-selftest-wlock.h"
773 GENERATE_TESTCASE(init_held_wlock)
774 #include "locking-selftest-rlock.h"
775 GENERATE_TESTCASE(init_held_rlock)
776 #include "locking-selftest-mutex.h"
777 GENERATE_TESTCASE(init_held_mutex)
778 #include "locking-selftest-wsem.h"
779 GENERATE_TESTCASE(init_held_wsem)
780 #include "locking-selftest-rsem.h"
781 GENERATE_TESTCASE(init_held_rsem)
782
783 #ifdef CONFIG_RT_MUTEXES
784 #include "locking-selftest-rtmutex.h"
785 GENERATE_TESTCASE(init_held_rtmutex);
786 #endif
787
788 #undef E
789
790 /*
791 * locking an irq-safe lock with irqs enabled:
792 */
793 #define E1() \
794 \
795 IRQ_ENTER(); \
796 LOCK(A); \
797 UNLOCK(A); \
798 IRQ_EXIT();
799
800 #define E2() \
801 \
802 LOCK(A); \
803 UNLOCK(A);
804
805 /*
806 * Generate 24 testcases:
807 */
808 #include "locking-selftest-spin-hardirq.h"
809 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
810
811 #include "locking-selftest-rlock-hardirq.h"
812 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
813
814 #include "locking-selftest-wlock-hardirq.h"
815 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock)
816
817 #ifndef CONFIG_PREEMPT_RT
818 #include "locking-selftest-spin-softirq.h"
819 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin)
820
821 #include "locking-selftest-rlock-softirq.h"
822 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
823
824 #include "locking-selftest-wlock-softirq.h"
825 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
826 #endif
827
828 #undef E1
829 #undef E2
830
831 #ifndef CONFIG_PREEMPT_RT
832 /*
833 * Enabling hardirqs with a softirq-safe lock held:
834 */
835 #define E1() \
836 \
837 SOFTIRQ_ENTER(); \
838 LOCK(A); \
839 UNLOCK(A); \
840 SOFTIRQ_EXIT();
841
842 #define E2() \
843 \
844 HARDIRQ_DISABLE(); \
845 LOCK(A); \
846 HARDIRQ_ENABLE(); \
847 UNLOCK(A);
848
849 /*
850 * Generate 12 testcases:
851 */
852 #include "locking-selftest-spin.h"
853 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_spin)
854
855 #include "locking-selftest-wlock.h"
856 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_wlock)
857
858 #include "locking-selftest-rlock.h"
859 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
860
861 #undef E1
862 #undef E2
863
864 #endif
865
866 /*
867 * Enabling irqs with an irq-safe lock held:
868 */
869 #define E1() \
870 \
871 IRQ_ENTER(); \
872 LOCK(A); \
873 UNLOCK(A); \
874 IRQ_EXIT();
875
876 #define E2() \
877 \
878 IRQ_DISABLE(); \
879 LOCK(A); \
880 IRQ_ENABLE(); \
881 UNLOCK(A);
882
883 /*
884 * Generate 24 testcases:
885 */
886 #include "locking-selftest-spin-hardirq.h"
887 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
888
889 #include "locking-selftest-rlock-hardirq.h"
890 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
891
892 #include "locking-selftest-wlock-hardirq.h"
893 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock)
894
895 #ifndef CONFIG_PREEMPT_RT
896 #include "locking-selftest-spin-softirq.h"
897 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin)
898
899 #include "locking-selftest-rlock-softirq.h"
900 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
901
902 #include "locking-selftest-wlock-softirq.h"
903 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
904 #endif
905
906 #undef E1
907 #undef E2
908
909 /*
910 * Acquiring an irq-unsafe lock while holding an irq-safe lock:
911 */
912 #define E1() \
913 \
914 LOCK(A); \
915 LOCK(B); \
916 UNLOCK(B); \
917 UNLOCK(A); \
918
919 #define E2() \
920 \
921 LOCK(B); \
922 UNLOCK(B);
923
924 #define E3() \
925 \
926 IRQ_ENTER(); \
927 LOCK(A); \
928 UNLOCK(A); \
929 IRQ_EXIT();
930
931 /*
932 * Generate 36 testcases:
933 */
934 #include "locking-selftest-spin-hardirq.h"
935 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
936
937 #include "locking-selftest-rlock-hardirq.h"
938 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
939
940 #include "locking-selftest-wlock-hardirq.h"
941 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock)
942
943 #ifndef CONFIG_PREEMPT_RT
944 #include "locking-selftest-spin-softirq.h"
945 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin)
946
947 #include "locking-selftest-rlock-softirq.h"
948 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
949
950 #include "locking-selftest-wlock-softirq.h"
951 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
952 #endif
953
954 #undef E1
955 #undef E2
956 #undef E3
957
958 /*
959 * If a lock turns softirq-safe, but earlier it took
960 * a softirq-unsafe lock:
961 */
962
963 #define E1() \
964 IRQ_DISABLE(); \
965 LOCK(A); \
966 LOCK(B); \
967 UNLOCK(B); \
968 UNLOCK(A); \
969 IRQ_ENABLE();
970
971 #define E2() \
972 LOCK(B); \
973 UNLOCK(B);
974
975 #define E3() \
976 IRQ_ENTER(); \
977 LOCK(A); \
978 UNLOCK(A); \
979 IRQ_EXIT();
980
981 /*
982 * Generate 36 testcases:
983 */
984 #include "locking-selftest-spin-hardirq.h"
985 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
986
987 #include "locking-selftest-rlock-hardirq.h"
988 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
989
990 #include "locking-selftest-wlock-hardirq.h"
991 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock)
992
993 #ifndef CONFIG_PREEMPT_RT
994 #include "locking-selftest-spin-softirq.h"
995 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin)
996
997 #include "locking-selftest-rlock-softirq.h"
998 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
999
1000 #include "locking-selftest-wlock-softirq.h"
1001 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
1002 #endif
1003
1004 #undef E1
1005 #undef E2
1006 #undef E3
1007
1008 /*
1009 * read-lock / write-lock irq inversion.
1010 *
1011 * Deadlock scenario:
1012 *
1013 * CPU#1 is at #1, i.e. it has write-locked A, but has not
1014 * taken B yet.
1015 *
1016 * CPU#2 is at #2, i.e. it has locked B.
1017 *
1018 * Hardirq hits CPU#2 at point #2 and is trying to read-lock A.
1019 *
1020 * The deadlock occurs because CPU#1 will spin on B, and CPU#2
1021 * will spin on A.
1022 */
1023
1024 #define E1() \
1025 \
1026 IRQ_DISABLE(); \
1027 WL(A); \
1028 LOCK(B); \
1029 UNLOCK(B); \
1030 WU(A); \
1031 IRQ_ENABLE();
1032
1033 #define E2() \
1034 \
1035 LOCK(B); \
1036 UNLOCK(B);
1037
1038 #define E3() \
1039 \
1040 IRQ_ENTER(); \
1041 RL(A); \
1042 RU(A); \
1043 IRQ_EXIT();
1044
1045 /*
1046 * Generate 36 testcases:
1047 */
1048 #include "locking-selftest-spin-hardirq.h"
1049 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_spin)
1050
1051 #include "locking-selftest-rlock-hardirq.h"
1052 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock)
1053
1054 #include "locking-selftest-wlock-hardirq.h"
1055 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock)
1056
1057 #ifndef CONFIG_PREEMPT_RT
1058 #include "locking-selftest-spin-softirq.h"
1059 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin)
1060
1061 #include "locking-selftest-rlock-softirq.h"
1062 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock)
1063
1064 #include "locking-selftest-wlock-softirq.h"
1065 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
1066 #endif
1067
1068 #undef E1
1069 #undef E2
1070 #undef E3
1071
1072 /*
1073 * write-read / write-read / write-read deadlock even if read is recursive
1074 */
1075
1076 #define E1() \
1077 \
1078 WL(X1); \
1079 RL(Y1); \
1080 RU(Y1); \
1081 WU(X1);
1082
1083 #define E2() \
1084 \
1085 WL(Y1); \
1086 RL(Z1); \
1087 RU(Z1); \
1088 WU(Y1);
1089
1090 #define E3() \
1091 \
1092 WL(Z1); \
1093 RL(X1); \
1094 RU(X1); \
1095 WU(Z1);
1096
1097 #include "locking-selftest-rlock.h"
1098 GENERATE_PERMUTATIONS_3_EVENTS(W1R2_W2R3_W3R1)
1099
1100 #undef E1
1101 #undef E2
1102 #undef E3
1103
1104 /*
1105 * write-write / read-read / write-read deadlock even if read is recursive
1106 */
1107
1108 #define E1() \
1109 \
1110 WL(X1); \
1111 WL(Y1); \
1112 WU(Y1); \
1113 WU(X1);
1114
1115 #define E2() \
1116 \
1117 RL(Y1); \
1118 RL(Z1); \
1119 RU(Z1); \
1120 RU(Y1);
1121
1122 #define E3() \
1123 \
1124 WL(Z1); \
1125 RL(X1); \
1126 RU(X1); \
1127 WU(Z1);
1128
1129 #include "locking-selftest-rlock.h"
1130 GENERATE_PERMUTATIONS_3_EVENTS(W1W2_R2R3_W3R1)
1131
1132 #undef E1
1133 #undef E2
1134 #undef E3
1135
1136 /*
1137 * write-write / read-read / read-write is not deadlock when read is recursive
1138 */
1139
1140 #define E1() \
1141 \
1142 WL(X1); \
1143 WL(Y1); \
1144 WU(Y1); \
1145 WU(X1);
1146
1147 #define E2() \
1148 \
1149 RL(Y1); \
1150 RL(Z1); \
1151 RU(Z1); \
1152 RU(Y1);
1153
1154 #define E3() \
1155 \
1156 RL(Z1); \
1157 WL(X1); \
1158 WU(X1); \
1159 RU(Z1);
1160
1161 #include "locking-selftest-rlock.h"
1162 GENERATE_PERMUTATIONS_3_EVENTS(W1R2_R2R3_W3W1)
1163
1164 #undef E1
1165 #undef E2
1166 #undef E3
1167
1168 /*
1169 * write-read / read-read / write-write is not deadlock when read is recursive
1170 */
1171
1172 #define E1() \
1173 \
1174 WL(X1); \
1175 RL(Y1); \
1176 RU(Y1); \
1177 WU(X1);
1178
1179 #define E2() \
1180 \
1181 RL(Y1); \
1182 RL(Z1); \
1183 RU(Z1); \
1184 RU(Y1);
1185
1186 #define E3() \
1187 \
1188 WL(Z1); \
1189 WL(X1); \
1190 WU(X1); \
1191 WU(Z1);
1192
1193 #include "locking-selftest-rlock.h"
1194 GENERATE_PERMUTATIONS_3_EVENTS(W1W2_R2R3_R3W1)
1195
1196 #undef E1
1197 #undef E2
1198 #undef E3
1199 /*
1200 * read-lock / write-lock recursion that is actually safe.
1201 */
1202
1203 #define E1() \
1204 \
1205 IRQ_DISABLE(); \
1206 WL(A); \
1207 WU(A); \
1208 IRQ_ENABLE();
1209
1210 #define E2() \
1211 \
1212 RL(A); \
1213 RU(A); \
1214
1215 #define E3() \
1216 \
1217 IRQ_ENTER(); \
1218 LOCK(A); \
1219 L(B); \
1220 U(B); \
1221 UNLOCK(A); \
1222 IRQ_EXIT();
1223
1224 /*
1225 * Generate 24 testcases:
1226 */
1227 #include "locking-selftest-hardirq.h"
1228 #include "locking-selftest-rlock.h"
1229 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_rlock)
1230
1231 #include "locking-selftest-wlock.h"
1232 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_wlock)
1233
1234 #ifndef CONFIG_PREEMPT_RT
1235 #include "locking-selftest-softirq.h"
1236 #include "locking-selftest-rlock.h"
1237 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_rlock)
1238
1239 #include "locking-selftest-wlock.h"
1240 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock)
1241 #endif
1242
1243 #undef E1
1244 #undef E2
1245 #undef E3
1246
1247 /*
1248 * read-lock / write-lock recursion that is unsafe.
1249 */
1250
1251 #define E1() \
1252 \
1253 IRQ_DISABLE(); \
1254 L(B); \
1255 LOCK(A); \
1256 UNLOCK(A); \
1257 U(B); \
1258 IRQ_ENABLE();
1259
1260 #define E2() \
1261 \
1262 RL(A); \
1263 RU(A); \
1264
1265 #define E3() \
1266 \
1267 IRQ_ENTER(); \
1268 L(B); \
1269 U(B); \
1270 IRQ_EXIT();
1271
1272 /*
1273 * Generate 24 testcases:
1274 */
1275 #include "locking-selftest-hardirq.h"
1276 #include "locking-selftest-rlock.h"
1277 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_rlock)
1278
1279 #include "locking-selftest-wlock.h"
1280 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_wlock)
1281
1282 #ifndef CONFIG_PREEMPT_RT
1283 #include "locking-selftest-softirq.h"
1284 #include "locking-selftest-rlock.h"
1285 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_rlock)
1286
1287 #include "locking-selftest-wlock.h"
1288 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_wlock)
1289 #endif
1290
1291 #undef E1
1292 #undef E2
1293 #undef E3
1294 /*
1295 * read-lock / write-lock recursion that is unsafe.
1296 *
1297 * A is an ENABLED_*_READ lock
1298 * B is a USED_IN_*_READ lock
1299 *
1300 * read_lock(A);
1301 * write_lock(B);
1302 * <interrupt>
1303 * read_lock(B);
1304 * write_lock(A); // if this one is read_lock(), no deadlock
1305 */
1306
1307 #define E1() \
1308 \
1309 IRQ_DISABLE(); \
1310 WL(B); \
1311 LOCK(A); \
1312 UNLOCK(A); \
1313 WU(B); \
1314 IRQ_ENABLE();
1315
1316 #define E2() \
1317 \
1318 RL(A); \
1319 RU(A); \
1320
1321 #define E3() \
1322 \
1323 IRQ_ENTER(); \
1324 RL(B); \
1325 RU(B); \
1326 IRQ_EXIT();
1327
1328 /*
1329 * Generate 24 testcases:
1330 */
1331 #include "locking-selftest-hardirq.h"
1332 #include "locking-selftest-rlock.h"
1333 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_rlock)
1334
1335 #include "locking-selftest-wlock.h"
1336 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_wlock)
1337
1338 #ifndef CONFIG_PREEMPT_RT
1339 #include "locking-selftest-softirq.h"
1340 #include "locking-selftest-rlock.h"
1341 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_rlock)
1342
1343 #include "locking-selftest-wlock.h"
1344 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
1345 #endif
1346
1347 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1348 # define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map)
1349 # define I_RAW_SPINLOCK(x) lockdep_reset_lock(&raw_lock_##x.dep_map)
1350 # define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map)
1351 # define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
1352 # define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
1353 # define I_WW(x) lockdep_reset_lock(&x.dep_map)
1354 # define I_LOCAL_LOCK(x) lockdep_reset_lock(this_cpu_ptr(&local_##x.dep_map))
1355 #ifdef CONFIG_RT_MUTEXES
1356 # define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map)
1357 #endif
1358 #else
1359 # define I_SPINLOCK(x)
1360 # define I_RAW_SPINLOCK(x)
1361 # define I_RWLOCK(x)
1362 # define I_MUTEX(x)
1363 # define I_RWSEM(x)
1364 # define I_WW(x)
1365 # define I_LOCAL_LOCK(x)
1366 #endif
1367
1368 #ifndef I_RTMUTEX
1369 # define I_RTMUTEX(x)
1370 #endif
1371
1372 #ifdef CONFIG_RT_MUTEXES
1373 #define I2_RTMUTEX(x) rt_mutex_init(&rtmutex_##x)
1374 #else
1375 #define I2_RTMUTEX(x)
1376 #endif
1377
1378 #define I1(x) \
1379 do { \
1380 I_SPINLOCK(x); \
1381 I_RWLOCK(x); \
1382 I_MUTEX(x); \
1383 I_RWSEM(x); \
1384 I_RTMUTEX(x); \
1385 } while (0)
1386
1387 #define I2(x) \
1388 do { \
1389 spin_lock_init(&lock_##x); \
1390 rwlock_init(&rwlock_##x); \
1391 mutex_init(&mutex_##x); \
1392 init_rwsem(&rwsem_##x); \
1393 I2_RTMUTEX(x); \
1394 } while (0)
1395
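/*
 * reset_locks() runs after every testcase: it unregisters each lock's
 * lockdep state (I1()/I_*()), resets lockdep's internal tables via
 * lockdep_reset(), and then re-initializes everything (I2(),
 * init_shared_classes(), the ww state) so that no dependency recorded by
 * one test can leak into the next.
 */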
1396 static void reset_locks(void)
1397 {
1398 local_irq_disable();
1399 lockdep_free_key_range(&ww_lockdep.acquire_key, 1);
1400 lockdep_free_key_range(&ww_lockdep.mutex_key, 1);
1401
1402 I1(A); I1(B); I1(C); I1(D);
1403 I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
1404 I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
1405 I_RAW_SPINLOCK(A); I_RAW_SPINLOCK(B);
1406 I_LOCAL_LOCK(A);
1407
1408 lockdep_reset();
1409
1410 I2(A); I2(B); I2(C); I2(D);
1411 init_shared_classes();
1412 raw_spin_lock_init(&raw_lock_A);
1413 raw_spin_lock_init(&raw_lock_B);
1414 local_lock_init(this_cpu_ptr(&local_A));
1415
1416 ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
1417 memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
1418 memset(&ww_lockdep.acquire_key, 0, sizeof(ww_lockdep.acquire_key));
1419 memset(&ww_lockdep.mutex_key, 0, sizeof(ww_lockdep.mutex_key));
1420 local_irq_enable();
1421 }
1422
1423 #undef I
1424
1425 static int testcase_total;
1426 static int testcase_successes;
1427 static int expected_testcase_failures;
1428 static int unexpected_testcase_failures;
1429
1430 static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
1431 {
1432 int saved_preempt_count = preempt_count();
1433 #ifdef CONFIG_PREEMPT_RT
1434 #ifdef CONFIG_SMP
1435 int saved_mgd_count = current->migration_disabled;
1436 #endif
1437 int saved_rcu_count = current->rcu_read_lock_nesting;
1438 #endif
1439
1440 WARN_ON(irqs_disabled());
1441
1442 debug_locks_silent = !(debug_locks_verbose & lockclass_mask);
1443
1444 testcase_fn();
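/*
 * A testcase that trips lockdep clears debug_locks, so comparing
 * debug_locks against 'expected' below tells us whether the splat (or the
 * lack of one) was the intended outcome.
 */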
1445 /*
1446 * Filter out expected failures:
1447 */
1448 #ifndef CONFIG_PROVE_LOCKING
1449 if (expected == FAILURE && debug_locks) {
1450 expected_testcase_failures++;
1451 pr_cont("failed|");
1452 }
1453 else
1454 #endif
1455 if (debug_locks != expected) {
1456 unexpected_testcase_failures++;
1457 pr_cont("FAILED|");
1458 } else {
1459 testcase_successes++;
1460 pr_cont(" ok |");
1461 }
1462 testcase_total++;
1463
1464 if (debug_locks_verbose & lockclass_mask)
1465 pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
1466 lockclass_mask, debug_locks, expected);
1467 /*
1468 * Some tests (e.g. double-unlock) might corrupt the preemption
1469 * count, so restore it:
1470 */
1471 preempt_count_set(saved_preempt_count);
1472
1473 #ifdef CONFIG_PREEMPT_RT
1474 #ifdef CONFIG_SMP
1475 while (current->migration_disabled > saved_mgd_count)
1476 migrate_enable();
1477 #endif
1478
1479 while (current->rcu_read_lock_nesting > saved_rcu_count)
1480 rcu_read_unlock();
1481 WARN_ON_ONCE(current->rcu_read_lock_nesting < saved_rcu_count);
1482 #endif
1483
1484 #ifdef CONFIG_TRACE_IRQFLAGS
1485 if (softirq_count())
1486 current->softirqs_enabled = 0;
1487 else
1488 current->softirqs_enabled = 1;
1489 #endif
1490
1491 reset_locks();
1492 }
1493
1494 #ifdef CONFIG_RT_MUTEXES
1495 #define dotest_rt(fn, e, m) dotest((fn), (e), (m))
1496 #else
1497 #define dotest_rt(fn, e, m)
1498 #endif
1499
1500 static inline void print_testname(const char *testname)
1501 {
1502 printk("%33s:", testname);
1503 }
1504
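/*
 * The DO_TESTCASE_*() helpers below just bundle print_testname()/dotest()
 * calls: the numeric suffix selects which permutation to run (the
 * _12/_123/... functions emitted by GENERATE_PERMUTATIONS_*_EVENTS), the
 * "B"/"RRB" variants expect FAILURE rather than SUCCESS, and the 2I/2x2RW
 * style wrappers add hardirq and (on !PREEMPT_RT) softirq flavours.
 */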
1505 #define DO_TESTCASE_1(desc, name, nr) \
1506 print_testname(desc"/"#nr); \
1507 dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
1508 pr_cont("\n");
1509
1510 #define DO_TESTCASE_1B(desc, name, nr) \
1511 print_testname(desc"/"#nr); \
1512 dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \
1513 pr_cont("\n");
1514
1515 #define DO_TESTCASE_1RR(desc, name, nr) \
1516 print_testname(desc"/"#nr); \
1517 pr_cont(" |"); \
1518 dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
1519 pr_cont("\n");
1520
1521 #define DO_TESTCASE_1RRB(desc, name, nr) \
1522 print_testname(desc"/"#nr); \
1523 pr_cont(" |"); \
1524 dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \
1525 pr_cont("\n");
1526
1527
1528 #define DO_TESTCASE_3(desc, name, nr) \
1529 print_testname(desc"/"#nr); \
1530 dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \
1531 dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
1532 dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
1533 pr_cont("\n");
1534
1535 #define DO_TESTCASE_3RW(desc, name, nr) \
1536 print_testname(desc"/"#nr); \
1537 dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\
1538 dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
1539 dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
1540 pr_cont("\n");
1541
1542 #define DO_TESTCASE_2RW(desc, name, nr) \
1543 print_testname(desc"/"#nr); \
1544 pr_cont(" |"); \
1545 dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
1546 dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
1547 pr_cont("\n");
1548
1549 #define DO_TESTCASE_2x2RW(desc, name, nr) \
1550 DO_TESTCASE_2RW("hard-"desc, name##_hard, nr) \
1551 NON_RT(DO_TESTCASE_2RW("soft-"desc, name##_soft, nr)) \
1552
1553 #define DO_TESTCASE_6x2x2RW(desc, name) \
1554 DO_TESTCASE_2x2RW(desc, name, 123); \
1555 DO_TESTCASE_2x2RW(desc, name, 132); \
1556 DO_TESTCASE_2x2RW(desc, name, 213); \
1557 DO_TESTCASE_2x2RW(desc, name, 231); \
1558 DO_TESTCASE_2x2RW(desc, name, 312); \
1559 DO_TESTCASE_2x2RW(desc, name, 321);
1560
1561 #define DO_TESTCASE_6(desc, name) \
1562 print_testname(desc); \
1563 dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \
1564 dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \
1565 dotest(name##_rlock, FAILURE, LOCKTYPE_RWLOCK); \
1566 dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
1567 dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
1568 dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
1569 dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \
1570 pr_cont("\n");
1571
1572 #define DO_TESTCASE_6_SUCCESS(desc, name) \
1573 print_testname(desc); \
1574 dotest(name##_spin, SUCCESS, LOCKTYPE_SPIN); \
1575 dotest(name##_wlock, SUCCESS, LOCKTYPE_RWLOCK); \
1576 dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \
1577 dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \
1578 dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \
1579 dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \
1580 dotest_rt(name##_rtmutex, SUCCESS, LOCKTYPE_RTMUTEX); \
1581 pr_cont("\n");
1582
1583 /*
1584 * 'read' variant: rlocks must not trigger.
1585 */
1586 #define DO_TESTCASE_6R(desc, name) \
1587 print_testname(desc); \
1588 dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \
1589 dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \
1590 dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \
1591 dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
1592 dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
1593 dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
1594 dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \
1595 pr_cont("\n");
1596
1597 #define DO_TESTCASE_2I(desc, name, nr) \
1598 DO_TESTCASE_1("hard-"desc, name##_hard, nr); \
1599 NON_RT(DO_TESTCASE_1("soft-"desc, name##_soft, nr));
1600
1601 #define DO_TESTCASE_2IB(desc, name, nr) \
1602 DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \
1603 NON_RT(DO_TESTCASE_1B("soft-"desc, name##_soft, nr));
1604
1605 #define DO_TESTCASE_6I(desc, name, nr) \
1606 DO_TESTCASE_3("hard-"desc, name##_hard, nr); \
1607 NON_RT(DO_TESTCASE_3("soft-"desc, name##_soft, nr));
1608
1609 #define DO_TESTCASE_6IRW(desc, name, nr) \
1610 DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \
1611 NON_RT(DO_TESTCASE_3RW("soft-"desc, name##_soft, nr));
1612
1613 #define DO_TESTCASE_2x3(desc, name) \
1614 DO_TESTCASE_3(desc, name, 12); \
1615 DO_TESTCASE_3(desc, name, 21);
1616
1617 #define DO_TESTCASE_2x6(desc, name) \
1618 DO_TESTCASE_6I(desc, name, 12); \
1619 DO_TESTCASE_6I(desc, name, 21);
1620
1621 #define DO_TESTCASE_6x2(desc, name) \
1622 DO_TESTCASE_2I(desc, name, 123); \
1623 DO_TESTCASE_2I(desc, name, 132); \
1624 DO_TESTCASE_2I(desc, name, 213); \
1625 DO_TESTCASE_2I(desc, name, 231); \
1626 DO_TESTCASE_2I(desc, name, 312); \
1627 DO_TESTCASE_2I(desc, name, 321);
1628
1629 #define DO_TESTCASE_6x2B(desc, name) \
1630 DO_TESTCASE_2IB(desc, name, 123); \
1631 DO_TESTCASE_2IB(desc, name, 132); \
1632 DO_TESTCASE_2IB(desc, name, 213); \
1633 DO_TESTCASE_2IB(desc, name, 231); \
1634 DO_TESTCASE_2IB(desc, name, 312); \
1635 DO_TESTCASE_2IB(desc, name, 321);
1636
1637 #define DO_TESTCASE_6x1RR(desc, name) \
1638 DO_TESTCASE_1RR(desc, name, 123); \
1639 DO_TESTCASE_1RR(desc, name, 132); \
1640 DO_TESTCASE_1RR(desc, name, 213); \
1641 DO_TESTCASE_1RR(desc, name, 231); \
1642 DO_TESTCASE_1RR(desc, name, 312); \
1643 DO_TESTCASE_1RR(desc, name, 321);
1644
1645 #define DO_TESTCASE_6x1RRB(desc, name) \
1646 DO_TESTCASE_1RRB(desc, name, 123); \
1647 DO_TESTCASE_1RRB(desc, name, 132); \
1648 DO_TESTCASE_1RRB(desc, name, 213); \
1649 DO_TESTCASE_1RRB(desc, name, 231); \
1650 DO_TESTCASE_1RRB(desc, name, 312); \
1651 DO_TESTCASE_1RRB(desc, name, 321);
1652
1653 #define DO_TESTCASE_6x6(desc, name) \
1654 DO_TESTCASE_6I(desc, name, 123); \
1655 DO_TESTCASE_6I(desc, name, 132); \
1656 DO_TESTCASE_6I(desc, name, 213); \
1657 DO_TESTCASE_6I(desc, name, 231); \
1658 DO_TESTCASE_6I(desc, name, 312); \
1659 DO_TESTCASE_6I(desc, name, 321);
1660
1661 #define DO_TESTCASE_6x6RW(desc, name) \
1662 DO_TESTCASE_6IRW(desc, name, 123); \
1663 DO_TESTCASE_6IRW(desc, name, 132); \
1664 DO_TESTCASE_6IRW(desc, name, 213); \
1665 DO_TESTCASE_6IRW(desc, name, 231); \
1666 DO_TESTCASE_6IRW(desc, name, 312); \
1667 DO_TESTCASE_6IRW(desc, name, 321);
1668
1669 static void ww_test_fail_acquire(void)
1670 {
1671 int ret;
1672
1673 WWAI(&t);
1674 t.stamp++;
1675
1676 ret = WWL(&o, &t);
1677
1678 if (WARN_ON(!o.ctx) ||
1679 WARN_ON(ret))
1680 return;
1681
1682 /* No lockdep test, pure API */
1683 ret = WWL(&o, &t);
1684 WARN_ON(ret != -EALREADY);
1685
1686 ret = WWT(&o);
1687 WARN_ON(ret);
1688
1689 t2 = t;
1690 t2.stamp++;
1691 ret = WWL(&o, &t2);
1692 WARN_ON(ret != -EDEADLK);
1693 WWU(&o);
1694
1695 if (WWT(&o))
1696 WWU(&o);
1697 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1698 else
1699 DEBUG_LOCKS_WARN_ON(1);
1700 #endif
1701 }
1702
1703 #ifdef CONFIG_PREEMPT_RT
1704 #define ww_mutex_base_lock(b) rt_mutex_lock(b)
1705 #define ww_mutex_base_trylock(b) rt_mutex_trylock(b)
1706 #define ww_mutex_base_lock_nest_lock(b, b2) rt_mutex_lock_nest_lock(b, b2)
1707 #define ww_mutex_base_lock_interruptible(b) rt_mutex_lock_interruptible(b)
1708 #define ww_mutex_base_lock_killable(b) rt_mutex_lock_killable(b)
1709 #define ww_mutex_base_unlock(b) rt_mutex_unlock(b)
1710 #else
1711 #define ww_mutex_base_lock(b) mutex_lock(b)
1712 #define ww_mutex_base_trylock(b) mutex_trylock(b)
1713 #define ww_mutex_base_lock_nest_lock(b, b2) mutex_lock_nest_lock(b, b2)
1714 #define ww_mutex_base_lock_interruptible(b) mutex_lock_interruptible(b)
1715 #define ww_mutex_base_lock_killable(b) mutex_lock_killable(b)
1716 #define ww_mutex_base_unlock(b) mutex_unlock(b)
1717 #endif
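/*
 * On PREEMPT_RT the ww_mutex sits on top of an rt_mutex, so the "plain"
 * base-lock operations used by the tests below map to the rt_mutex API;
 * otherwise they map straight to the regular mutex API.
 */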
1718
1719 static void ww_test_normal(void)
1720 {
1721 int ret;
1722
1723 WWAI(&t);
1724
1725 /*
1726 * None of the ww_mutex codepaths should be taken in the 'normal'
1727 * mutex calls. The easiest way to verify this is by using the
1728 * normal mutex calls, and making sure o.ctx is unmodified.
1729 */
1730
1731 /* mutex_lock (and indirectly, mutex_lock_nested) */
1732 o.ctx = (void *)~0UL;
1733 ww_mutex_base_lock(&o.base);
1734 ww_mutex_base_unlock(&o.base);
1735 WARN_ON(o.ctx != (void *)~0UL);
1736
1737 /* mutex_lock_interruptible (and *_nested) */
1738 o.ctx = (void *)~0UL;
1739 ret = ww_mutex_base_lock_interruptible(&o.base);
1740 if (!ret)
1741 ww_mutex_base_unlock(&o.base);
1742 else
1743 WARN_ON(1);
1744 WARN_ON(o.ctx != (void *)~0UL);
1745
1746 /* mutex_lock_killable (and *_nested) */
1747 o.ctx = (void *)~0UL;
1748 ret = ww_mutex_base_lock_killable(&o.base);
1749 if (!ret)
1750 ww_mutex_base_unlock(&o.base);
1751 else
1752 WARN_ON(1);
1753 WARN_ON(o.ctx != (void *)~0UL);
1754
1755 /* trylock, succeeding */
1756 o.ctx = (void *)~0UL;
1757 ret = ww_mutex_base_trylock(&o.base);
1758 WARN_ON(!ret);
1759 if (ret)
1760 ww_mutex_base_unlock(&o.base);
1761 else
1762 WARN_ON(1);
1763 WARN_ON(o.ctx != (void *)~0UL);
1764
1765 /* trylock, failing */
1766 o.ctx = (void *)~0UL;
1767 ww_mutex_base_lock(&o.base);
1768 ret = ww_mutex_base_trylock(&o.base);
1769 WARN_ON(ret);
1770 ww_mutex_base_unlock(&o.base);
1771 WARN_ON(o.ctx != (void *)~0UL);
1772
1773 /* nest_lock */
1774 o.ctx = (void *)~0UL;
1775 ww_mutex_base_lock_nest_lock(&o.base, &t);
1776 ww_mutex_base_unlock(&o.base);
1777 WARN_ON(o.ctx != (void *)~0UL);
1778 }
1779
1780 static void ww_test_two_contexts(void)
1781 {
1782 WWAI(&t);
1783 WWAI(&t2);
1784 }
1785
1786 static void ww_test_diff_class(void)
1787 {
1788 WWAI(&t);
1789 #ifdef DEBUG_WW_MUTEXES
1790 t.ww_class = NULL;
1791 #endif
1792 WWL(&o, &t);
1793 }
1794
1795 static void ww_test_context_done_twice(void)
1796 {
1797 WWAI(&t);
1798 WWAD(&t);
1799 WWAD(&t);
1800 WWAF(&t);
1801 }
1802
1803 static void ww_test_context_unlock_twice(void)
1804 {
1805 WWAI(&t);
1806 WWAD(&t);
1807 WWAF(&t);
1808 WWAF(&t);
1809 }
1810
1811 static void ww_test_context_fini_early(void)
1812 {
1813 WWAI(&t);
1814 WWL(&o, &t);
1815 WWAD(&t);
1816 WWAF(&t);
1817 }
1818
1819 static void ww_test_context_lock_after_done(void)
1820 {
1821 WWAI(&t);
1822 WWAD(&t);
1823 WWL(&o, &t);
1824 }
1825
1826 static void ww_test_object_unlock_twice(void)
1827 {
1828 WWL1(&o);
1829 WWU(&o);
1830 WWU(&o);
1831 }
1832
1833 static void ww_test_object_lock_unbalanced(void)
1834 {
1835 WWAI(&t);
1836 WWL(&o, &t);
1837 t.acquired = 0;
1838 WWU(&o);
1839 WWAF(&t);
1840 }
1841
1842 static void ww_test_object_lock_stale_context(void)
1843 {
1844 WWAI(&t);
1845 o.ctx = &t2;
1846 WWL(&o, &t);
1847 }
1848
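/*
 * The edeadlk tests below fake a second owner for o2 (and o3): they take
 * the base mutex, drop the lockdep acquisition again with mutex_release(),
 * and point ->ctx at t2, so that acquiring the same ww_mutex with context t
 * is expected to return -EDEADLK.
 */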
1849 static void ww_test_edeadlk_normal(void)
1850 {
1851 int ret;
1852
1853 ww_mutex_base_lock(&o2.base);
1854 o2.ctx = &t2;
1855 mutex_release(&o2.base.dep_map, _THIS_IP_);
1856
1857 WWAI(&t);
1858 t2 = t;
1859 t2.stamp--;
1860
1861 ret = WWL(&o, &t);
1862 WARN_ON(ret);
1863
1864 ret = WWL(&o2, &t);
1865 WARN_ON(ret != -EDEADLK);
1866
1867 o2.ctx = NULL;
1868 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
1869 ww_mutex_base_unlock(&o2.base);
1870 WWU(&o);
1871
1872 WWL(&o2, &t);
1873 }
1874
1875 static void ww_test_edeadlk_normal_slow(void)
1876 {
1877 int ret;
1878
1879 ww_mutex_base_lock(&o2.base);
1880 mutex_release(&o2.base.dep_map, _THIS_IP_);
1881 o2.ctx = &t2;
1882
1883 WWAI(&t);
1884 t2 = t;
1885 t2.stamp--;
1886
1887 ret = WWL(&o, &t);
1888 WARN_ON(ret);
1889
1890 ret = WWL(&o2, &t);
1891 WARN_ON(ret != -EDEADLK);
1892
1893 o2.ctx = NULL;
1894 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
1895 ww_mutex_base_unlock(&o2.base);
1896 WWU(&o);
1897
1898 ww_mutex_lock_slow(&o2, &t);
1899 }
1900
1901 static void ww_test_edeadlk_no_unlock(void)
1902 {
1903 int ret;
1904
1905 ww_mutex_base_lock(&o2.base);
1906 o2.ctx = &t2;
1907 mutex_release(&o2.base.dep_map, _THIS_IP_);
1908
1909 WWAI(&t);
1910 t2 = t;
1911 t2.stamp--;
1912
1913 ret = WWL(&o, &t);
1914 WARN_ON(ret);
1915
1916 ret = WWL(&o2, &t);
1917 WARN_ON(ret != -EDEADLK);
1918
1919 o2.ctx = NULL;
1920 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
1921 ww_mutex_base_unlock(&o2.base);
1922
1923 WWL(&o2, &t);
1924 }
1925
1926 static void ww_test_edeadlk_no_unlock_slow(void)
1927 {
1928 int ret;
1929
1930 ww_mutex_base_lock(&o2.base);
1931 mutex_release(&o2.base.dep_map, _THIS_IP_);
1932 o2.ctx = &t2;
1933
1934 WWAI(&t);
1935 t2 = t;
1936 t2.stamp--;
1937
1938 ret = WWL(&o, &t);
1939 WARN_ON(ret);
1940
1941 ret = WWL(&o2, &t);
1942 WARN_ON(ret != -EDEADLK);
1943
1944 o2.ctx = NULL;
1945 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
1946 ww_mutex_base_unlock(&o2.base);
1947
1948 ww_mutex_lock_slow(&o2, &t);
1949 }
1950
1951 static void ww_test_edeadlk_acquire_more(void)
1952 {
1953 int ret;
1954
1955 ww_mutex_base_lock(&o2.base);
1956 mutex_release(&o2.base.dep_map, _THIS_IP_);
1957 o2.ctx = &t2;
1958
1959 WWAI(&t);
1960 t2 = t;
1961 t2.stamp--;
1962
1963 ret = WWL(&o, &t);
1964 WARN_ON(ret);
1965
1966 ret = WWL(&o2, &t);
1967 WARN_ON(ret != -EDEADLK);
1968
1969 ret = WWL(&o3, &t);
1970 }
1971
1972 static void ww_test_edeadlk_acquire_more_slow(void)
1973 {
1974 int ret;
1975
1976 ww_mutex_base_lock(&o2.base);
1977 mutex_release(&o2.base.dep_map, _THIS_IP_);
1978 o2.ctx = &t2;
1979
1980 WWAI(&t);
1981 t2 = t;
1982 t2.stamp--;
1983
1984 ret = WWL(&o, &t);
1985 WARN_ON(ret);
1986
1987 ret = WWL(&o2, &t);
1988 WARN_ON(ret != -EDEADLK);
1989
1990 ww_mutex_lock_slow(&o3, &t);
1991 }
1992
1993 static void ww_test_edeadlk_acquire_more_edeadlk(void)
1994 {
1995 int ret;
1996
1997 ww_mutex_base_lock(&o2.base);
1998 mutex_release(&o2.base.dep_map, _THIS_IP_);
1999 o2.ctx = &t2;
2000
2001 ww_mutex_base_lock(&o3.base);
2002 mutex_release(&o3.base.dep_map, _THIS_IP_);
2003 o3.ctx = &t2;
2004
2005 WWAI(&t);
2006 t2 = t;
2007 t2.stamp--;
2008
2009 ret = WWL(&o, &t);
2010 WARN_ON(ret);
2011
2012 ret = WWL(&o2, &t);
2013 WARN_ON(ret != -EDEADLK);
2014
2015 ret = WWL(&o3, &t);
2016 WARN_ON(ret != -EDEADLK);
2017 }
2018
2019 static void ww_test_edeadlk_acquire_more_edeadlk_slow(void)
2020 {
2021 int ret;
2022
2023 ww_mutex_base_lock(&o2.base);
2024 mutex_release(&o2.base.dep_map, _THIS_IP_);
2025 o2.ctx = &t2;
2026
2027 ww_mutex_base_lock(&o3.base);
2028 mutex_release(&o3.base.dep_map, _THIS_IP_);
2029 o3.ctx = &t2;
2030
2031 WWAI(&t);
2032 t2 = t;
2033 t2.stamp--;
2034
2035 ret = WWL(&o, &t);
2036 WARN_ON(ret);
2037
2038 ret = WWL(&o2, &t);
2039 WARN_ON(ret != -EDEADLK);
2040
2041 ww_mutex_lock_slow(&o3, &t);
2042 }
2043
2044 static void ww_test_edeadlk_acquire_wrong(void)
2045 {
2046 int ret;
2047
2048 ww_mutex_base_lock(&o2.base);
2049 mutex_release(&o2.base.dep_map, _THIS_IP_);
2050 o2.ctx = &t2;
2051
2052 WWAI(&t);
2053 t2 = t;
2054 t2.stamp--;
2055
2056 ret = WWL(&o, &t);
2057 WARN_ON(ret);
2058
2059 ret = WWL(&o2, &t);
2060 WARN_ON(ret != -EDEADLK);
2061 if (!ret)
2062 WWU(&o2);
2063
2064 WWU(&o);
2065
2066 ret = WWL(&o3, &t);
2067 }
2068
2069 static void ww_test_edeadlk_acquire_wrong_slow(void)
2070 {
2071 int ret;
2072
2073 ww_mutex_base_lock(&o2.base);
2074 mutex_release(&o2.base.dep_map, _THIS_IP_);
2075 o2.ctx = &t2;
2076
2077 WWAI(&t);
2078 t2 = t;
2079 t2.stamp--;
2080
2081 ret = WWL(&o, &t);
2082 WARN_ON(ret);
2083
2084 ret = WWL(&o2, &t);
2085 WARN_ON(ret != -EDEADLK);
2086 if (!ret)
2087 WWU(&o2);
2088
2089 WWU(&o);
2090
2091 ww_mutex_lock_slow(&o3, &t);
2092 }
2093
2094 static void ww_test_spin_nest_unlocked(void)
2095 {
2096 spin_lock_nest_lock(&lock_A, &o.base);
2097 U(A);
2098 }
2099
2100 /* This is not a deadlock, because we have X1 to serialize Y1 and Y2 */
2101 static void ww_test_spin_nest_lock(void)
2102 {
2103 spin_lock(&lock_X1);
2104 spin_lock_nest_lock(&lock_Y1, &lock_X1);
2105 spin_lock(&lock_A);
2106 spin_lock_nest_lock(&lock_Y2, &lock_X1);
2107 spin_unlock(&lock_A);
2108 spin_unlock(&lock_Y2);
2109 spin_unlock(&lock_Y1);
2110 spin_unlock(&lock_X1);
2111 }
2112
2113 static void ww_test_unneeded_slow(void)
2114 {
2115 WWAI(&t);
2116
2117 ww_mutex_lock_slow(&o, &t);
2118 }
2119
2120 static void ww_test_context_block(void)
2121 {
2122 int ret;
2123
2124 WWAI(&t);
2125
2126 ret = WWL(&o, &t);
2127 WARN_ON(ret);
2128 WWL1(&o2);
2129 }
2130
2131 static void ww_test_context_try(void)
2132 {
2133 int ret;
2134
2135 WWAI(&t);
2136
2137 ret = WWL(&o, &t);
2138 WARN_ON(ret);
2139
2140 ret = WWT(&o2);
2141 WARN_ON(!ret);
2142 WWU(&o2);
2143 WWU(&o);
2144 }
2145
2146 static void ww_test_context_context(void)
2147 {
2148 int ret;
2149
2150 WWAI(&t);
2151
2152 ret = WWL(&o, &t);
2153 WARN_ON(ret);
2154
2155 ret = WWL(&o2, &t);
2156 WARN_ON(ret);
2157
2158 WWU(&o2);
2159 WWU(&o);
2160 }
2161
2162 static void ww_test_try_block(void)
2163 {
2164 bool ret;
2165
2166 ret = WWT(&o);
2167 WARN_ON(!ret);
2168
2169 WWL1(&o2);
2170 WWU(&o2);
2171 WWU(&o);
2172 }
2173
2174 static void ww_test_try_try(void)
2175 {
2176 bool ret;
2177
2178 ret = WWT(&o);
2179 WARN_ON(!ret);
2180 ret = WWT(&o2);
2181 WARN_ON(!ret);
2182 WWU(&o2);
2183 WWU(&o);
2184 }
2185
2186 static void ww_test_try_context(void)
2187 {
2188 int ret;
2189
2190 ret = WWT(&o);
2191 WARN_ON(!ret);
2192
2193 WWAI(&t);
2194
2195 ret = WWL(&o2, &t);
2196 WARN_ON(ret);
2197 }
2198
2199 static void ww_test_block_block(void)
2200 {
2201 WWL1(&o);
2202 WWL1(&o2);
2203 }
2204
2205 static void ww_test_block_try(void)
2206 {
2207 bool ret;
2208
2209 WWL1(&o);
2210 ret = WWT(&o2);
2211 WARN_ON(!ret);
2212 }
2213
2214 static void ww_test_block_context(void)
2215 {
2216 int ret;
2217
2218 WWL1(&o);
2219 WWAI(&t);
2220
2221 ret = WWL(&o2, &t);
2222 WARN_ON(ret);
2223 }
2224
2225 static void ww_test_spin_block(void)
2226 {
2227 L(A);
2228 U(A);
2229
2230 WWL1(&o);
2231 L(A);
2232 U(A);
2233 WWU(&o);
2234
2235 L(A);
2236 WWL1(&o);
2237 WWU(&o);
2238 U(A);
2239 }
2240
2241 static void ww_test_spin_try(void)
2242 {
2243 bool ret;
2244
2245 L(A);
2246 U(A);
2247
2248 ret = WWT(&o);
2249 WARN_ON(!ret);
2250 L(A);
2251 U(A);
2252 WWU(&o);
2253
2254 L(A);
2255 ret = WWT(&o);
2256 WARN_ON(!ret);
2257 WWU(&o);
2258 U(A);
2259 }
2260
2261 static void ww_test_spin_context(void)
2262 {
2263 int ret;
2264
2265 L(A);
2266 U(A);
2267
2268 WWAI(&t);
2269
2270 ret = WWL(&o, &t);
2271 WARN_ON(ret);
2272 L(A);
2273 U(A);
2274 WWU(&o);
2275
2276 L(A);
2277 ret = WWL(&o, &t);
2278 WARN_ON(ret);
2279 WWU(&o);
2280 U(A);
2281 }
2282
2283 static void ww_tests(void)
2284 {
2285 printk(" --------------------------------------------------------------------------\n");
2286 printk(" | Wound/wait tests |\n");
2287 printk(" ---------------------\n");
2288
2289 print_testname("ww api failures");
2290 dotest(ww_test_fail_acquire, SUCCESS, LOCKTYPE_WW);
2291 dotest(ww_test_normal, SUCCESS, LOCKTYPE_WW);
2292 dotest(ww_test_unneeded_slow, FAILURE, LOCKTYPE_WW);
2293 pr_cont("\n");
2294
2295 print_testname("ww contexts mixing");
2296 dotest(ww_test_two_contexts, FAILURE, LOCKTYPE_WW);
2297 dotest(ww_test_diff_class, FAILURE, LOCKTYPE_WW);
2298 pr_cont("\n");
2299
2300 print_testname("finishing ww context");
2301 dotest(ww_test_context_done_twice, FAILURE, LOCKTYPE_WW);
2302 dotest(ww_test_context_unlock_twice, FAILURE, LOCKTYPE_WW);
2303 dotest(ww_test_context_fini_early, FAILURE, LOCKTYPE_WW);
2304 dotest(ww_test_context_lock_after_done, FAILURE, LOCKTYPE_WW);
2305 pr_cont("\n");
2306
2307 print_testname("locking mismatches");
2308 dotest(ww_test_object_unlock_twice, FAILURE, LOCKTYPE_WW);
2309 dotest(ww_test_object_lock_unbalanced, FAILURE, LOCKTYPE_WW);
2310 dotest(ww_test_object_lock_stale_context, FAILURE, LOCKTYPE_WW);
2311 pr_cont("\n");
2312
2313 print_testname("EDEADLK handling");
2314 dotest(ww_test_edeadlk_normal, SUCCESS, LOCKTYPE_WW);
2315 dotest(ww_test_edeadlk_normal_slow, SUCCESS, LOCKTYPE_WW);
2316 dotest(ww_test_edeadlk_no_unlock, FAILURE, LOCKTYPE_WW);
2317 dotest(ww_test_edeadlk_no_unlock_slow, FAILURE, LOCKTYPE_WW);
2318 dotest(ww_test_edeadlk_acquire_more, FAILURE, LOCKTYPE_WW);
2319 dotest(ww_test_edeadlk_acquire_more_slow, FAILURE, LOCKTYPE_WW);
2320 dotest(ww_test_edeadlk_acquire_more_edeadlk, FAILURE, LOCKTYPE_WW);
2321 dotest(ww_test_edeadlk_acquire_more_edeadlk_slow, FAILURE, LOCKTYPE_WW);
2322 dotest(ww_test_edeadlk_acquire_wrong, FAILURE, LOCKTYPE_WW);
2323 dotest(ww_test_edeadlk_acquire_wrong_slow, FAILURE, LOCKTYPE_WW);
2324 pr_cont("\n");
2325
2326 print_testname("spinlock nest unlocked");
2327 dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW);
2328 pr_cont("\n");
2329
2330 print_testname("spinlock nest test");
2331 dotest(ww_test_spin_nest_lock, SUCCESS, LOCKTYPE_WW);
2332 pr_cont("\n");
2333
2334 printk(" -----------------------------------------------------\n");
2335 printk(" |block | try |context|\n");
2336 printk(" -----------------------------------------------------\n");
2337
2338 print_testname("context");
2339 dotest(ww_test_context_block, FAILURE, LOCKTYPE_WW);
2340 dotest(ww_test_context_try, SUCCESS, LOCKTYPE_WW);
2341 dotest(ww_test_context_context, SUCCESS, LOCKTYPE_WW);
2342 pr_cont("\n");
2343
2344 print_testname("try");
2345 dotest(ww_test_try_block, FAILURE, LOCKTYPE_WW);
2346 dotest(ww_test_try_try, SUCCESS, LOCKTYPE_WW);
2347 dotest(ww_test_try_context, FAILURE, LOCKTYPE_WW);
2348 pr_cont("\n");
2349
2350 print_testname("block");
2351 dotest(ww_test_block_block, FAILURE, LOCKTYPE_WW);
2352 dotest(ww_test_block_try, SUCCESS, LOCKTYPE_WW);
2353 dotest(ww_test_block_context, FAILURE, LOCKTYPE_WW);
2354 pr_cont("\n");
2355
2356 print_testname("spinlock");
2357 dotest(ww_test_spin_block, FAILURE, LOCKTYPE_WW);
2358 dotest(ww_test_spin_try, SUCCESS, LOCKTYPE_WW);
2359 dotest(ww_test_spin_context, FAILURE, LOCKTYPE_WW);
2360 pr_cont("\n");
2361 }
2362
2363
2364 /*
2365 * <in hardirq handler>
2366 * read_lock(&A);
2367 * <hardirq disable>
2368 * spin_lock(&B);
2369 * spin_lock(&B);
2370 * read_lock(&A);
2371 *
2372 * is a deadlock.
2373 */
2374 static void queued_read_lock_hardirq_RE_Er(void)
2375 {
2376 HARDIRQ_ENTER();
2377 read_lock(&rwlock_A);
2378 LOCK(B);
2379 UNLOCK(B);
2380 read_unlock(&rwlock_A);
2381 HARDIRQ_EXIT();
2382
2383 HARDIRQ_DISABLE();
2384 LOCK(B);
2385 read_lock(&rwlock_A);
2386 read_unlock(&rwlock_A);
2387 UNLOCK(B);
2388 HARDIRQ_ENABLE();
2389 }
2390
2391 /*
2392 * <in hardirq handler>
2393 * spin_lock(&B);
2394 * <hardirq disable>
2395 * read_lock(&A);
2396 * read_lock(&A);
2397 * spin_lock(&B);
2398 *
2399 * is not a deadlock.
2400 */
2401 static void queued_read_lock_hardirq_ER_rE(void)
2402 {
2403 HARDIRQ_ENTER();
2404 LOCK(B);
2405 read_lock(&rwlock_A);
2406 read_unlock(&rwlock_A);
2407 UNLOCK(B);
2408 HARDIRQ_EXIT();
2409
2410 HARDIRQ_DISABLE();
2411 read_lock(&rwlock_A);
2412 LOCK(B);
2413 UNLOCK(B);
2414 read_unlock(&rwlock_A);
2415 HARDIRQ_ENABLE();
2416 }
2417
2418 /*
2419 * <hardirq disable>
2420 * spin_lock(&B);
2421 * read_lock(&A);
2422 * <in hardirq handler>
2423 * spin_lock(&B);
2424 * read_lock(&A);
2425 *
2426 * is a deadlock, because the two read_lock()s are both non-recursive readers.
2427 */
2428 static void queued_read_lock_hardirq_inversion(void)
2429 {
2430
2431 HARDIRQ_ENTER();
2432 LOCK(B);
2433 UNLOCK(B);
2434 HARDIRQ_EXIT();
2435
2436 HARDIRQ_DISABLE();
2437 LOCK(B);
2438 read_lock(&rwlock_A);
2439 read_unlock(&rwlock_A);
2440 UNLOCK(B);
2441 HARDIRQ_ENABLE();
2442
2443 read_lock(&rwlock_A);
2444 read_unlock(&rwlock_A);
2445 }
2446
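/*
 * The three scenarios above depend on the queued rwlock reader semantics
 * (readers are not unconditionally recursive), which is why
 * locking_selftest() clears force_read_lock_recursive before calling
 * queued_read_lock_tests() below.
 */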
2447 static void queued_read_lock_tests(void)
2448 {
2449 printk(" --------------------------------------------------------------------------\n");
2450 printk(" | queued read lock tests |\n");
2451 printk(" ---------------------------\n");
2452 print_testname("hardirq read-lock/lock-read");
2453 dotest(queued_read_lock_hardirq_RE_Er, FAILURE, LOCKTYPE_RWLOCK);
2454 pr_cont("\n");
2455
2456 print_testname("hardirq lock-read/read-lock");
2457 dotest(queued_read_lock_hardirq_ER_rE, SUCCESS, LOCKTYPE_RWLOCK);
2458 pr_cont("\n");
2459
2460 print_testname("hardirq inversion");
2461 dotest(queued_read_lock_hardirq_inversion, FAILURE, LOCKTYPE_RWLOCK);
2462 pr_cont("\n");
2463 }
2464
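/*
 * fs_reclaim nesting: might_alloc() acquires and releases the fs_reclaim
 * lockdep map for allocations that can enter __GFP_FS direct reclaim.  A
 * GFP_KERNEL allocation while fs_reclaim is already held (i.e. nominally
 * from within reclaim) is the recursion lockdep should flag; GFP_NOFS, or a
 * surrounding memalloc_nofs_save()/restore() section, keeps the allocation
 * out of filesystem reclaim and must not be flagged.
 */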
2465 static void fs_reclaim_correct_nesting(void)
2466 {
2467 fs_reclaim_acquire(GFP_KERNEL);
2468 might_alloc(GFP_NOFS);
2469 fs_reclaim_release(GFP_KERNEL);
2470 }
2471
2472 static void fs_reclaim_wrong_nesting(void)
2473 {
2474 fs_reclaim_acquire(GFP_KERNEL);
2475 might_alloc(GFP_KERNEL);
2476 fs_reclaim_release(GFP_KERNEL);
2477 }
2478
2479 static void fs_reclaim_protected_nesting(void)
2480 {
2481 unsigned int flags;
2482
2483 fs_reclaim_acquire(GFP_KERNEL);
2484 flags = memalloc_nofs_save();
2485 might_alloc(GFP_KERNEL);
2486 memalloc_nofs_restore(flags);
2487 fs_reclaim_release(GFP_KERNEL);
2488 }
2489
2490 static void fs_reclaim_tests(void)
2491 {
2492 printk(" --------------------\n");
2493 printk(" | fs_reclaim tests |\n");
2494 printk(" --------------------\n");
2495
2496 print_testname("correct nesting");
2497 dotest(fs_reclaim_correct_nesting, SUCCESS, 0);
2498 pr_cont("\n");
2499
2500 print_testname("wrong nesting");
2501 dotest(fs_reclaim_wrong_nesting, FAILURE, 0);
2502 pr_cont("\n");
2503
2504 print_testname("protected nesting");
2505 dotest(fs_reclaim_protected_nesting, SUCCESS, 0);
2506 pr_cont("\n");
2507 }
2508
2509 #define __guard(cleanup) __maybe_unused __attribute__((__cleanup__(cleanup)))
2510
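/*
 * Note on the guard pattern used below (illustration only, the variable name
 * is made up): __guard() applies the compiler's __cleanup__ attribute, so a
 * declaration such as
 *
 *	int hardirq_guard_example __guard(hardirq_exit);
 *	HARDIRQ_ENTER();
 *
 * arranges for hardirq_exit(&hardirq_guard_example), and therefore
 * HARDIRQ_EXIT(), to run automatically when the variable goes out of scope.
 * All of the *_CONTEXT() macros below pair an "enter" statement with such a
 * cleanup handler in exactly this way.
 */
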
2511 static void hardirq_exit(int *_)
2512 {
2513 HARDIRQ_EXIT();
2514 }
2515
2516 #define HARDIRQ_CONTEXT(name, ...) \
2517 int hardirq_guard_##name __guard(hardirq_exit); \
2518 HARDIRQ_ENTER();
2519
2520 #define NOTTHREADED_HARDIRQ_CONTEXT(name, ...) \
2521 int notthreaded_hardirq_guard_##name __guard(hardirq_exit); \
2522 local_irq_disable(); \
2523 __irq_enter(); \
2524 WARN_ON(!in_irq());
2525
2526 static void softirq_exit(int *_)
2527 {
2528 SOFTIRQ_EXIT();
2529 }
2530
2531 #define SOFTIRQ_CONTEXT(name, ...) \
2532 int softirq_guard_##name __guard(softirq_exit); \
2533 SOFTIRQ_ENTER();
2534
2535 static void rcu_exit(int *_)
2536 {
2537 rcu_read_unlock();
2538 }
2539
2540 #define RCU_CONTEXT(name, ...) \
2541 int rcu_guard_##name __guard(rcu_exit); \
2542 rcu_read_lock();
2543
2544 static void rcu_bh_exit(int *_)
2545 {
2546 rcu_read_unlock_bh();
2547 }
2548
2549 #define RCU_BH_CONTEXT(name, ...) \
2550 int rcu_bh_guard_##name __guard(rcu_bh_exit); \
2551 rcu_read_lock_bh();
2552
2553 static void rcu_sched_exit(int *_)
2554 {
2555 rcu_read_unlock_sched();
2556 }
2557
2558 #define RCU_SCHED_CONTEXT(name, ...) \
2559 int rcu_sched_guard_##name __guard(rcu_sched_exit); \
2560 rcu_read_lock_sched();
2561
2562 static void raw_spinlock_exit(raw_spinlock_t **lock)
2563 {
2564 raw_spin_unlock(*lock);
2565 }
2566
2567 #define RAW_SPINLOCK_CONTEXT(name, lock) \
2568 raw_spinlock_t *raw_spinlock_guard_##name __guard(raw_spinlock_exit) = &(lock); \
2569 raw_spin_lock(&(lock));
2570
2571 static void spinlock_exit(spinlock_t **lock)
2572 {
2573 spin_unlock(*lock);
2574 }
2575
2576 #define SPINLOCK_CONTEXT(name, lock) \
2577 spinlock_t *spinlock_guard_##name __guard(spinlock_exit) = &(lock); \
2578 spin_lock(&(lock));
2579
2580 static void mutex_exit(struct mutex **lock)
2581 {
2582 mutex_unlock(*lock);
2583 }
2584
2585 #define MUTEX_CONTEXT(name, lock) \
2586 struct mutex *mutex_guard_##name __guard(mutex_exit) = &(lock); \
2587 mutex_lock(&(lock));
2588
2589 #define GENERATE_2_CONTEXT_TESTCASE(outer, outer_lock, inner, inner_lock) \
2590 \
2591 static void __maybe_unused inner##_in_##outer(void) \
2592 { \
2593 outer##_CONTEXT(_, outer_lock); \
2594 { \
2595 inner##_CONTEXT(_, inner_lock); \
2596 } \
2597 }
2598
2599 /*
2600 * wait contexts (considering PREEMPT_RT)
2601 *
2602 * o: inner is allowed in outer
2603 * x: inner is disallowed in outer
2604 *
2605 * \ inner | RCU | RAW_SPIN | SPIN | MUTEX
2606 * outer \ | | | |
2607 * ---------------+-------+----------+------+-------
2608 * HARDIRQ | o | o | o | x
2609 * ---------------+-------+----------+------+-------
2610 * NOTTHREADED_IRQ| o | o | x | x
2611 * ---------------+-------+----------+------+-------
2612 * SOFTIRQ | o | o | o | x
2613 * ---------------+-------+----------+------+-------
2614 * RCU | o | o | o | x
2615 * ---------------+-------+----------+------+-------
2616 * RCU_BH | o | o | o | x
2617 * ---------------+-------+----------+------+-------
2618 * RCU_SCHED | o | o | x | x
2619 * ---------------+-------+----------+------+-------
2620 * RAW_SPIN | o | o | x | x
2621 * ---------------+-------+----------+------+-------
2622 * SPIN | o | o | o | x
2623 * ---------------+-------+----------+------+-------
2624 * MUTEX | o | o | o | o
2625 * ---------------+-------+----------+------+-------
2626 */
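
/*
 * The PREEMPT_RT angle behind the table: spinlock_t and mutex are sleeping
 * locks on PREEMPT_RT while raw_spinlock_t is not, so the RAW_SPIN row
 * disallows SPIN and MUTEX (a sleeping lock inside a non-preemptible
 * region), and the MUTEX row allows everything because a mutex holder stays
 * preemptible.
 */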
2627
2628 #define GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(inner, inner_lock) \
2629 GENERATE_2_CONTEXT_TESTCASE(HARDIRQ, , inner, inner_lock) \
2630 GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock) \
2631 GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock) \
2632 GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock) \
2633 GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock) \
2634 GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock) \
2635 GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock) \
2636 GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock) \
2637 GENERATE_2_CONTEXT_TESTCASE(MUTEX, mutex_A, inner, inner_lock)
2638
2639 GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RCU, )
2640 GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RAW_SPINLOCK, raw_lock_B)
2641 GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(SPINLOCK, lock_B)
2642 GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)
2643
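/*
 * For reference only, a hand-expanded sketch of one generated case (the
 * function name here is made up and nothing calls it): the
 * GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B) line above
 * generates, among others, MUTEX_in_SPINLOCK(), which is equivalent to:
 */
static void __maybe_unused example_MUTEX_in_SPINLOCK_expanded(void)
{
	SPINLOCK_CONTEXT(_, lock_A);		/* spin_lock(&lock_A), unlocked on cleanup */
	{
		MUTEX_CONTEXT(_, mutex_B);	/* mutex_lock(&mutex_B), unlocked on cleanup */
	}
}
/*
 * Per the table above, a mutex inside a spinlock_t section is disallowed, so
 * wait_context_tests() runs the generated MUTEX_in_SPINLOCK case with a
 * FAILURE expectation, i.e. lockdep is expected to report it.
 */
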
2644 /* the outer context allows all kinds of preemption */
2645 #define DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(outer) \
2646 dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
2647 dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
2648 dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
2649 dotest(MUTEX_in_##outer, SUCCESS, LOCKTYPE_MUTEX); \
2650
2651 /*
2652 * the outer context only allows the preemption introduced by spinlock_t (which
2653 * is a sleepable lock for PREEMPT_RT)
2654 */
2655 #define DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(outer) \
2656 dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
2657 dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
2658 dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
2659 dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
2660
2661 /* the outer context doesn't allow any kind of preemption */
2662 #define DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(outer) \
2663 dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
2664 dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
2665 dotest(SPINLOCK_in_##outer, FAILURE, LOCKTYPE_SPIN); \
2666 dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
2667
2668 static void wait_context_tests(void)
2669 {
2670 printk(" --------------------------------------------------------------------------\n");
2671 printk(" | wait context tests |\n");
2672 printk(" --------------------------------------------------------------------------\n");
2673 printk(" | rcu | raw | spin |mutex |\n");
2674 printk(" --------------------------------------------------------------------------\n");
2675 print_testname("in hardirq context");
2676 DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(HARDIRQ);
2677 pr_cont("\n");
2678
2679 print_testname("in hardirq context (not threaded)");
2680 DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(NOTTHREADED_HARDIRQ);
2681 pr_cont("\n");
2682
2683 print_testname("in softirq context");
2684 DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SOFTIRQ);
2685 pr_cont("\n");
2686
2687 print_testname("in RCU context");
2688 DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU);
2689 pr_cont("\n");
2690
2691 print_testname("in RCU-bh context");
2692 DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
2693 pr_cont("\n");
2694
2695 print_testname("in RCU-sched context");
2696 DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
2697 pr_cont("\n");
2698
2699 print_testname("in RAW_SPINLOCK context");
2700 DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RAW_SPINLOCK);
2701 pr_cont("\n");
2702
2703 print_testname("in SPINLOCK context");
2704 DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SPINLOCK);
2705 pr_cont("\n");
2706
2707 print_testname("in MUTEX context");
2708 DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(MUTEX);
2709 pr_cont("\n");
2710 }
2711
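/*
 * local_lock inversion cases: local_A is taken IRQ-ON, lock_A is hardirq-safe,
 * and an irq-disabled lock_A -> local_A dependency is created.  Cases 2 and 3A
 * must not be reported (the apparent IN-IRQ <-> IRQ-ON cycle only exists if
 * local_lock() is counted like a spinlock), while 3B adds a real hardirq-safe
 * -> hardirq-unsafe dependency through lock_B that lockdep is expected to
 * catch.
 */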
2712 static void local_lock_2(void)
2713 {
2714 local_lock(&local_A); /* IRQ-ON */
2715 local_unlock(&local_A);
2716
2717 HARDIRQ_ENTER();
2718 spin_lock(&lock_A); /* IN-IRQ */
2719 spin_unlock(&lock_A);
2720 HARDIRQ_EXIT()
2721
2722 HARDIRQ_DISABLE();
2723 spin_lock(&lock_A);
2724 local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */
2725 local_unlock(&local_A);
2726 spin_unlock(&lock_A);
2727 HARDIRQ_ENABLE();
2728 }
2729
2730 static void local_lock_3A(void)
2731 {
2732 local_lock(&local_A); /* IRQ-ON */
2733 spin_lock(&lock_B); /* IRQ-ON */
2734 spin_unlock(&lock_B);
2735 local_unlock(&local_A);
2736
2737 HARDIRQ_ENTER();
2738 spin_lock(&lock_A); /* IN-IRQ */
2739 spin_unlock(&lock_A);
2740 HARDIRQ_EXIT()
2741
2742 HARDIRQ_DISABLE();
2743 spin_lock(&lock_A);
2744 local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
2745 local_unlock(&local_A);
2746 spin_unlock(&lock_A);
2747 HARDIRQ_ENABLE();
2748 }
2749
2750 static void local_lock_3B(void)
2751 {
2752 local_lock(&local_A); /* IRQ-ON */
2753 spin_lock(&lock_B); /* IRQ-ON */
2754 spin_unlock(&lock_B);
2755 local_unlock(&local_A);
2756
2757 HARDIRQ_ENTER();
2758 spin_lock(&lock_A); /* IN-IRQ */
2759 spin_unlock(&lock_A);
2760 HARDIRQ_EXIT()
2761
2762 HARDIRQ_DISABLE();
2763 spin_lock(&lock_A);
2764 local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
2765 local_unlock(&local_A);
2766 spin_unlock(&lock_A);
2767 HARDIRQ_ENABLE();
2768
2769 HARDIRQ_DISABLE();
2770 spin_lock(&lock_A);
2771 spin_lock(&lock_B); /* IN-IRQ <-> IRQ-ON cycle, true */
2772 spin_unlock(&lock_B);
2773 spin_unlock(&lock_A);
2774 HARDIRQ_ENABLE();
2775
2776 }
2777
2778 static void local_lock_tests(void)
2779 {
2780 printk(" --------------------------------------------------------------------------\n");
2781 printk(" | local_lock tests |\n");
2782 printk(" ---------------------\n");
2783
2784 print_testname("local_lock inversion 2");
2785 dotest(local_lock_2, SUCCESS, LOCKTYPE_LL);
2786 pr_cont("\n");
2787
2788 print_testname("local_lock inversion 3A");
2789 dotest(local_lock_3A, SUCCESS, LOCKTYPE_LL);
2790 pr_cont("\n");
2791
2792 print_testname("local_lock inversion 3B");
2793 dotest(local_lock_3B, FAILURE, LOCKTYPE_LL);
2794 pr_cont("\n");
2795 }
2796
2797 static void hardirq_deadlock_softirq_not_deadlock(void)
2798 {
2799 /* mutex_A is hardirq-unsafe and softirq-unsafe */
2800 /* mutex_A -> lock_C */
2801 mutex_lock(&mutex_A);
2802 HARDIRQ_DISABLE();
2803 spin_lock(&lock_C);
2804 spin_unlock(&lock_C);
2805 HARDIRQ_ENABLE();
2806 mutex_unlock(&mutex_A);
2807
2808 /* lock_A is hardirq-safe */
2809 HARDIRQ_ENTER();
2810 spin_lock(&lock_A);
2811 spin_unlock(&lock_A);
2812 HARDIRQ_EXIT();
2813
2814 /* lock_A -> lock_B */
2815 HARDIRQ_DISABLE();
2816 spin_lock(&lock_A);
2817 spin_lock(&lock_B);
2818 spin_unlock(&lock_B);
2819 spin_unlock(&lock_A);
2820 HARDIRQ_ENABLE();
2821
2822 /* lock_B -> lock_C */
2823 HARDIRQ_DISABLE();
2824 spin_lock(&lock_B);
2825 spin_lock(&lock_C);
2826 spin_unlock(&lock_C);
2827 spin_unlock(&lock_B);
2828 HARDIRQ_ENABLE();
2829
2830 /* lock_D is softirq-safe */
2831 SOFTIRQ_ENTER();
2832 spin_lock(&lock_D);
2833 spin_unlock(&lock_D);
2834 SOFTIRQ_EXIT();
2835
2836 /* And lock_D is hardirq-unsafe */
2837 SOFTIRQ_DISABLE();
2838 spin_lock(&lock_D);
2839 spin_unlock(&lock_D);
2840 SOFTIRQ_ENABLE();
2841
2842 /*
2843 * mutex_A -> lock_C -> lock_D is softirq-unsafe -> softirq-safe, not
2844 * deadlock.
2845 *
2846 * lock_A -> lock_B -> lock_C -> lock_D is hardirq-safe ->
2847 * hardirq-unsafe, deadlock.
2848 */
2849 HARDIRQ_DISABLE();
2850 spin_lock(&lock_C);
2851 spin_lock(&lock_D);
2852 spin_unlock(&lock_D);
2853 spin_unlock(&lock_C);
2854 HARDIRQ_ENABLE();
2855 }
2856
2857 void locking_selftest(void)
2858 {
2859 /*
2860 * Got a locking failure before the selftest ran?
2861 */
2862 if (!debug_locks) {
2863 printk("----------------------------------\n");
2864 printk("| Locking API testsuite disabled |\n");
2865 printk("----------------------------------\n");
2866 return;
2867 }
2868
2869 /*
2870 * Treat read_lock() as recursive read locks for testing purposes.
2871 */
2872 force_read_lock_recursive = 1;
2873
2874 /*
2875 * Run the testsuite:
2876 */
2877 printk("------------------------\n");
2878 printk("| Locking API testsuite:\n");
2879 printk("----------------------------------------------------------------------------\n");
2880 printk(" | spin |wlock |rlock |mutex | wsem | rsem |rtmutex\n");
2881 printk(" --------------------------------------------------------------------------\n");
2882
2883 init_shared_classes();
2884 lockdep_set_selftest_task(current);
2885
2886 DO_TESTCASE_6R("A-A deadlock", AA);
2887 DO_TESTCASE_6R("A-B-B-A deadlock", ABBA);
2888 DO_TESTCASE_6R("A-B-B-C-C-A deadlock", ABBCCA);
2889 DO_TESTCASE_6R("A-B-C-A-B-C deadlock", ABCABC);
2890 DO_TESTCASE_6R("A-B-B-C-C-D-D-A deadlock", ABBCCDDA);
2891 DO_TESTCASE_6R("A-B-C-D-B-D-D-A deadlock", ABCDBDDA);
2892 DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA);
2893 DO_TESTCASE_6("double unlock", double_unlock);
2894 DO_TESTCASE_6("initialize held", init_held);
2895
2896 printk(" --------------------------------------------------------------------------\n");
2897 print_testname("recursive read-lock");
2898 pr_cont(" |");
2899 dotest(rlock_AA1, SUCCESS, LOCKTYPE_RWLOCK);
2900 pr_cont(" |");
2901 dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM);
2902 pr_cont("\n");
2903
2904 print_testname("recursive read-lock #2");
2905 pr_cont(" |");
2906 dotest(rlock_AA1B, SUCCESS, LOCKTYPE_RWLOCK);
2907 pr_cont(" |");
2908 dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM);
2909 pr_cont("\n");
2910
2911 print_testname("mixed read-write-lock");
2912 pr_cont(" |");
2913 dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK);
2914 pr_cont(" |");
2915 dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM);
2916 pr_cont("\n");
2917
2918 print_testname("mixed write-read-lock");
2919 pr_cont(" |");
2920 dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK);
2921 pr_cont(" |");
2922 dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM);
2923 pr_cont("\n");
2924
2925 print_testname("mixed read-lock/lock-write ABBA");
2926 pr_cont(" |");
2927 dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
2928 pr_cont(" |");
2929 dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
2930
2931 print_testname("mixed read-lock/lock-read ABBA");
2932 pr_cont(" |");
2933 dotest(rlock_ABBA2, SUCCESS, LOCKTYPE_RWLOCK);
2934 pr_cont(" |");
2935 dotest(rwsem_ABBA2, FAILURE, LOCKTYPE_RWSEM);
2936
2937 print_testname("mixed write-lock/lock-write ABBA");
2938 pr_cont(" |");
2939 dotest(rlock_ABBA3, FAILURE, LOCKTYPE_RWLOCK);
2940 pr_cont(" |");
2941 dotest(rwsem_ABBA3, FAILURE, LOCKTYPE_RWSEM);
2942
2943 print_testname("chain cached mixed R-L/L-W ABBA");
2944 pr_cont(" |");
2945 dotest(rlock_chaincache_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
2946
2947 DO_TESTCASE_6x1RRB("rlock W1R2/W2R3/W3R1", W1R2_W2R3_W3R1);
2948 DO_TESTCASE_6x1RRB("rlock W1W2/R2R3/W3R1", W1W2_R2R3_W3R1);
2949 DO_TESTCASE_6x1RR("rlock W1W2/R2R3/R3W1", W1W2_R2R3_R3W1);
2950 DO_TESTCASE_6x1RR("rlock W1R2/R2R3/W3W1", W1R2_R2R3_W3W1);
2951
2952 printk(" --------------------------------------------------------------------------\n");
2953 /*
2954 * irq-context testcases:
2955 */
2956 DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1);
2957 NON_RT(DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A));
2958 DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B);
2959 DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3);
2960 DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);
2961 DO_TESTCASE_6x6RW("irq lock-inversion", irq_inversion);
2962
2963 DO_TESTCASE_6x2x2RW("irq read-recursion", irq_read_recursion);
2964 DO_TESTCASE_6x2x2RW("irq read-recursion #2", irq_read_recursion2);
2965 DO_TESTCASE_6x2x2RW("irq read-recursion #3", irq_read_recursion3);
2966
2967 ww_tests();
2968
2969 force_read_lock_recursive = 0;
2970 /*
2971 * queued_read_lock() specific test cases can be put here
2972 */
2973 if (IS_ENABLED(CONFIG_QUEUED_RWLOCKS))
2974 queued_read_lock_tests();
2975
2976 fs_reclaim_tests();
2977
2978 /* Wait context test cases that are specific for RAW_LOCK_NESTING */
2979 if (IS_ENABLED(CONFIG_PROVE_RAW_LOCK_NESTING))
2980 wait_context_tests();
2981
2982 local_lock_tests();
2983
2984 print_testname("hardirq_unsafe_softirq_safe");
2985 dotest(hardirq_deadlock_softirq_not_deadlock, FAILURE, LOCKTYPE_SPECIAL);
2986 pr_cont("\n");
2987
2988 if (unexpected_testcase_failures) {
2989 printk("-----------------------------------------------------------------\n");
2990 debug_locks = 0;
2991 printk("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n",
2992 unexpected_testcase_failures, testcase_total);
2993 printk("-----------------------------------------------------------------\n");
2994 } else if (expected_testcase_failures && testcase_successes) {
2995 printk("--------------------------------------------------------\n");
2996 printk("%3d out of %3d testcases failed, as expected. |\n",
2997 expected_testcase_failures, testcase_total);
2998 printk("----------------------------------------------------\n");
2999 debug_locks = 1;
3000 } else if (expected_testcase_failures && !testcase_successes) {
3001 printk("--------------------------------------------------------\n");
3002 printk("All %3d testcases failed, as expected. |\n",
3003 expected_testcase_failures);
3004 printk("----------------------------------------\n");
3005 debug_locks = 1;
3006 } else {
3007 printk("-------------------------------------------------------\n");
3008 printk("Good, all %3d testcases passed! |\n",
3009 testcase_successes);
3010 printk("---------------------------------\n");
3011 debug_locks = 1;
3012 }
3013 lockdep_set_selftest_task(NULL);
3014 debug_locks_silent = 0;
3015 }
3016