/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

34 /*
35  * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
36  * cached in the instance of lockdep_map
37  *
38  * Currently main class (subclass == 0) and signle depth subclass
39  * are cached in lockdep_map. This optimization is mainly targeting
40  * on rq->lock. double_rq_lock() acquires this highly competitive with
41  * single depth.
42  */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:11;					/* 32 bits */
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
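
/*
 * For example (illustrative sketch; the wrapper type and names below
 * are hypothetical), a statically defined lock wrapper could wire up
 * its dep_map like this:
 *
 *	static struct lock_class_key my_lock_key;
 *	static struct my_lock my_lock = {
 *		.dep_map = STATIC_LOCKDEP_MAP_INIT("my_lock", &my_lock_key),
 *	};
 */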

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class(lock, &__lockdep_no_validate__)
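
/*
 * A minimal class-split sketch (the driver names are hypothetical):
 * move a set of locks out of their default class and into a dedicated
 * one, so the validator no longer conflates them with every other lock
 * initialized at the same site:
 *
 *	static struct lock_class_key mydev_lock_key;
 *
 *	spin_lock_init(&mydev->lock);
 *	lockdep_set_class(&mydev->lock, &mydev_lock_key);
 */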
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
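
/*
 * E.g. (sketch, reusing the hypothetical mydev_lock_key from above):
 *
 *	if (lockdep_match_class(&mydev->lock, &mydev_lock_key))
 *		; /* the lock is still keyed as a mydev lock */
 */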

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
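
/*
 * A hand-rolled locking primitive would typically wrap this pair, e.g.
 * (illustrative sketch; struct my_lock and arch_do_*() are hypothetical,
 * the annotation is placed before the actual lock/unlock operation):
 *
 *	void my_lock_fn(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
 *		arch_do_lock(l);
 *	}
 *
 *	void my_unlock_fn(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, 1, _RET_IP_);
 *		arch_do_unlock(l);
 *	}
 */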

#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
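
/*
 * Sketch (modelled loosely on the scheduler's double-rq locking; the
 * real code differs in detail): after taking a second lock of the same
 * class in address order, it can be re-annotated as a nested acquire:
 *
 *	lock_set_subclass(&rq2->lock.dep_map, SINGLE_DEPTH_NESTING, _RET_IP_);
 */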

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
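
/*
 * Typical lockdep_assert_held() use (sketch; struct foo is hypothetical):
 * document and verify a locking precondition in one line:
 *
 *	static void foo_update(struct foo *foo)
 *	{
 *		lockdep_assert_held(&foo->lock);
 *		foo->counter++;
 *	}
 */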

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller
 * should rather #ifdef the call themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)			do { } while (0)

#define lockdep_recursing(tsk)			(0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)
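
/*
 * E.g. the spinlock code uses this roughly as (sketch; see
 * include/linux/spinlock_api_smp.h for the real wiring):
 *
 *	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 *
 * so lock_contended() is only recorded when the trylock fast path
 * fails, while lock_acquired() timestamps every acquisition.
 */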

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
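
/*
 * E.g. (sketch) taking two locks of the same class in a well-defined
 * order, annotating the inner one as a one-level nested acquire:
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */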

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_acquire_nest(l, s, t, n, i)	do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
# else
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)			do { } while (0)
# define lock_map_acquire_read(l)		do { } while (0)
# define lock_map_release(l)			do { } while (0)
#endif
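
/*
 * The lock_map_*() variants operate on a bare lockdep_map and are used
 * to annotate "pseudo-locks". A sketch modelled on the workqueue code
 * (which embeds a lockdep_map in each work item): flushing a work item
 * acquires and immediately releases its map, so flush-vs-lock deadlocks
 * show up as ordinary lock inversions:
 *
 *	lock_map_acquire(&work->lockdep_map);
 *	lock_map_release(&work->lockdep_map);
 */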

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
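
/*
 * Usage sketch (struct foo hypothetical): a function that takes
 * foo->lock only on some paths can still assert "this lock may be
 * taken here" on every call, so ordering bugs surface even when the
 * rarely-taken path is never exercised:
 *
 *	void *foo_alloc(struct foo *foo)
 *	{
 *		might_lock(&foo->lock);
 *		...
 *	}
 */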

#ifdef CONFIG_PROVE_RCU
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#endif

#endif /* __LINUX_LOCKDEP_H */