/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 *
 *  See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *				is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit 0 before looking at the lock, and the owner may
 * be NULL in this small window, hence this can be a transitional state.
 *
 * (**) There is a small window when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
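/*
 * Illustrative only: with this encoding the fast path boils down to a
 * single compare-and-exchange on lock->owner (see rt_mutex_cmpxchg()
 * below):
 *
 *	acquire: cmpxchg(&lock->owner, NULL, current) == NULL
 *	release: cmpxchg(&lock->owner, current, NULL) == current
 *
 * Both fail as soon as bit 0 is set, which forces everybody into the
 * wait_lock protected slow path.
 */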

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

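/*
 * If the waiters bit is set but the wait list is actually empty (e.g.
 * after a failed slow path attempt), clear the bit again so the fast
 * path cmpxchg can work.
 */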
static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
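/*
 * Atomically set RT_MUTEX_HAS_WAITERS without disturbing the owner
 * bits, so that any concurrent fast path acquire/release is forced
 * into the slow path.
 */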
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock(&lock->wait_lock);
	/*
	 * If a new waiter comes in between the unlock and the cmpxchg
	 * we have two situations:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 * cmpxchg(p, owner, 0) == owner
	 *					mark_rt_mutex_waiters(lock);
	 *					acquire(lock);
	 * or:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					mark_rt_mutex_waiters(lock);
	 *
	 * cmpxchg(p, owner, 0) != owner
	 *					enqueue_waiter();
	 *					unlock(wait_lock);
	 * lock(wait_lock);
	 * wake waiter();
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					acquire(lock);
	 */
	return rt_mutex_cmpxchg(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
	__releases(lock->wait_lock)
{
	lock->owner = NULL;
	raw_spin_unlock(&lock->wait_lock);
	return true;
}
#endif

/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

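/*
 * Return the lock @p is blocked on, or NULL. The caller needs to hold
 * p->pi_lock to keep the result from changing under it.
 */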
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex *next_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return -EDEADLK;
	}
 retry:
	/*
	 * Task cannot go away as we did a get_task_struct() before!
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * We dropped all locks after taking a refcount on @task, so
	 * the task might have moved on in the lock chain or even left
	 * the chain completely and blocks now on an unrelated lock or
	 * on @orig_lock.
	 *
	 * We stored the lock on which @task was blocked in @next_lock,
	 * so we can detect the chain change.
	 */
	if (next_lock != waiter->lock)
		goto out_unlock_pi;

	/*
	 * Drop out when the task has no waiters. Note that top_waiter
	 * can be NULL when we are in deboosting mode!
	 */
	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;
		/*
		 * If deadlock detection is off, we stop here if we
		 * are not the top pi waiter of the task.
		 */
		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
			goto out_unlock_pi;
	}

	/*
	 * When deadlock detection is off, check whether further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/*
	 * Deadlock detection. If the lock is the same as the original
	 * lock which caused us to walk the lock chain or if the
	 * current lock is owned by the task which initiated the chain
	 * walk, we detected a deadlock.
	 */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = -EDEADLK;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue above changed the top waiter, then we need
		 * to wake the new top waiter up to try to get the lock.
		 */

		if (top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock(&lock->wait_lock);
		goto out_put_task;
	}
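	/*
	 * Done with the previous @task; drop the reference taken by our
	 * caller or by the previous loop iteration.
	 */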
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	/*
	 * Check whether the task which owns the current lock is pi
	 * blocked itself. If yes we store a pointer to the lock for
	 * the lock chain change detection above. After we dropped
	 * task->pi_lock next_lock cannot be dereferenced anymore.
	 */
	next_lock = task_blocked_on_lock(task);

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	raw_spin_unlock(&lock->wait_lock);

	/*
	 * We reached the end of the lock chain. Stop right here. No
	 * point in going back just to figure that out.
	 */
	if (!next_lock)
		goto out_put_task;

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
		struct rt_mutex_waiter *waiter)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note, that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * @task will get the lock only if one of these conditions holds:
	 * 1) there is no waiter
	 * 2) it has higher priority than the waiters
	 * 3) it is the top waiter
	 */
	if (rt_mutex_has_waiters(lock)) {
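		/*
		 * Lower numerical prio means higher priority: if @task is
		 * not strictly higher priority than the top waiter, it may
		 * only take the lock when it is that top waiter itself.
		 */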
		if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
			if (!waiter || waiter != rt_mutex_top_waiter(lock))
				return 0;
		}
	}

	if (waiter || rt_mutex_has_waiters(lock)) {
		unsigned long flags;
		struct rt_mutex_waiter *top;

		raw_spin_lock_irqsave(&task->pi_lock, flags);

		/* remove the queued waiter. */
		if (waiter) {
			plist_del(&waiter->list_entry, &lock->wait_list);
			task->pi_blocked_on = NULL;
		}

		/*
		 * We have to enqueue the top waiter (if it exists) into
		 * the task->pi_waiters list.
		 */
		if (rt_mutex_has_waiters(lock)) {
			top = rt_mutex_top_waiter(lock);
			top->pi_list_entry.prio = top->list_entry.prio;
			plist_add(&top->pi_list_entry, &task->pi_waiters);
		}
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	}

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	struct rt_mutex *next_lock;
	int chain_walk = 0, res;
	unsigned long flags;

	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization. We drop the locks, so another waiter
	 * can come in before the chain walk detects the deadlock. So
	 * the other waiter will detect the deadlock and return
	 * -EDEADLOCK, which is wrong, as the other waiter is not in a
	 * deadlock situation.
	 */
	if (owner == task)
		return -EDEADLK;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, task->prio);
	plist_node_init(&waiter->pi_list_entry, task->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	task->pi_blocked_on = waiter;

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	if (!owner)
		return 0;

	raw_spin_lock_irqsave(&owner->pi_lock, flags);
	if (waiter == rt_mutex_top_waiter(lock)) {
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
		chain_walk = 1;
	}

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	/*
	 * Even if full deadlock detection is on, if the owner is not
	 * blocked itself, we can avoid finding this out in the chain
	 * walk.
	 */
	if (!chain_walk || !next_lock)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
					 next_lock, waiter, task);

	raw_spin_lock(&lock->wait_lock);

	return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's pi waiter list and
 * wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);

	/*
	 * As we are waking up the top waiter, and the waiter stays
	 * queued on the lock until it gets the lock, this lock
	 * obviously has waiters. Just set the bit here and this has
	 * the added benefit of forcing all new tasks into the
	 * slow path making sure no task of lower priority than
	 * the top waiter can steal this lock.
	 */
	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * It's safe to dereference waiter as it cannot go away as
	 * long as we hold lock->wait_lock. The waiter task needs to
	 * acquire it in order to dequeue the waiter.
	 */
	wake_up_process(waiter->task);
}

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held, after try_to_take_rt_mutex()
 * has just failed.
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex *next_lock = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	current->pi_blocked_on = NULL;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	if (!owner)
		return;

	if (first) {

		raw_spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		/* Store the lock on which owner is blocked or NULL */
		next_lock = task_blocked_on_lock(owner);

		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!next_lock)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);

	raw_spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case the task got a new priority.
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || waiter->list_entry.prio == task->prio) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:		 the rt_mutex to take
 * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
 * 			 or TASK_UNINTERRUPTIBLE)
 * @timeout:		 the pre-initialized and started timer, or NULL for none
 * @waiter:		 the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule_rt_mutex(lock);

		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}

static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
				     struct rt_mutex_waiter *w)
{
	/*
	 * If the result is not -EDEADLOCK or the caller requested
	 * deadlock detection, nothing to do here.
	 */
	if (res != -EDEADLOCK || detect_deadlock)
		return;

	/*
	 * Yell loudly and stop the task right here.
	 */
	rt_mutex_print_deadlock(w);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);

	raw_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
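		/*
		 * If the timer already fired, mark the timeout as expired
		 * so __rt_mutex_slowlock() returns -ETIMEDOUT.
		 */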
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret)) {
		remove_waiter(lock, &waiter);
		rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
	}

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	raw_spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock, current, NULL);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	/*
	 * We must be careful here if the fast path is enabled. If we
	 * have no waiters queued we cannot set owner to NULL here
	 * because of:
	 *
	 * foo->lock->owner = NULL;
	 *			rtmutex_lock(foo->lock);   <- fast path
	 *			free = atomic_dec_and_test(foo->refcnt);
	 *			rtmutex_unlock(foo->lock); <- fast path
	 *			if (free)
	 *				kfree(foo);
	 * raw_spin_unlock(foo->lock->wait_lock);
	 *
	 * So for the fastpath enabled kernel:
	 *
	 * Nothing can set the waiters bit as long as we hold
	 * lock->wait_lock. So we do the following sequence:
	 *
	 *	owner = rt_mutex_owner(lock);
	 *	clear_rt_mutex_waiters(lock);
	 *	raw_spin_unlock(&lock->wait_lock);
	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
	 *		return;
	 *	goto retry;
	 *
	 * The fastpath disabled variant is simple as all access to
	 * lock->owner is serialized by lock->wait_lock:
	 *
	 *	lock->owner = NULL;
	 *	raw_spin_unlock(&lock->wait_lock);
	 */
	while (!rt_mutex_has_waiters(lock)) {
		/* Drops lock->wait_lock ! */
		if (unlock_rt_mutex_safe(lock) == true)
			return;
		/* Relock the rtmutex and try again */
		raw_spin_lock(&lock->wait_lock);
	}

	/*
	 * The wakeup next waiter path does not suffer from the above
	 * race. See the comments there.
	 */
	wakeup_next_waiter(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
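	/*
	 * When deadlock detection is requested we always take the slow
	 * path, as only the slow path walks the lock chain.
	 */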
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
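
/*
 * Minimal usage sketch (illustrative only, "my_lock" is a made-up name):
 *
 *	static DEFINE_RT_MUTEX(my_lock);
 *
 *	rt_mutex_lock(&my_lock);
 *	... critical section, may sleep ...
 *	rt_mutex_unlock(&my_lock);
 *
 * See include/linux/rtmutex.h for the initializers.
 */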

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
 *
 * @lock: 		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0 		on success
 * -EINTR 	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
						 int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptibly, with a timeout
 *			structure provided by the caller
 *
 * @lock: 		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0 		on success
 * -EINTR 	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock: 	the rt_mutex to be locked
 * @proxy_owner:	the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock: 	the rt_mutex to be locked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 1;
	}

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, null if none. hrtimer should already have
 * 			been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter,
			       int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	set_current_state(TASK_INTERRUPTIBLE);

	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}