/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif
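
/*
 * Illustrative note, added for clarity and not part of the original
 * file: RCU_KTHREAD_PRIO is the SCHED_FIFO priority used for RCU's
 * kthreads (see rcu_cpu_kthread_setrt() below).  When CONFIG_RCU_BOOST
 * is not set, RCU_BOOST_PRIO falls back to the same value, so both
 * macros evaluate to the minimal real-time priority of 1.
 */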

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
	printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in an RCU read-side critical section.  There might be any number
 * of tasks blocked while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	rdp->passed_quiesce_gpnum = rdp->gpnum;
	barrier();
	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Tree-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
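
/*
 * Illustrative sketch, added for clarity and not part of the original
 * file ("gp" and do_something_with() are hypothetical names): the fast
 * path above serves the usual reader idiom:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p);
 *	rcu_read_unlock();
 *
 * On the lock side, only the counter increment and the compiler
 * barrier execute; the shared rcu_node state is touched only if the
 * reader blocks within the critical section.
 */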

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if this is the last entry on the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static noinline void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_qs(smp_processor_id());

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else
			raw_spin_unlock_irqrestore(&rnp->lock, flags);

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Tree-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1)
		--t->rcu_read_lock_nesting;
	else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
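
/*
 * Worked example, added for clarity and not part of the original file:
 * for a reader nested two deep, ->rcu_read_lock_nesting moves through
 * 0 -> 1 -> 2 on the two rcu_read_lock() calls, then 2 -> 1 on the
 * inner rcu_read_unlock().  The outermost rcu_read_unlock() parks the
 * counter at INT_MIN while rcu_read_unlock_special() runs, so that a
 * preempting context switch sees a negative value (the "< 0" check in
 * rcu_preempt_note_context_switch() above) and knows that cleanup is
 * already in progress, and only then resets the counter to 0.
 */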

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	printk(KERN_CONT "\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		printk(KERN_CONT " P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}

/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * time of the next stall-warning message comfortably far into the
 * future.
 */
static void rcu_preempt_stall_reset(void)
{
	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns a bitmask (RCU_OFL_TASKS_NORM_GP and/or RCU_OFL_TASKS_EXP_GP)
 * indicating whether tasks were blocking the current normal and/or
 * expedited grace period on the specified rcu_node structure, and
 * zero otherwise.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}

#ifdef CONFIG_RCU_BOOST
	/* In case root is being boosted and leaf is not. */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
	return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Do CPU-offline processing for preemptible RCU.
 */
static void rcu_preempt_cleanup_dead_cpu(int cpu)
{
	rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
}

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);
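
/*
 * Illustrative sketch, added for clarity and not part of the original
 * file (struct foo and foo_reclaim() are hypothetical names): a typical
 * caller embeds an rcu_head in the protected structure and reclaims
 * from the callback, given a pointer p already unlinked so that no new
 * readers can find it:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	call_rcu(&p->rcu, foo_reclaim);
 */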

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
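
/*
 * Illustrative sketch, added for clarity and not part of the original
 * file ("gp", "mylock", and the surrounding locking are hypothetical):
 * the classic publish-then-wait update pattern that this function
 * supports:
 *
 *	spin_lock(&mylock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&mylock));
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&mylock);
 *	synchronize_rcu();
 *	kfree(old);	(safe: all pre-existing readers have finished)
 */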

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks))
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks
 * onto the ->blkd_tasks lists and wait for those lists to drain.  This
 * consumes significant time on all CPUs and is unfriendly to real-time
 * workloads, and is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restrictions will result in deadlock.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret; /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret; /* Others did our work for us. */

	/* force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
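
/*
 * Illustrative sketch, added for clarity and not part of the original
 * file, of the batching advice in the header comment above (update_one()
 * and the loop bounds are hypothetical).  Rather than:
 *
 *	for (i = 0; i < n; i++) {
 *		update_one(i);
 *		synchronize_rcu_expedited();
 *	}
 *
 * perform all updates first and wait for a single grace period:
 *
 *	for (i = 0; i < n; i++)
 *		update_one(i);
 *	synchronize_rcu();
 */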

/*
 * Check to see if there is any immediate preemptible-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptible RCU have callbacks on this CPU?
 */
static int rcu_preempt_cpu_has_callbacks(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
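
/*
 * Illustrative note, added for clarity and not part of the original
 * file: a common use is in module-exit paths, where a module that
 * posted call_rcu() callbacks invoking its own code must wait for
 * those callbacks before unloading (foo_exit() is hypothetical):
 *
 *	static void __exit foo_exit(void)
 *	{
 *		remove_all_sources_of_new_callbacks();
 *		rcu_barrier();
 *	}
 */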

/*
 * Initialize preemptible RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptible RCU's callbacks from dying CPU to other online CPU
 * and record a quiescent state.
 */
static void rcu_preempt_cleanup_dying_cpu(void)
{
	rcu_cleanup_dying_cpu(&rcu_preempt_state);
}

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, there is no need to suppress
 * its CPU stall warnings.
 */
static void rcu_preempt_stall_reset(void)
{
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_cleanup_dead_cpu(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 *
 * Because there is no preemptible RCU, we use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks.
 */
static int rcu_preempt_cpu_has_callbacks(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptible RCU, there is no cleanup to do.
 */
static void rcu_preempt_cleanup_dying_cpu(void)
{
}

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */


#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
}
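
/*
 * Design note, added for clarity and not part of the original comment
 * block: because rt_mutex_lock() applies priority inheritance, the
 * instant the booster kthread blocks on mtx above, task t inherits the
 * kthread's real-time priority (RCU_BOOST_PRIO, set when the kthread is
 * spawned below).  Task t can then preempt the lower-priority work that
 * was keeping it from completing its RCU read-side critical section.
 */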

/*
 * Timer handler to initiate waking up of boost kthreads that
 * have yielded the CPU due to excessive numbers of tasks to
 * boost.  We wake up the per-rcu_node kthread, which in turn
 * will wake up the booster kthread.
 */
static void rcu_boost_kthread_timer(unsigned long arg)
{
	invoke_rcu_node_kthread((struct rcu_node *)arg);
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			trace_rcu_utilization("End boost kthread@rcu_yield");
			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases,
 * but irqs remain disabled.  The ->boost_kthread_task is immortal,
 * so we don't need to worry about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t != NULL)
			wake_up_process(t);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task))
		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

/*
 * Set the affinity of the boost kthread.  The CPU-hotplug locks are
 * held, so no one should be messing with the existence of the boost
 * kthread.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
					  cpumask_var_t cm)
{
	struct task_struct *t;

	t = rnp->boost_kthread_task;
	if (t != NULL)
		set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
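
/*
 * Worked example, added for clarity and not part of the original file
 * (the configuration values are hypothetical): with
 * CONFIG_RCU_BOOST_DELAY=500 milliseconds and HZ=250, the above yields
 * DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies, i.e. boosting becomes
 * eligible roughly half a second after the grace period begins.
 */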

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp,
						 int rnp_index)
{
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;
	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Stop RCU's per-CPU kthread when its CPU goes offline.
 */
rcu_stop_cpu_kthread(int cpu)1481 static void rcu_stop_cpu_kthread(int cpu)
1482 {
1483 	struct task_struct *t;
1484 
1485 	/* Stop the CPU's kthread. */
1486 	t = per_cpu(rcu_cpu_kthread_task, cpu);
1487 	if (t != NULL) {
1488 		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1489 		kthread_stop(t);
1490 	}
1491 }
1492 
1493 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1494 
rcu_kthread_do_work(void)1495 static void rcu_kthread_do_work(void)
1496 {
1497 	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1498 	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1499 	rcu_preempt_do_callbacks();
1500 }
1501 
1502 /*
1503  * Wake up the specified per-rcu_node-structure kthread.
1504  * Because the per-rcu_node kthreads are immortal, we don't need
1505  * to do anything to keep them alive.
1506  */
invoke_rcu_node_kthread(struct rcu_node * rnp)1507 static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1508 {
1509 	struct task_struct *t;
1510 
1511 	t = rnp->node_kthread_task;
1512 	if (t != NULL)
1513 		wake_up_process(t);
1514 }
1515 
1516 /*
1517  * Set the specified CPU's kthread to run RT or not, as specified by
1518  * the to_rt argument.  The CPU-hotplug locks are held, so the task
1519  * is not going away.
1520  */
rcu_cpu_kthread_setrt(int cpu,int to_rt)1521 static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1522 {
1523 	int policy;
1524 	struct sched_param sp;
1525 	struct task_struct *t;
1526 
1527 	t = per_cpu(rcu_cpu_kthread_task, cpu);
1528 	if (t == NULL)
1529 		return;
1530 	if (to_rt) {
1531 		policy = SCHED_FIFO;
1532 		sp.sched_priority = RCU_KTHREAD_PRIO;
1533 	} else {
1534 		policy = SCHED_NORMAL;
1535 		sp.sched_priority = 0;
1536 	}
1537 	sched_setscheduler_nocheck(t, policy, &sp);
1538 }
1539 
1540 /*
1541  * Timer handler to initiate the waking up of per-CPU kthreads that
1542  * have yielded the CPU due to excess numbers of RCU callbacks.
1543  * We wake up the per-rcu_node kthread, which in turn will wake up
1544  * the booster kthread.
1545  */
rcu_cpu_kthread_timer(unsigned long arg)1546 static void rcu_cpu_kthread_timer(unsigned long arg)
1547 {
1548 	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1549 	struct rcu_node *rnp = rdp->mynode;
1550 
1551 	atomic_or(rdp->grpmask, &rnp->wakemask);
1552 	invoke_rcu_node_kthread(rnp);
1553 }
1554 
1555 /*
1556  * Drop to non-real-time priority and yield, but only after posting a
1557  * timer that will cause us to regain our real-time priority if we
1558  * remain preempted.  Either way, we restore our real-time priority
1559  * before returning.
1560  */
rcu_yield(void (* f)(unsigned long),unsigned long arg)1561 static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1562 {
1563 	struct sched_param sp;
1564 	struct timer_list yield_timer;
1565 	int prio = current->rt_priority;
1566 
1567 	setup_timer_on_stack(&yield_timer, f, arg);
1568 	mod_timer(&yield_timer, jiffies + 2);
1569 	sp.sched_priority = 0;
1570 	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1571 	set_user_nice(current, 19);
1572 	schedule();
1573 	set_user_nice(current, 0);
1574 	sp.sched_priority = prio;
1575 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1576 	del_timer(&yield_timer);
1577 }

/*
 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
 * This can happen while the corresponding CPU is either coming online
 * or going offline.  We cannot wait until the CPU is fully online
 * before starting the kthread, because the various notifier functions
 * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
 * the corresponding CPU is online.
 *
 * Return 1 if the kthread needs to stop, 0 otherwise.
 *
 * Caller must disable bh.  This function can momentarily enable it.
 */
static int rcu_cpu_kthread_should_stop(int cpu)
{
	while (cpu_is_offline(cpu) ||
	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
	       smp_processor_id() != cpu) {
		if (kthread_should_stop())
			return 1;
		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
		local_bh_enable();
		schedule_timeout_uninterruptible(1);
		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
		local_bh_disable();
	}
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	return 0;
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static int rcu_cpu_kthread(void *arg)
{
	int cpu = (int)(long)arg;
	unsigned long flags;
	int spincnt = 0;
	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
	char work;
	char *workp = &per_cpu(rcu_cpu_has_work, cpu);

	trace_rcu_utilization("Start CPU kthread@init");
	for (;;) {
		*statusp = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End CPU kthread@rcu_wait");
		rcu_wait(*workp != 0 || kthread_should_stop());
		trace_rcu_utilization("Start CPU kthread@rcu_wait");
		local_bh_disable();
		if (rcu_cpu_kthread_should_stop(cpu)) {
			local_bh_enable();
			break;
		}
		*statusp = RCU_KTHREAD_RUNNING;
		per_cpu(rcu_cpu_kthread_loops, cpu)++;
		local_irq_save(flags);
		work = *workp;
		*workp = 0;
		local_irq_restore(flags);
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp != 0)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			*statusp = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization("End CPU kthread@rcu_yield");
			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
			trace_rcu_utilization("Start CPU kthread@rcu_yield");
			spincnt = 0;
		}
	}
	*statusp = RCU_KTHREAD_STOPPED;
	trace_rcu_utilization("End CPU kthread@term");
	return 0;
}

/*
 * Spawn a per-CPU kthread, setting up affinity and priority.
 * Because the CPU hotplug lock is held, no other CPU will be attempting
 * to manipulate rcu_cpu_kthread_task.  There might be another CPU
 * attempting to access it during boot, but the locking in kthread_bind()
 * will enforce sufficient ordering.
 *
 * Please note that we cannot simply refuse to wake up the per-CPU
 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
 * which can result in softlockup complaints if the task ends up being
 * idle for more than a couple of minutes.
 *
 * However, please note also that we cannot bind the per-CPU kthread to its
 * CPU until that CPU is fully online.  We also cannot wait until the
 * CPU is fully online before we create its per-CPU kthread, as this would
 * deadlock the system when CPU notifiers tried waiting for grace
 * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
 * is online.  If its CPU is not yet fully online, then the code in
 * rcu_cpu_kthread() will wait until it is fully online, and then do
 * the binding.
 */
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
		return 0;
	t = kthread_create_on_node(rcu_cpu_kthread,
				   (void *)(long)cpu,
				   cpu_to_node(cpu),
				   "rcuc/%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	if (cpu_online(cpu))
		kthread_bind(t, cpu);
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	per_cpu(rcu_cpu_kthread_task, cpu) = t;
	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}
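
/*
 * A hypothetical example: on a fully online four-CPU system, the boot
 * path below ends up with kthreads named "rcuc/0" through "rcuc/3",
 * each eventually bound to its CPU and running SCHED_FIFO at priority
 * RCU_KTHREAD_PRIO.
 */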

/*
 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
 * kthreads when needed.  We ignore requests to wake up kthreads
 * for offline CPUs, which is OK because force_quiescent_state()
 * takes care of this case.
 */
static int rcu_node_kthread(void *arg)
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp = (struct rcu_node *)arg;
	struct sched_param sp;
	struct task_struct *t;

	for (;;) {
		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
		rcu_wait(atomic_read(&rnp->wakemask) != 0);
		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		mask = atomic_xchg(&rnp->wakemask, 0);
		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
			if ((mask & 0x1) == 0)
				continue;
			preempt_disable();
			t = per_cpu(rcu_cpu_kthread_task, cpu);
			if (!cpu_online(cpu) || t == NULL) {
				preempt_enable();
				continue;
			}
			per_cpu(rcu_cpu_has_work, cpu) = 1;
			sp.sched_priority = RCU_KTHREAD_PRIO;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
			preempt_enable();
		}
	}
	/* NOTREACHED */
	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
	return 0;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We exclude outgoingcpu from the affinity set; callers pass -1 when
 * there is no outgoing CPU.  If the resulting affinity set is empty,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	cpumask_var_t cm;
	int cpu;
	unsigned long mask = rnp->qsmaskinit;

	if (rnp->node_kthread_task == NULL)
		return;
	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	cpumask_clear(cm);
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
	rcu_boost_kthread_setaffinity(rnp, cm);
	free_cpumask_var(cm);
}
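
/*
 * A hypothetical worked example of the mask walk above: for a leaf
 * rcu_node covering grplo = 0 through grphi = 15 with qsmaskinit = 0x5,
 * bit 0 maps to CPU 0 and bit 2 maps to CPU 2, so the affinity set
 * becomes CPUs { 0, 2 }; with outgoingcpu = 2 it shrinks to { 0 }.
 */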

/*
 * Spawn a per-rcu_node kthread, setting priority and affinity.
 * Called during boot before online/offline can happen, or, if
 * during runtime, with the main CPU-hotplug locks held.  So only
 * one of these can be executing at a time.
 */
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
						struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - &rsp->node[0];
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    rnp->qsmaskinit == 0)
		return 0;
	if (rnp->node_kthread_task == NULL) {
		t = kthread_create(rcu_node_kthread, (void *)rnp,
				   "rcun/%d", rnp_index);
		if (IS_ERR(t))
			return PTR_ERR(t);
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->node_kthread_task = t;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		sp.sched_priority = 99;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	}
	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	int cpu;
	struct rcu_node *rnp;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu) {
		per_cpu(rcu_cpu_has_work, cpu) = 0;
		if (cpu_online(cpu))
			(void)rcu_spawn_one_cpu_kthread(cpu);
	}
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active) {
		(void)rcu_spawn_one_cpu_kthread(cpu);
		if (rnp->node_kthread_task == NULL)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_stop_cpu_kthread(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU
 * needs any flavor of RCU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following four preprocessor symbols control this state machine:
 *
 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
 *	to satisfy RCU.  Beyond this point, it is better to incur a periodic
 *	scheduling-clock interrupt than to loop through the state machine
 *	at full power.
 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
 *	optional if RCU does not need anything immediately from this
 *	CPU, even if this CPU still has RCU callbacks queued.  The first
 *	few passes through the state machine are mandatory: we need to
 *	give the state machine a chance to communicate a quiescent state
 *	to the RCU core.
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
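
/*
 * A quick worked example (assuming HZ=250, a common configuration):
 * RCU_IDLE_GP_DELAY is 6 jiffies, which rcu_prepare_for_idle_init()
 * below converts via jiffies_to_usecs() to 6 * (1000000 / 250) =
 * 24000 us, or about 24 ms of permitted dyntick-idle sleep with
 * non-lazy callbacks pending.  RCU_IDLE_LAZY_GP_DELAY is 6 * HZ
 * jiffies, which is six seconds for any HZ value.
 */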

static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
static ktime_t rcu_idle_gp_wait;	/* If some non-lazy callbacks. */
static ktime_t rcu_idle_lazy_gp_wait;	/* If only lazy callbacks. */

/*
 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
 * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
 * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
 * it is better to incur scheduling-clock interrupts than to spin
 * continuously for the same time duration!
 */
int rcu_needs_cpu(int cpu)
{
	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu))
		return 0;
	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
}

/*
 * Does the specified flavor of RCU have non-lazy callbacks pending on
 * the specified CPU?  Both RCU flavor and CPU are specified by the
 * rcu_data structure.
 */
static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
{
	return rdp->qlen != rdp->qlen_lazy;
}

#ifdef CONFIG_TREE_PREEMPT_RCU

/*
 * Are there non-lazy RCU-preempt callbacks?  (There cannot be if there
 * is no RCU-preempt in the kernel.)
 */
static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	return __rcu_cpu_has_nonlazy_callbacks(rdp);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	return false;
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
 */
static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
{
	return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
	       __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
	       rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
}

/*
 * Timer handler used to force CPU to start pushing its remaining RCU
 * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
 * real work is done upon re-entry to idle, or by the next scheduling-clock
 * interrupt should idle not be re-entered.
 */
static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
{
	trace_rcu_prep_idle("Timer");
	return HRTIMER_NORESTART;
}
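
/*
 * For illustration only: rcu_prepare_for_idle() below arms this timer
 * when it lets a CPU go dyntick-idle with callbacks still queued,
 * choosing the shorter wait when any of those callbacks are non-lazy
 * (a sketch of the actual call):
 *
 *	hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
 *		      rcu_idle_gp_wait, HRTIMER_MODE_REL);
 */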

/*
 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
	static int firsttime = 1;
	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);

	hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtp->function = rcu_idle_gp_timer_func;
	if (firsttime) {
		unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);

		rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
		upj = jiffies_to_usecs(RCU_IDLE_LAZY_GP_DELAY);
		rcu_idle_lazy_gp_wait = ns_to_ktime(upj * (u64)1000);
		firsttime = 0;
	}
}

/*
 * Clean up for exit from idle.  Because we are exiting from idle, there
 * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
 * do nothing if this timer is not active, so just cancel it unconditionally.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
}

/*
 * Check to see if any RCU-related work can be done by the current CPU,
 * and if so, schedule a softirq to get it done.  This function is part
 * of the RCU implementation; it is -not- an exported member of the RCU API.
 *
 * The idea is for the current CPU to clear out all work required by the
 * RCU core for the current grace period, so that this CPU can be permitted
 * to enter dyntick-idle mode.  In some cases, it will need to be awakened
 * at the end of the grace period by whatever CPU ends the grace period.
 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
 * number of wakeups by a modest integer factor.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The per-CPU rcu_dyntick_drain variable controls the sequencing.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
	/*
	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
	 * Also reset state to avoid prejudicing later attempts.
	 */
	if (!rcu_cpu_has_callbacks(cpu)) {
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		trace_rcu_prep_idle("No callbacks");
		return;
	}

	/*
	 * If in holdoff mode, just return.  We will presumably have
	 * refrained from disabling the scheduling-clock tick.
	 */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
		trace_rcu_prep_idle("In holdoff");
		return;
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu) &&
		   !local_softirq_pending()) {
		/* Can we go dyntick-idle despite still having callbacks? */
		trace_rcu_prep_idle("Dyntick with callbacks");
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		if (rcu_cpu_has_nonlazy_callbacks(cpu))
			hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
				      rcu_idle_gp_wait, HRTIMER_MODE_REL);
		else
			hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
				      rcu_idle_lazy_gp_wait, HRTIMER_MODE_REL);
		return; /* Nothing more to do immediately. */
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		trace_rcu_prep_idle("Begin holdoff");
		invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
		return;
	}

	/*
	 * Do one step of pushing the remaining RCU callbacks through
	 * the RCU core state machine.
	 */
#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state, 0);
	}
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
	}

	/*
	 * If RCU callbacks are still pending, RCU still needs this CPU.
	 * So try forcing the callbacks through the grace period.
	 */
	if (rcu_cpu_has_callbacks(cpu)) {
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else
		trace_rcu_prep_idle("Callbacks drained");
}
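
/*
 * A hypothetical walk through the sequencing above, using the values
 * RCU_IDLE_FLUSHES = 5 and RCU_IDLE_OPT_FLUSHES = 3: the first pass
 * initializes rcu_dyntick_drain to 5 and falls through to flush
 * callbacks.  While the counter remains above 3, each later pass is a
 * mandatory flush.  Once the counter is 3 or below, a pass that finds
 * nothing pending goes dyntick-idle immediately and arms
 * rcu_idle_gp_timer; if the counter instead decrements to 0, the CPU
 * enters holdoff for the current jiffy and keeps the tick.
 */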

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_CPU_STALL_INFO

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);

	sprintf(cp, "drain=%d %c timer=%lld",
		per_cpu(rcu_dyntick_drain, cpu),
		per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
		hrtimer_active(hrtp)
			? ktime_to_us(hrtimer_get_remaining(hrtp))
			: -1);
}
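
/*
 * A made-up example of the string produced above:
 *
 *	drain=0 H timer=-1
 *
 * meaning that the drain counter is zero, the CPU is in holdoff for the
 * current jiffy ('H' rather than '.'), and rcu_idle_gp_timer is not
 * active (so -1 is printed instead of the remaining microseconds).
 */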

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT "\n");
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period
 * (flavor specified by rsp), then print the number of scheduling
 * clock interrupts the CPU has taken during the time that it has
 * been aware.  Otherwise, print the number of RCU grace periods
 * that this CPU is ignorant of, for example, "1" if the CPU was
 * aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_dynticks *rdtp = rdp->dynticks;
	char *ticks_title;
	unsigned long ticks_value;

	if (rsp->gpnum == rdp->gpnum) {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	} else {
		ticks_title = "GPs behind";
		ticks_value = rsp->gpnum - rdp->gpnum;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
	       cpu, ticks_value, ticks_title,
	       atomic_read(&rdtp->dynticks) & 0xfff,
	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
	       fast_no_hz);
}
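
/*
 * A made-up example of one line of the resulting console output:
 *
 *	3: (2 GPs behind) idle=4d2/1/0 drain=0 . timer=-1
 *
 * Here CPU 3 is two grace periods behind, the low bits of its dynticks
 * counter are 0x4d2 with a nesting depth of 1 and no NMI nesting, and
 * the trailing fields come from print_cpu_stall_fast_no_hz() above.
 */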

/* Terminate the stall-info list. */
static void print_cpu_stall_info_end(void)
{
	printk(KERN_ERR "\t");
}

/* Zero ->ticks_this_gp for all flavors of RCU. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
}

/* Increment ->ticks_this_gp for all flavors of RCU. */
static void increment_cpu_stall_ticks(void)
{
	__get_cpu_var(rcu_sched_data).ticks_this_gp++;
	__get_cpu_var(rcu_bh_data).ticks_this_gp++;
#ifdef CONFIG_TREE_PREEMPT_RCU
	__get_cpu_var(rcu_preempt_data).ticks_this_gp++;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT " {");
}

static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	printk(KERN_CONT " %d", cpu);
}

static void print_cpu_stall_info_end(void)
{
	printk(KERN_CONT "} ");
}

static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
}

static void increment_cpu_stall_ticks(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */