/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include "../locking/rtmutex_common.h"

static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
{
	/*
	 * In order to read the offloaded state of an rdp in a safe
	 * and stable way, and to prevent its value from changing
	 * under us, we must either hold the barrier mutex, the cpu
	 * hotplug lock (read or write) or the nocb lock. Local
	 * non-preemptible reads are also safe. NOCB kthreads and
	 * timers have their own means of synchronization against the
	 * offloaded state updaters.
	 */
	RCU_LOCKDEP_WARN(
		!(lockdep_is_held(&rcu_state.barrier_mutex) ||
		  (IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) ||
		  rcu_lockdep_is_held_nocb(rdp) ||
		  (rdp == this_cpu_ptr(&rcu_data) &&
		   !(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible())) ||
		  rcu_current_is_nocb_kthread(rdp)),
		"Unsafe read of RCU_NOCB offloaded state"
	);

	return rcu_segcblist_is_offloaded(&rdp->cblist);
}
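
/*
 * Illustrative sketch (editor's note, hypothetical caller, not part of
 * this file): one pattern that satisfies the lockdep check above is to
 * read the offloaded state while holding the barrier mutex:
 *
 *	bool offloaded;
 *
 *	mutex_lock(&rcu_state.barrier_mutex);
 *	offloaded = rcu_rdp_is_offloaded(rdp);
 *	// ... act on the now-stable offloaded state ...
 *	mutex_unlock(&rcu_state.barrier_mutex);
 */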

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.
 */
static void __init rcu_bootup_announce_oddness(void)
{
	if (IS_ENABLED(CONFIG_RCU_TRACE))
		pr_info("\tRCU event tracing is enabled.\n");
	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
			RCU_FANOUT);
	if (rcu_fanout_exact)
		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		pr_info("\tRCU lockdep checking is enabled.\n");
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
		pr_info("\tRCU strict (and thus non-scalable) grace periods are enabled.\n");
	if (RCU_NUM_LVLS >= 4)
		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
	if (RCU_FANOUT_LEAF != 16)
		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
			RCU_FANOUT_LEAF);
	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
			rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_BOOST
	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
		kthread_prio, CONFIG_RCU_BOOST_DELAY);
#endif
	if (blimit != DEFAULT_RCU_BLIMIT)
		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
	if (qhimark != DEFAULT_RCU_QHIMARK)
		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
	if (qlowmark != DEFAULT_RCU_QLOMARK)
		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
	if (qovld != DEFAULT_RCU_QOVLD)
		pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld);
	if (jiffies_till_first_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
	if (jiffies_till_next_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
	if (jiffies_till_sched_qs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
	if (rcu_kick_kthreads)
		pr_info("\tKick kthreads if too-long grace period.\n");
	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
		pr_info("\tRCU callback double-/use-after-free debug is enabled.\n");
	if (gp_preinit_delay)
		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
	if (gp_init_delay)
		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
	if (gp_cleanup_delay)
		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
	if (!use_softirq)
		pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
		pr_info("\tRCU debug extended QS entry/exit.\n");
	rcupdate_announce_bootup_oddness();
}

#ifdef CONFIG_PREEMPT_RCU

static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
static void rcu_read_unlock_special(struct task_struct *t);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/* Flags for rcu_preempt_ctxt_queue() decision table. */
#define RCU_GP_TASKS	0x8
#define RCU_EXP_TASKS	0x4
#define RCU_GP_BLKD	0x2
#define RCU_EXP_BLKD	0x1
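
/*
 * Worked example (editor's note): the four flags above simply OR
 * together. A task preempted while the normal GP is already waiting
 * on ->blkd_tasks (RCU_GP_TASKS) and whose CPU still blocks the
 * expedited GP (RCU_EXP_BLKD) yields blkd_state == 0x8 + 0x1 == 0x9,
 * which lands in one of the queue-at-the-tail cases of the switch
 * statement in rcu_preempt_ctxt_queue() below.
 */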

/*
 * Queues a task preempted within an RCU-preempt read-side critical
 * section into the appropriate location within the ->blkd_tasks list,
 * depending on the states of any ongoing normal and expedited grace
 * periods. The ->gp_tasks pointer indicates which element the normal
 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
 * indicates which element the expedited grace period is waiting on (again,
 * NULL if none). If a grace period is waiting on a given element in the
 * ->blkd_tasks list, it also waits on all subsequent elements. Thus,
 * adding a task to the tail of the list blocks any grace period that is
 * already waiting on one of the elements. In contrast, adding a task
 * to the head of the list won't block any grace period that is already
 * waiting on one of the elements.
 *
 * This queuing is imprecise, and can sometimes make an ongoing grace
 * period wait for a task that is not strictly speaking blocking it.
 * Given the choice, we needlessly block a normal grace period rather than
 * blocking an expedited grace period.
 *
 * Note that an endless sequence of expedited grace periods still cannot
 * indefinitely postpone a normal grace period. Eventually, all of the
 * fixed number of preempted tasks blocking the normal grace period that are
 * not also blocking the expedited grace period will resume and complete
 * their RCU read-side critical sections. At that point, the ->gp_tasks
 * pointer will equal the ->exp_tasks pointer, at which point the end of
 * the corresponding expedited grace period will also be the end of the
 * normal grace period.
 */
static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
	__releases(rnp->lock) /* But leaves interrupts disabled. */
{
	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
	struct task_struct *t = current;

	raw_lockdep_assert_held_rcu_node(rnp);
	WARN_ON_ONCE(rdp->mynode != rnp);
	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
	/* RCU better not be waiting on newly onlined CPUs! */
	WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
		     rdp->grpmask);

	/*
	 * Decide where to queue the newly blocked task. In theory,
	 * this could be an if-statement. In practice, when I tried
	 * that, it was quite messy.
	 */
	switch (blkd_state) {
	case 0:
	case RCU_EXP_TASKS:
	case RCU_EXP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS:
	case RCU_GP_TASKS + RCU_EXP_TASKS:

		/*
		 * Blocking neither GP, or first task blocking the normal
		 * GP but not blocking the already-waiting expedited GP.
		 * Queue at the head of the list to avoid unnecessarily
		 * blocking the already-waiting GPs.
		 */
		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case RCU_EXP_BLKD:
	case RCU_GP_BLKD:
	case RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:

		/*
		 * First task arriving that blocks either GP, or first task
		 * arriving that blocks the expedited GP (with the normal
		 * GP already waiting), or a task arriving that blocks
		 * both GPs with both GPs already waiting. Queue at the
		 * tail of the list to avoid any GP waiting on any of the
		 * already queued tasks that are not blocking it.
		 */
		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case RCU_EXP_TASKS + RCU_EXP_BLKD:
	case RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_EXP_BLKD:

		/*
		 * Second or subsequent task blocking the expedited GP.
		 * The task either does not block the normal GP, or is the
		 * first task blocking the normal GP. Queue just after
		 * the first task blocking the expedited GP.
		 */
		list_add(&t->rcu_node_entry, rnp->exp_tasks);
		break;

	case RCU_GP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:

		/*
		 * Second or subsequent task blocking the normal GP.
		 * The task does not block the expedited GP. Queue just
		 * after the first task blocking the normal GP.
		 */
		list_add(&t->rcu_node_entry, rnp->gp_tasks);
		break;

	default:

		/* Yet another exercise in excessive paranoia. */
		WARN_ON_ONCE(1);
		break;
	}

	/*
	 * We have now queued the task. If it was the first one to
	 * block either grace period, update the ->gp_tasks and/or
	 * ->exp_tasks pointers, respectively, to reference the newly
	 * blocked tasks.
	 */
	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
		WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
	}
	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
		WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
		     !(rnp->qsmask & rdp->grpmask));
	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
		     !(rnp->expmask & rdp->grpmask));
	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */

	/*
	 * Report the quiescent state for the expedited GP. This expedited
	 * GP should not be able to end until we report, so there should be
	 * no need to check for a subsequent expedited GP. (Though we are
	 * still in a quiescent state in any case.)
	 */
	if (blkd_state & RCU_EXP_BLKD && rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);
	else
		WARN_ON_ONCE(rdp->cpu_no_qs.b.exp);
}

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.
 * Note that this does not necessarily mean that the task currently running
 * on the CPU is in a quiescent state: Instead, it means that the current
 * grace period need not wait on any RCU read-side critical section that
 * starts later on this CPU. It also means that if the current task is
 * in an RCU read-side critical section, it has already added itself to
 * some leaf rcu_node structure's ->blkd_tasks list. In addition to the
 * current task, there might be any number of other tasks blocked while
 * in an RCU read-side critical section.
 *
 * Unlike non-preemptible-RCU, quiescent state reports for expedited
 * grace periods are handled separately via deferred quiescent states
 * and context switch events.
 *
 * Callers to this function must disable preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
	if (__this_cpu_read(rcu_data.cpu_no_qs.b.norm)) {
		trace_rcu_grace_period(TPS("rcu_preempt"),
				       __this_cpu_read(rcu_data.gp_seq),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
		barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from. If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section. Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	struct task_struct *t = current;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;

	trace_rcu_utilization(TPS("Start context switch"));
	lockdep_assert_irqs_disabled();
	WARN_ONCE(!preempt && rcu_preempt_depth() > 0, "Voluntary context switch within RCU read-side critical section!");
	if (rcu_preempt_depth() > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp);
		t->rcu_read_unlock_special.b.blocked = true;
		t->rcu_blocked_node = rnp;

		/*
		 * Verify the CPU's sanity, trace the preemption, and
		 * then queue the task as required based on the states
		 * of any ongoing and expedited grace periods.
		 */
		WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp));
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		trace_rcu_preempt_task(rcu_state.name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gp_seq
				       : rcu_seq_snap(&rnp->gp_seq));
		rcu_preempt_ctxt_queue(rnp, rdp);
	} else {
		rcu_preempt_deferred_qs(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally. Either way, we can now note a quiescent state
	 * for this CPU. Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_qs();
	if (rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);
	rcu_tasks_qs(current, preempt);
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure. If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->gp_tasks) != NULL;
}

/* limit value for ->rcu_read_lock_nesting. */
#define RCU_NEST_PMAX (INT_MAX / 2)

static void rcu_preempt_read_enter(void)
{
	WRITE_ONCE(current->rcu_read_lock_nesting, READ_ONCE(current->rcu_read_lock_nesting) + 1);
}

static int rcu_preempt_read_exit(void)
{
	int ret = READ_ONCE(current->rcu_read_lock_nesting) - 1;

	WRITE_ONCE(current->rcu_read_lock_nesting, ret);
	return ret;
}

static void rcu_preempt_depth_set(int val)
{
	WRITE_ONCE(current->rcu_read_lock_nesting, val);
}

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	rcu_preempt_read_enter();
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && rcu_state.gp_kthread)
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
	barrier(); /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier(); // critical section before exit code.
	if (rcu_preempt_read_exit() == 0) {
		barrier(); // critical-section exit before .s check.
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
	}
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		int rrln = rcu_preempt_depth();

		WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
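
/*
 * Illustrative usage (editor's note, hypothetical reader code):
 * read-side critical sections nest, and only the outermost
 * __rcu_read_unlock() can take the special-case path above:
 *
 *	rcu_read_lock();
 *	rcu_read_lock();	// ->rcu_read_lock_nesting == 2
 *	rcu_read_unlock();	// nesting drops to 1, no special work
 *	rcu_read_unlock();	// outermost: rcu_preempt_read_exit()
 *				// returns 0, so ->rcu_read_unlock_special
 *				// is checked and handled if set
 */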

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Return true if the specified rcu_node structure has tasks that were
 * preempted within an RCU read-side critical section.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blkd_tasks);
}

/*
 * Report deferred quiescent states. The deferral time can
 * be quite short, for example, in the case of the call from
 * rcu_read_unlock_special().
 */
static notrace void
rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
{
	bool empty_exp;
	bool empty_norm;
	bool empty_exp_now;
	struct list_head *np;
	bool drop_boost_mutex = false;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	union rcu_special special;

	/*
	 * If RCU core is waiting for this CPU to exit its critical section,
	 * report the fact that it has exited. Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	rdp = this_cpu_ptr(&rcu_data);
	if (!special.s && !rdp->cpu_no_qs.b.exp) {
		local_irq_restore(flags);
		return;
	}
	t->rcu_read_unlock_special.s = 0;
	if (special.b.need_qs) {
		if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
			rdp->cpu_no_qs.b.norm = false;
			rcu_report_qs_rdp(rdp);
			udelay(rcu_unlock_delay);
		} else {
			rcu_qs();
		}
	}

	/*
	 * Respond to a request by an expedited grace period for a
	 * quiescent state from this CPU. Note that requests from
	 * tasks are handled when removing the task from the
	 * blocked-tasks list below.
	 */
	if (rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);

	/* Clean up if blocked during RCU read-side critical section. */
	if (special.b.blocked) {

		/*
		 * Remove this task from the list it blocked on. The task
		 * now remains queued on the rcu_node corresponding to the
		 * CPU it first blocked on, so there is no longer any need
		 * to loop. Retain a WARN_ON_ONCE() out of sheer paranoia.
		 */
		rnp = t->rcu_blocked_node;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
			     (!empty_norm || rnp->qsmask));
		empty_exp = sync_rcu_exp_done(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gp_seq, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			WRITE_ONCE(rnp->gp_tasks, np);
		if (&t->rcu_node_entry == rnp->exp_tasks)
			WRITE_ONCE(rnp->exp_tasks, np);
		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;
			if (&t->rcu_node_entry == rnp->boost_tasks)
				WRITE_ONCE(rnp->boost_tasks, np);
		}

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = sync_rcu_exp_done(rnp);
		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gp_seq,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(rnp, true);

		/* Unboost if we were boosted. */
		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
			rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Is a deferred quiescent-state pending, and are we also not in
 * an RCU read-side critical section? It is the caller's responsibility
 * to ensure it is otherwise safe to report any deferred quiescent
 * states. The reason for this is that it is safe to report a
 * quiescent state during context switch even though preemption
 * is disabled. This function cannot be expected to understand these
 * nuances, so the caller must handle them.
 */
static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return (__this_cpu_read(rcu_data.cpu_no_qs.b.exp) ||
		READ_ONCE(t->rcu_read_unlock_special.s)) &&
	       rcu_preempt_depth() == 0;
}

/*
 * Report a deferred quiescent state if needed and safe to do so.
 * As with rcu_preempt_need_deferred_qs(), "safe" involves only
 * not being in an RCU read-side critical section. The caller must
 * evaluate safety in terms of interrupt, softirq, and preemption
 * disabling.
 */
notrace void rcu_preempt_deferred_qs(struct task_struct *t)
{
	unsigned long flags;

	if (!rcu_preempt_need_deferred_qs(t))
		return;
	local_irq_save(flags);
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}

/*
 * Minimal handler to give the scheduler a chance to re-evaluate.
 */
static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;

	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
	rdp->defer_qs_iw_pending = false;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	unsigned long flags;
	bool irqs_were_disabled;
	bool preempt_bh_were_disabled =
			!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);
	irqs_were_disabled = irqs_disabled_flags(flags);
	if (preempt_bh_were_disabled || irqs_were_disabled) {
		bool expboost; // Expedited GP in flight or possible boosting.
		struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
		struct rcu_node *rnp = rdp->mynode;

		expboost = (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks)) ||
			   (rdp->grpmask & READ_ONCE(rnp->expmask)) ||
			   (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
			    ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node)) ||
			   (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled &&
			    t->rcu_blocked_node);
		// Need to defer quiescent state until everything is enabled.
		if (use_softirq && (in_hardirq() || (expboost && !irqs_were_disabled))) {
			// Using softirq, safe to awaken, and either the
			// wakeup is free or there is either an expedited
			// GP in flight or a potential need to deboost.
			raise_softirq_irqoff(RCU_SOFTIRQ);
		} else {
			// Enabling BH or preempt does reschedule, so...
			// Also if no expediting and no possible deboosting,
			// slow is OK. Plus nohz_full CPUs eventually get
			// tick enabled.
			set_tsk_need_resched(current);
			set_preempt_need_resched();
			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
			    expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
				// Get scheduler to re-evaluate and call hooks.
				// If !IRQ_WORK, FQS scan will eventually IPI.
				if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
				    IS_ENABLED(CONFIG_PREEMPT_RT))
					rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(
								rcu_preempt_deferred_qs_handler);
				else
					init_irq_work(&rdp->defer_qs_iw,
						      rcu_preempt_deferred_qs_handler);
				rdp->defer_qs_iw_pending = true;
				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
			}
		}
		local_irq_restore(flags);
		return;
	}
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty. It is a serious bug to complete a grace
 * period that still has RCU readers blocked! This function must be
 * invoked -before- updating this rnp's ->gp_seq.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	struct task_struct *t;

	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
	raw_lockdep_assert_held_rcu_node(rnp);
	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
		dump_blkd_tasks(rnp, 10);
	if (rcu_preempt_has_tasks(rnp) &&
	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
		WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
		t = container_of(rnp->gp_tasks, struct task_struct,
				 rcu_node_entry);
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
						rnp->gp_seq, t->pid);
	}
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check for a quiescent state from the current CPU, including voluntary
 * context switches for Tasks RCU. When a task blocks, the task is
 * recorded in the corresponding CPU's rcu_node structure, which is checked
 * elsewhere, hence this function need only check for quiescent states
 * related to the current CPU, not to those related to tasks.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	struct task_struct *t = current;

	lockdep_assert_irqs_disabled();
	if (rcu_preempt_depth() > 0 ||
	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
		/* No QS, force context switch if deferred. */
		if (rcu_preempt_need_deferred_qs(t)) {
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
	} else if (rcu_preempt_need_deferred_qs(t)) {
		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
		return;
	} else if (!WARN_ON_ONCE(rcu_preempt_depth())) {
		rcu_qs(); /* Report immediate QS. */
		return;
	}

	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
	if (rcu_preempt_depth() > 0 &&
	    __this_cpu_read(rcu_data.core_needs_qs) &&
	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
	    !t->rcu_read_unlock_special.b.need_qs &&
	    time_after(jiffies, rcu_state.gp_start + HZ))
		t->rcu_read_unlock_special.b.need_qs = true;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so. No need to issue warnings, as
 * debug_check_no_locks_held() already does this if lockdep is enabled.
 * Besides, if this function does anything other than just immediately
 * return, there was a bug of some sort. Spewing warnings from this
 * function is like as not to simply obscure important prior warnings.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (unlikely(!list_empty(&current->rcu_node_entry))) {
		rcu_preempt_depth_set(1);
		barrier();
		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
	} else if (unlikely(rcu_preempt_depth())) {
		rcu_preempt_depth_set(1);
	} else {
		return;
	}
	__rcu_read_unlock();
	rcu_preempt_deferred_qs(current);
}

/*
 * Dump the blocked-tasks state, but limit the list dump to the
 * specified number of elements.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	int cpu;
	int i;
	struct list_head *lhp;
	struct rcu_data *rdp;
	struct rcu_node *rnp1;

	raw_lockdep_assert_held_rcu_node(rnp);
	pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
		__func__, rnp->grplo, rnp->grphi, rnp->level,
		(long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs);
	for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
		pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
			__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
	pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
		__func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks),
		READ_ONCE(rnp->exp_tasks));
	pr_info("%s: ->blkd_tasks", __func__);
	i = 0;
	list_for_each(lhp, &rnp->blkd_tasks) {
		pr_cont(" %p", lhp);
		if (++i >= ncheck)
			break;
	}
	pr_cont("\n");
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
			cpu, ".o"[rcu_rdp_cpu_online(rdp)],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
	}
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * If strict grace periods are enabled, and if the calling
 * __rcu_read_unlock() marks the beginning of a quiescent state, immediately
 * report that quiescent state and, if requested, spin for a bit.
 */
void rcu_read_unlock_strict(void)
{
	struct rcu_data *rdp;

	if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
		return;
	rdp = this_cpu_ptr(&rcu_data);
	rdp->cpu_no_qs.b.norm = false;
	rcu_report_qs_rdp(rdp);
	udelay(rcu_unlock_delay);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_strict);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Note a quiescent state for PREEMPTION=n. Because we do not need to know
 * how many quiescent states passed, just if there was at least one since
 * the start of the grace period, this just sets a flag. The caller must
 * have disabled preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
	if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
	__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
	if (__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
}

/*
 * Register an urgently needed quiescent state. If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs, which will in
 * some cases serve for expedited as well as normal grace periods.
 * Either way, register a lightweight quiescent state.
 */
void rcu_all_qs(void)
{
	unsigned long flags;

	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
		return;
	preempt_disable(); // For CONFIG_PREEMPT_COUNT=y kernels
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
		preempt_enable();
		return;
	}
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
		local_irq_save(flags);
		rcu_momentary_dyntick_idle();
		local_irq_restore(flags);
	}
	rcu_qs();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(rcu_all_qs);

/*
 * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_qs();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
		goto out;
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
		rcu_momentary_dyntick_idle();
out:
	rcu_tasks_qs(current, preempt);
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return false;
}

/*
 * Because there is no preemptible RCU, there can be no deferred quiescent
 * states.
 */
static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}

// Except that we do need to respond to a request by an expedited
// grace period for a quiescent state from this CPU. Note that in
// non-preemptible kernels, there can be no context switches within RCU
// read-side critical sections, which in turn means that the leaf rcu_node
// structure's blocked-tasks list is always empty. There is therefore no
// need to actually check it. Instead, a quiescent state from this CPU
// suffices, and this function is only called from such a quiescent state.
notrace void rcu_preempt_deferred_qs(struct task_struct *t)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks. So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check to see if this CPU is in a non-context-switch quiescent state,
 * namely user mode and idle loop.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	if (user || rcu_is_cpu_rrupt_from_idle()) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt. In this case, the CPU is in
		 * a quiescent state, so note it.
		 *
		 * No memory barrier is required here because rcu_qs()
		 * references only CPU-local variables that other CPUs
		 * neither access nor modify, at least not while the
		 * corresponding CPU is online.
		 */
		rcu_qs();
	}
}

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

/*
 * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * If boosting, set rcuc kthreads to realtime priority.
 */
static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
#ifdef CONFIG_RCU_BOOST
	struct sched_param sp;

	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
#endif /* #ifdef CONFIG_RCU_BOOST */

	WRITE_ONCE(rdp->rcuc_activity, jiffies);
}

static bool rcu_is_callbacks_nocb_kthread(struct rcu_data *rdp)
{
#ifdef CONFIG_RCU_NOCB_CPU
	return rdp->nocb_cb_kthread == current;
#else
	return false;
#endif
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(struct rcu_data *rdp)
{
	return rdp->rcu_cpu_kthread_task == current ||
	       rcu_is_callbacks_nocb_kthread(rdp);
}

#ifdef CONFIG_RCU_BOOST

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;
	struct list_head *tb;

	if (READ_ONCE(rnp->exp_tasks) == NULL &&
	    READ_ONCE(rnp->boost_tasks) == NULL)
		return 0; /* Nothing left to boost. */

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL)
		tb = rnp->exp_tasks;
	else
		tb = rnp->boost_tasks;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t. We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section. Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority. (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else. We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock. Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	/* Lock only for side effect: boosts task t's priority. */
	rt_mutex_lock(&rnp->boost_mtx);
	rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */
	rnp->n_boosts++;

	return READ_ONCE(rnp->exp_tasks) != NULL ||
	       READ_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread, one per leaf rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization(TPS("Start boost kthread@init"));
	for (;;) {
		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING);
		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
		rcu_wait(READ_ONCE(rnp->boost_tasks) ||
			 READ_ONCE(rnp->exp_tasks));
		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING);
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING);
			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
			schedule_timeout_idle(2);
			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization(TPS("End boost kthread@notreached"));
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them. If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (!rnp->boost_kthread_task ||
	    (!rcu_preempt_blocked_readers_cgp(rnp) && !rnp->exp_tasks)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld ||
	      IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)))) {
		if (rnp->exp_tasks == NULL)
			WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		rcu_wake_cond(rnp->boost_kthread_task,
			      READ_ONCE(rnp->boost_kthread_status));
	} else {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
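
/*
 * Worked example (editor's note): RCU_BOOST_DELAY_JIFFIES converts
 * CONFIG_RCU_BOOST_DELAY, expressed in milliseconds, into jiffies,
 * rounding up. With HZ == 250 and CONFIG_RCU_BOOST_DELAY == 500:
 *
 *	DIV_ROUND_UP(500 * 250, 1000) == 125 jiffies == 500 ms
 *
 * so boosting does not begin until half a second into the grace period.
 */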

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist. We only create this kthread for preemptible RCU.
 */
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - rcu_get_root();
	struct sched_param sp;
	struct task_struct *t;

	mutex_lock(&rnp->boost_kthread_mutex);
	if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
		goto out;

	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (WARN_ON_ONCE(IS_ERR(t)))
		goto out;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */

 out:
	mutex_unlock(&rnp->boost_kthread_mutex);
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question. The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set; use -1 if there is
 * no outgoing CPU. If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rcu_rnp_online_cpus(rnp);
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	mutex_lock(&rnp->boost_kthread_mutex);
	for_each_leaf_node_possible_cpu(rnp, cpu)
		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
		    cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
	if (cpumask_empty(cm)) {
		cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
		if (outgoingcpu >= 0)
			cpumask_clear_cpu(outgoingcpu, cm);
	}
	set_cpus_allowed_ptr(t, cm);
	mutex_unlock(&rnp->boost_kthread_mutex);
	free_cpumask_var(cm);
}
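
/*
 * Illustrative usage (editor's note, hypothetical call sites): a
 * CPU-hotplug path might exclude the outgoing CPU, while other callers
 * pass -1 when no CPU is going away:
 *
 *	rcu_boost_kthread_setaffinity(rnp, cpu);	// cpu going offline
 *	rcu_boost_kthread_setaffinity(rnp, -1);		// no outgoing CPU
 */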

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

/*
 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
 * grace-period kthread will do force_quiescent_state() processing?
 * The idea is to avoid waking up RCU core processing on such a
 * CPU unless the grace period has extended for too long.
 *
 * This code relies on the fact that all NO_HZ_FULL CPUs are also
 * RCU_NOCB_CPU CPUs.
 */
static bool rcu_nohz_full_cpu(void)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(smp_processor_id()) &&
	    (!rcu_gp_in_progress() ||
	     time_before(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
		return true;
#endif /* #ifdef CONFIG_NO_HZ_FULL */
	return false;
}

/*
 * Bind the RCU grace-period kthreads to the housekeeping CPU.
 */
static void rcu_bind_gp_kthread(void)
{
	if (!tick_nohz_full_enabled())
		return;
	housekeeping_affine(current, HK_TYPE_RCU);
}