// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	    Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/*
 * Control conversion to SRCU_SIZE_BIG:
 *    0: Don't convert at all.
 *    1: Convert at init_srcu_struct() time.
 *    2: Convert when rcutorture invokes srcu_torture_stats_print().
 *    3: Decide at boot time based on system shape (default).
 * 0x1x: Convert when excessive contention encountered.
 */
#define SRCU_SIZING_NONE	0
#define SRCU_SIZING_INIT	1
#define SRCU_SIZING_TORTURE	2
#define SRCU_SIZING_AUTO	3
#define SRCU_SIZING_CONTEND	0x10
#define SRCU_SIZING_IS(x) ((convert_to_big & ~SRCU_SIZING_CONTEND) == x)
#define SRCU_SIZING_IS_NONE() (SRCU_SIZING_IS(SRCU_SIZING_NONE))
#define SRCU_SIZING_IS_INIT() (SRCU_SIZING_IS(SRCU_SIZING_INIT))
#define SRCU_SIZING_IS_TORTURE() (SRCU_SIZING_IS(SRCU_SIZING_TORTURE))
#define SRCU_SIZING_IS_CONTEND() (convert_to_big & SRCU_SIZING_CONTEND)
static int convert_to_big = SRCU_SIZING_AUTO;
module_param(convert_to_big, int, 0444);

/* Number of CPUs to trigger init_srcu_struct()-time transition to big. */
static int big_cpu_lim __read_mostly = 128;
module_param(big_cpu_lim, int, 0444);

/* Contention events per jiffy to initiate transition to big. */
static int small_contention_lim __read_mostly = 100;
module_param(small_contention_lim, int, 0444);

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_trylock_irqsave_rcu_node(p, flags)				\
({									\
	bool ___locked = spin_trylock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
									\
	if (___locked)							\
		smp_mb__after_unlock_lock();				\
	___locked;							\
})

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)	\
/*
 * Initialize SRCU per-CPU data.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_data(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = NULL;
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
	}
}

/* Invalid seq state, used during snp node initialization */
#define SRCU_SNP_INIT_SEQ	0x2

/*
 * Check whether the sequence number corresponding to a given snp node
 * is invalid.
 */
static inline bool srcu_invl_snp_seq(unsigned long s)
{
	return rcu_seq_state(s) == SRCU_SNP_INIT_SEQ;
}

/*
 * Allocate and initialize the SRCU combining tree.  Returns @true if
 * allocation succeeded and @false otherwise.
 */
static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Initialize geometry if it has not already been initialized. */
	rcu_init_geometry();
	ssp->node = kcalloc(rcu_num_nodes, sizeof(*ssp->node), gfp_flags);
	if (!ssp->node)
		return false;

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
	}
	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
	return true;
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static parameter
 * tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	ssp->srcu_size_state = SRCU_SIZE_SMALL;
	ssp->node = NULL;
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	ssp->sda_is_static = is_static;
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		return -ENOMEM;
	init_srcu_struct_data(ssp);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	if (READ_ONCE(ssp->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
		if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) {
			if (!ssp->sda_is_static) {
				free_percpu(ssp->sda);
				ssp->sda = NULL;
				return -ENOMEM;
			}
		} else {
			WRITE_ONCE(ssp->srcu_size_state, SRCU_SIZE_BIG);
		}
	}
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
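
/*
 * Usage sketch (illustrative only, not part of this file's logic): a
 * dynamically allocated srcu_struct pairs init_srcu_struct() with
 * cleanup_srcu_struct(), while statically allocated domains can instead
 * use DEFINE_SRCU() or DEFINE_STATIC_SRCU().  The "my_srcu" and "my_init"
 * names below are hypothetical.
 *
 *	DEFINE_STATIC_SRCU(my_static_srcu);	// static domain, no init call needed
 *
 *	static int my_init(struct srcu_struct **sspp)
 *	{
 *		struct srcu_struct *ssp = kmalloc(sizeof(*ssp), GFP_KERNEL);
 *
 *		if (!ssp)
 *			return -ENOMEM;
 *		if (init_srcu_struct(ssp)) {	// wires up ->sda and friends
 *			kfree(ssp);
 *			return -ENOMEM;
 *		}
 *		*sspp = ssp;			// must cleanup_srcu_struct() + kfree() later
 *		return 0;
 *	}
 */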

/*
 * Initiate a transition to SRCU_SIZE_BIG with lock held.
 */
static void __srcu_transition_to_big(struct srcu_struct *ssp)
{
	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_ALLOC);
}

/*
 * Initiate an idempotent transition to SRCU_SIZE_BIG.
 */
static void srcu_transition_to_big(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* Double-checked locking on ->srcu_size_state. */
	if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL)
		return;
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	__srcu_transition_to_big(ssp);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Check to see if the just-encountered contention event justifies
 * a transition to SRCU_SIZE_BIG.
 */
static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
{
	unsigned long j;

	if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_size_state)
		return;
	j = jiffies;
	if (ssp->srcu_size_jiffies != j) {
		ssp->srcu_size_jiffies = j;
		ssp->srcu_n_lock_retries = 0;
	}
	if (++ssp->srcu_n_lock_retries <= small_contention_lim)
		return;
	__srcu_transition_to_big(ssp);
}

/*
 * Acquire the specified srcu_data structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_sdp_contention(struct srcu_data *sdp, unsigned long *flags)
{
	struct srcu_struct *ssp = sdp->ssp;

	if (spin_trylock_irqsave_rcu_node(sdp, *flags))
		return;
	spin_lock_irqsave_rcu_node(ssp, *flags);
	spin_lock_irqsave_check_contention(ssp);
	spin_unlock_irqrestore_rcu_node(ssp, *flags);
	spin_lock_irqsave_rcu_node(sdp, *flags);
}

/*
 * Acquire the specified srcu_struct structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
{
	if (spin_trylock_irqsave_rcu_node(ssp, *flags))
		return;
	spin_lock_irqsave_rcu_node(ssp, *flags);
	spin_lock_irqsave_check_contention(ssp);
}

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below, boot time configurable) to allow SRCU readers to exit
 * their read-side critical sections.  If there are still some readers
 * after one jiffy, we repeatedly block for one-jiffy time periods.
 * The blocking time is increased as the grace-period age increases,
 * with max blocking time capped at 10 jiffies.
 */
#define SRCU_DEFAULT_RETRY_CHECK_DELAY		5

static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
module_param(srcu_retry_check_delay, ulong, 0444);

#define SRCU_INTERVAL		1		// Base delay if no expedited GPs pending.
#define SRCU_MAX_INTERVAL	10		// Maximum incremental delay from slow readers.

#define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO	3UL	// Lowmark on default per-GP-phase
							// no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI	1000UL	// Highmark on default per-GP-phase
							// no-delay instances.

#define SRCU_UL_CLAMP_LO(val, low)	((val) > (low) ? (val) : (low))
#define SRCU_UL_CLAMP_HI(val, high)	((val) < (high) ? (val) : (high))
#define SRCU_UL_CLAMP(val, low, high)	SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))
// per-GP-phase no-delay instances adjusted to allow non-sleeping poll up to
// one jiffy's duration.  The multiplication by 2 factors in the srcu_get_delay()
// call from process_srcu().
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED	\
	(2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)

// Maximum per-GP-phase consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE	\
	SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED,	\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_LO,	\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)

static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
module_param(srcu_max_nodelay_phase, ulong, 0444);

// Maximum consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY	(SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ?	\
					 SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)

static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
module_param(srcu_max_nodelay, ulong, 0444);

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	unsigned long gpstart;
	unsigned long j;
	unsigned long jbase = SRCU_INTERVAL;

	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		jbase = 0;
	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))) {
		j = jiffies - 1;
		gpstart = READ_ONCE(ssp->srcu_gp_start);
		if (time_after(j, gpstart))
			jbase += j - gpstart;
		if (!jbase) {
			WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
			if (READ_ONCE(ssp->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
				jbase = 1;
		}
	}
	return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(rcu_seq_current(&ssp->srcu_gp_seq) != ssp->srcu_gp_seq_needed) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)),
			rcu_seq_current(&ssp->srcu_gp_seq), ssp->srcu_gp_seq_needed);
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	if (!ssp->sda_is_static) {
		free_percpu(ssp->sda);
		ssp->sda = NULL;
	}
	kfree(ssp->node);
	ssp->node = NULL;
	ssp->srcu_size_state = SRCU_SIZE_SMALL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
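
/*
 * Reader-side usage sketch (illustrative only): these two functions are
 * normally reached through srcu_read_lock()/srcu_read_unlock(), which
 * bracket accesses to SRCU-protected pointers.  The "my_srcu", "gp", and
 * "struct my_data" names below are hypothetical.
 *
 *	int idx;
 *	struct my_data *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);	// fetch the protected pointer
 *	if (p)
 *		use(p);				// may block, unlike plain RCU readers
 *	srcu_read_unlock(&my_srcu, idx);	// pass back the index from srcu_read_lock()
 */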

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp;
	int state;

	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		sdp = per_cpu_ptr(ssp->sda, 0);
	else
		sdp = this_cpu_ptr(ssp->sda);
	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	WRITE_ONCE(ssp->srcu_gp_start, jiffies);
	WRITE_ONCE(ssp->srcu_n_exp_nodelay, 0);
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}


static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay = 1;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	unsigned long sgsne;
	struct srcu_node *snp;
	int ss_state;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		cbdelay = 0;

	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	ss_state = smp_load_acquire(&ssp->srcu_size_state);
	if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, 0), cbdelay);
	} else {
		idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
		srcu_for_each_node_breadth_first(ssp, snp) {
			spin_lock_irq_rcu_node(snp);
			cbs = false;
			last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
			if (last_lvl)
				cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
			snp->srcu_have_cbs[idx] = gpseq;
			rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, gpseq))
				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
			if (ss_state < SRCU_SIZE_BIG)
				mask = ~0;
			else
				mask = snp->srcu_data_have_cbs[idx];
			snp->srcu_data_have_cbs[idx] = 0;
			spin_unlock_irq_rcu_node(snp);
			if (cbs)
				srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
		}
	}

	/* Occasionally prevent srcu_data counter wrap. */
	if (!(gpseq & counter_wrap_check))
		for_each_possible_cpu(cpu) {
			sdp = per_cpu_ptr(ssp->sda, cpu);
			spin_lock_irqsave_rcu_node(sdp, flags);
			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
				sdp->srcu_gp_seq_needed = gpseq;
			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
				sdp->srcu_gp_seq_needed_exp = gpseq;
			spin_unlock_irqrestore_rcu_node(sdp, flags);
		}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}

	/* Transition to big if needed. */
	if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
		if (ss_state == SRCU_SIZE_ALLOC)
			init_srcu_struct_nodes(ssp, GFP_KERNEL);
		else
			smp_store_release(&ssp->srcu_size_state, ss_state + 1);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;
	unsigned long sgsne;

	if (snp)
		for (; snp != NULL; snp = snp->srcu_parent) {
			sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
			if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
			    (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
				return;
			spin_lock_irqsave_rcu_node(snp, flags);
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)) {
				spin_unlock_irqrestore_rcu_node(snp, flags);
				return;
			}
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
			spin_unlock_irqrestore_rcu_node(snp, flags);
		}
	spin_lock_irqsave_ssp_contention(ssp, &flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	unsigned long sgsne;
	struct srcu_node *snp;
	struct srcu_node *snp_leaf;
	unsigned long snp_seq;

	/* Ensure that snp node tree is fully initialized before traversing it */
	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		snp_leaf = NULL;
	else
		snp_leaf = sdp->mynode;

	if (snp_leaf)
		/* Each pass through the loop does one level of the srcu_node tree. */
		for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
			if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != snp_leaf)
				return; /* GP already done and CBs recorded. */
			spin_lock_irqsave_rcu_node(snp, flags);
			snp_seq = snp->srcu_have_cbs[idx];
			if (!srcu_invl_snp_seq(snp_seq) && ULONG_CMP_GE(snp_seq, s)) {
				if (snp == snp_leaf && snp_seq == s)
					snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
				spin_unlock_irqrestore_rcu_node(snp, flags);
				if (snp == snp_leaf && snp_seq != s) {
					srcu_schedule_cbs_sdp(sdp, do_norm ? SRCU_INTERVAL : 0);
					return;
				}
				if (!do_norm)
					srcu_funnel_exp_start(ssp, snp, s);
				return;
			}
			snp->srcu_have_cbs[idx] = s;
			if (snp == snp_leaf)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
			spin_unlock_irqrestore_rcu_node(snp, flags);
		}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_ssp_contention(ssp, &flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);

		// And how can that list_add() in the "else" clause
		// possibly be safe for concurrent execution?  Well,
		// it isn't.  And it does not have to be.  After all, it
		// can only be executed during early boot when there is only
		// the one boot CPU running with interrupts still disabled.
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   !!srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	unsigned long curdelay;

	curdelay = !srcu_get_delay(ssp);

	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if ((--trycount + curdelay) <= 0)
			return false;
		udelay(srcu_retry_check_delay);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period to specify expediting because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle. */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
					     struct rcu_head *rhp, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;
	struct srcu_node *sdp_mynode;
	int ss_state;

	check_init_srcu_struct(ssp);
	idx = srcu_read_lock(ssp);
	ss_state = smp_load_acquire(&ssp->srcu_size_state);
	if (ss_state < SRCU_SIZE_WAIT_CALL)
		sdp = per_cpu_ptr(ssp->sda, 0);
	else
		sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_sdp_contention(sdp, &flags);
	if (rhp)
		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/* Ensure that snp node tree is fully initialized before traversing it */
	if (ss_state < SRCU_SIZE_WAIT_BARRIER)
		sdp_mynode = NULL;
	else
		sdp_mynode = sdp->mynode;

	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp_mynode, s);
	srcu_read_unlock(ssp, idx);
	return s;
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
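
/*
 * Usage sketch for call_srcu() (illustrative only): the rcu_head is usually
 * embedded in the structure being freed and recovered with container_of()
 * in the callback.  The "struct my_obj", "my_srcu", and "my_free_cb" names
 * below are hypothetical.
 *
 *	struct my_obj {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_obj, rh));
 *	}
 *
 *	// After unpublishing the object from all SRCU-protected pointers:
 *	call_srcu(&my_srcu, &obj->rh, my_free_cb);
 */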

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the count of both indexes to drain to zero.  To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
 * and then flips ->srcu_idx and waits for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * Implementation of these memory-ordering guarantees is similar to
 * that of synchronize_rcu().
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(ssp);
	else
		__synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
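
/*
 * Updater-side usage sketch for synchronize_srcu() (illustrative only):
 * unpublish the old data, wait out pre-existing readers, then reclaim.
 * The "gp", "my_srcu", "my_update_lock", and "struct my_data" names below
 * are hypothetical.
 *
 *	struct my_data *old;
 *
 *	spin_lock(&my_update_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_update_lock));
 *	rcu_assign_pointer(gp, new);		// publish the replacement
 *	spin_unlock(&my_update_lock);
 *	synchronize_srcu(&my_srcu);		// wait for pre-existing readers
 *	kfree(old);				// now safe to free
 */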

/**
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  It is the caller's responsibility
 * to make sure that grace period happens, for example, by invoking
 * call_srcu() after return from get_state_synchronize_srcu().
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	// Any prior manipulation of SRCU-protected data must happen
	// before the load from ->srcu_gp_seq.
	smp_mb();
	return rcu_seq_snap(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);

/**
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
 * this function also ensures that any needed SRCU grace period will be
 * started.  This convenience does come at a cost in terms of CPU overhead.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	return srcu_gp_start_if_needed(ssp, NULL, true);
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/**
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 * @ssp: srcu_struct to provide cookie for.
 * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
 *
 * This function takes the cookie that was returned from either
 * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
 * returns @true if an SRCU grace period elapsed since the time that the
 * cookie was created.
 *
 * Because cookies are finite in size, wrapping/overflow is possible.
 * This is more pronounced on 32-bit systems where cookies are 32 bits,
 * where in theory wrapping could happen in about 14 hours assuming
 * 25-microsecond expedited SRCU grace periods.  However, a more likely
 * overflow lower bound is on the order of 24 days in the case of
 * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
 * system requires geologic timespans, as in more than seven million years
 * even for expedited SRCU grace periods.
 *
 * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
 * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU.  This uses
 * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
 * few minutes.  If this proves to be a problem, this counter will be
 * expanded to the same size as for Tree SRCU.
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
		return false;
	// Ensure that the end of the SRCU grace period happens before
	// any subsequent code that the caller might execute.
	smp_mb(); // ^^^
	return true;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
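
/*
 * Polling usage sketch (illustrative only): grab a cookie, start a grace
 * period, and later check whether it has completed without blocking.  The
 * "my_srcu" and "my_cookie" names below are hypothetical.
 *
 *	unsigned long my_cookie;
 *
 *	my_cookie = start_poll_synchronize_srcu(&my_srcu);	// GP requested
 *	...
 *	if (poll_state_synchronize_srcu(&my_srcu, my_cookie)) {
 *		// A full grace period has elapsed, reclamation may proceed.
 *	} else {
 *		// Not yet: check again later, or fall back to synchronize_srcu().
 *	}
 */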
1370
1371 /*
1372 * Callback function for srcu_barrier() use.
1373 */
srcu_barrier_cb(struct rcu_head * rhp)1374 static void srcu_barrier_cb(struct rcu_head *rhp)
1375 {
1376 struct srcu_data *sdp;
1377 struct srcu_struct *ssp;
1378
1379 sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
1380 ssp = sdp->ssp;
1381 if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1382 complete(&ssp->srcu_barrier_completion);
1383 }
1384
1385 /*
1386 * Enqueue an srcu_barrier() callback on the specified srcu_data
1387 * structure's ->cblist. but only if that ->cblist already has at least one
1388 * callback enqueued. Note that if a CPU already has callbacks enqueue,
1389 * it must have already registered the need for a future grace period,
1390 * so all we need do is enqueue a callback that will use the same grace
1391 * period as the last callback already in the queue.
1392 */
srcu_barrier_one_cpu(struct srcu_struct * ssp,struct srcu_data * sdp)1393 static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
1394 {
1395 spin_lock_irq_rcu_node(sdp);
1396 atomic_inc(&ssp->srcu_barrier_cpu_cnt);
1397 sdp->srcu_barrier_head.func = srcu_barrier_cb;
1398 debug_rcu_head_queue(&sdp->srcu_barrier_head);
1399 if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
1400 &sdp->srcu_barrier_head)) {
1401 debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
1402 atomic_dec(&ssp->srcu_barrier_cpu_cnt);
1403 }
1404 spin_unlock_irq_rcu_node(sdp);
1405 }
1406
1407 /**
1408 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1409 * @ssp: srcu_struct on which to wait for in-flight callbacks.
1410 */
srcu_barrier(struct srcu_struct * ssp)1411 void srcu_barrier(struct srcu_struct *ssp)
1412 {
1413 int cpu;
1414 int idx;
1415 unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
1416
1417 check_init_srcu_struct(ssp);
1418 mutex_lock(&ssp->srcu_barrier_mutex);
1419 if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
1420 smp_mb(); /* Force ordering following return. */
1421 mutex_unlock(&ssp->srcu_barrier_mutex);
1422 return; /* Someone else did our work for us. */
1423 }
1424 rcu_seq_start(&ssp->srcu_barrier_seq);
1425 init_completion(&ssp->srcu_barrier_completion);
1426
1427 /* Initial count prevents reaching zero until all CBs are posted. */
1428 atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
1429
1430 idx = srcu_read_lock(ssp);
1431 if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
1432 srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
1433 else
1434 for_each_possible_cpu(cpu)
1435 srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
1436 srcu_read_unlock(ssp, idx);
1437
1438 /* Remove the initial count, at which point reaching zero can happen. */
1439 if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1440 complete(&ssp->srcu_barrier_completion);
1441 wait_for_completion(&ssp->srcu_barrier_completion);
1442
1443 rcu_seq_end(&ssp->srcu_barrier_seq);
1444 mutex_unlock(&ssp->srcu_barrier_mutex);
1445 }
1446 EXPORT_SYMBOL_GPL(srcu_barrier);
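
/*
 * Illustrative sketch (hypothetical names): srcu_barrier() is typically the
 * last step before tearing down an srcu_struct that has had call_srcu()
 * callbacks posted against it, for example on a driver's exit path.
 */
#if 0	/* example only */
static struct srcu_struct my_srcu;	/* init_srcu_struct()'d at load time. */

static void my_driver_exit(void)
{
	/* Ensure no new call_srcu() invocations can occur, then: */
	srcu_barrier(&my_srcu);		/* Wait for all in-flight callbacks. */
	cleanup_srcu_struct(&my_srcu);	/* Now safe to clean up. */
}
#endif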
1447
1448 /**
1449 * srcu_batches_completed - return batches completed.
1450 * @ssp: srcu_struct on which to report batch completion.
1451 *
1452 * Report the number of batches, correlated with, but not necessarily
1453 * precisely the same as, the number of grace periods that have elapsed.
1454 */
1455 unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1456 {
1457 return READ_ONCE(ssp->srcu_idx);
1458 }
1459 EXPORT_SYMBOL_GPL(srcu_batches_completed);
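
/*
 * Illustrative sketch (hypothetical helper): because the value above is only
 * loosely correlated with grace periods, callers such as torture tests
 * typically detect forward progress by comparing snapshots rather than by
 * interpreting the absolute value.
 */
#if 0	/* example only */
static bool srcu_gp_progressed(struct srcu_struct *ssp, unsigned long *snap)
{
	unsigned long cur = srcu_batches_completed(ssp);
	bool progressed = (cur != *snap);

	*snap = cur;
	return progressed;
}
#endif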
1460
1461 /*
1462 * Core SRCU state machine. Push state bits of ->srcu_gp_seq
1463 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
1464 * completed in that state.
1465 */
1466 static void srcu_advance_state(struct srcu_struct *ssp)
1467 {
1468 int idx;
1469
1470 mutex_lock(&ssp->srcu_gp_mutex);
1471
1472 /*
1473 * Because readers might be delayed for an extended period after
1474 * fetching ->srcu_idx for their index, at any point in time there
1475 * might well be readers using both idx=0 and idx=1. We therefore
1476 * need to wait for readers to clear from both index values before
1477 * invoking a callback.
1478 *
1479 * The load-acquire ensures that we see the accesses performed
1480 * by the prior grace period.
1481 */
1482 idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
1483 if (idx == SRCU_STATE_IDLE) {
1484 spin_lock_irq_rcu_node(ssp);
1485 if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1486 WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
1487 spin_unlock_irq_rcu_node(ssp);
1488 mutex_unlock(&ssp->srcu_gp_mutex);
1489 return;
1490 }
1491 idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
1492 if (idx == SRCU_STATE_IDLE)
1493 srcu_gp_start(ssp);
1494 spin_unlock_irq_rcu_node(ssp);
1495 if (idx != SRCU_STATE_IDLE) {
1496 mutex_unlock(&ssp->srcu_gp_mutex);
1497 return; /* Someone else started the grace period. */
1498 }
1499 }
1500
1501 if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1502 idx = 1 ^ (ssp->srcu_idx & 1);
1503 if (!try_check_zero(ssp, idx, 1)) {
1504 mutex_unlock(&ssp->srcu_gp_mutex);
1505 return; /* readers present, retry later. */
1506 }
1507 srcu_flip(ssp);
1508 spin_lock_irq_rcu_node(ssp);
1509 rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
1510 ssp->srcu_n_exp_nodelay = 0;
1511 spin_unlock_irq_rcu_node(ssp);
1512 }
1513
1514 if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1515
1516 /*
1517 * SRCU read-side critical sections are normally short,
1518 * so check at least twice in quick succession after a flip.
1519 */
1520 idx = 1 ^ (ssp->srcu_idx & 1);
1521 if (!try_check_zero(ssp, idx, 2)) {
1522 mutex_unlock(&ssp->srcu_gp_mutex);
1523 return; /* readers present, retry later. */
1524 }
1525 ssp->srcu_n_exp_nodelay = 0;
1526 srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
1527 }
1528 }
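
/*
 * Illustrative sketch (hypothetical names): the try_check_zero() scans above
 * wait on readers of the following form, each of which pins whichever
 * counter index it sampled at srcu_read_lock() time, which is why both
 * index values must be scanned before a grace period can end.
 */
#if 0	/* example only */
struct my_data { int field; };
static struct my_data __rcu *my_gp;

static int my_reader(struct srcu_struct *ssp)
{
	int idx, val = 0;
	struct my_data *p;

	idx = srcu_read_lock(ssp);		/* Sample ->srcu_idx, pin that index. */
	p = srcu_dereference(my_gp, ssp);
	if (p)
		val = p->field;
	srcu_read_unlock(ssp, idx);		/* Release the same index. */
	return val;
}
#endif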
1529
1530 /*
1531 * Invoke a limited number of SRCU callbacks that have passed through
1532 * their grace period. If there are more to do, SRCU will reschedule
1533 * the workqueue. Note that needed memory barriers have been executed
1534 * in this task's context by srcu_readers_active_idx_check().
1535 */
1536 static void srcu_invoke_callbacks(struct work_struct *work)
1537 {
1538 long len;
1539 bool more;
1540 struct rcu_cblist ready_cbs;
1541 struct rcu_head *rhp;
1542 struct srcu_data *sdp;
1543 struct srcu_struct *ssp;
1544
1545 sdp = container_of(work, struct srcu_data, work);
1546
1547 ssp = sdp->ssp;
1548 rcu_cblist_init(&ready_cbs);
1549 spin_lock_irq_rcu_node(sdp);
1550 rcu_segcblist_advance(&sdp->srcu_cblist,
1551 rcu_seq_current(&ssp->srcu_gp_seq));
1552 if (sdp->srcu_cblist_invoking ||
1553 !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
1554 spin_unlock_irq_rcu_node(sdp);
1555 return; /* Someone else on the job or nothing to do. */
1556 }
1557
1558 /* We are on the job! Extract and invoke ready callbacks. */
1559 sdp->srcu_cblist_invoking = true;
1560 rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
1561 len = ready_cbs.len;
1562 spin_unlock_irq_rcu_node(sdp);
1563 rhp = rcu_cblist_dequeue(&ready_cbs);
1564 for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
1565 debug_rcu_head_unqueue(rhp);
1566 local_bh_disable();
1567 rhp->func(rhp);
1568 local_bh_enable();
1569 }
1570 WARN_ON_ONCE(ready_cbs.len);
1571
1572 /*
1573 * Update counts, accelerate new callbacks, and if needed,
1574 * schedule another round of callback invocation.
1575 */
1576 spin_lock_irq_rcu_node(sdp);
1577 rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
1578 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
1579 rcu_seq_snap(&ssp->srcu_gp_seq));
1580 sdp->srcu_cblist_invoking = false;
1581 more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
1582 spin_unlock_irq_rcu_node(sdp);
1583 if (more)
1584 srcu_schedule_cbs_sdp(sdp, 0);
1585 }
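
/*
 * Illustrative sketch (hypothetical names): the callbacks invoked above are
 * typically posted via call_srcu() and free a structure embedding the
 * rcu_head once its grace period has elapsed.
 */
#if 0	/* example only */
struct my_node {
	struct rcu_head rh;
	/* ... payload ... */
};

static void my_node_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_node, rh));
}

/* Updater: after unlinking @p from reader-visible structures. */
static void my_node_retire(struct srcu_struct *ssp, struct my_node *p)
{
	call_srcu(ssp, &p->rh, my_node_free_cb);
}
#endif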
1586
1587 /*
1588 * Finished one round of SRCU grace period. Start another if there are
1589 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
1590 */
1591 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1592 {
1593 bool pushgp = true;
1594
1595 spin_lock_irq_rcu_node(ssp);
1596 if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1597 if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
1598 /* All requests fulfilled, time to go idle. */
1599 pushgp = false;
1600 }
1601 } else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
1602 /* Outstanding request and no GP. Start one. */
1603 srcu_gp_start(ssp);
1604 }
1605 spin_unlock_irq_rcu_node(ssp);
1606
1607 if (pushgp)
1608 queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
1609 }
1610
1611 /*
1612 * This is the work-queue function that handles SRCU grace periods.
1613 */
1614 static void process_srcu(struct work_struct *work)
1615 {
1616 unsigned long curdelay;
1617 unsigned long j;
1618 struct srcu_struct *ssp;
1619
1620 ssp = container_of(work, struct srcu_struct, work.work);
1621
1622 srcu_advance_state(ssp);
1623 curdelay = srcu_get_delay(ssp);
1624 if (curdelay) {
1625 WRITE_ONCE(ssp->reschedule_count, 0);
1626 } else {
1627 j = jiffies;
1628 if (READ_ONCE(ssp->reschedule_jiffies) == j) {
1629 WRITE_ONCE(ssp->reschedule_count, READ_ONCE(ssp->reschedule_count) + 1);
1630 if (READ_ONCE(ssp->reschedule_count) > srcu_max_nodelay)
1631 curdelay = 1;
1632 } else {
1633 WRITE_ONCE(ssp->reschedule_count, 1);
1634 WRITE_ONCE(ssp->reschedule_jiffies, j);
1635 }
1636 }
1637 srcu_reschedule(ssp, curdelay);
1638 }
1639
1640 void srcutorture_get_gp_data(enum rcutorture_type test_type,
1641 struct srcu_struct *ssp, int *flags,
1642 unsigned long *gp_seq)
1643 {
1644 if (test_type != SRCU_FLAVOR)
1645 return;
1646 *flags = 0;
1647 *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
1648 }
1649 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
1650
1651 static const char * const srcu_size_state_name[] = {
1652 "SRCU_SIZE_SMALL",
1653 "SRCU_SIZE_ALLOC",
1654 "SRCU_SIZE_WAIT_BARRIER",
1655 "SRCU_SIZE_WAIT_CALL",
1656 "SRCU_SIZE_WAIT_CBS1",
1657 "SRCU_SIZE_WAIT_CBS2",
1658 "SRCU_SIZE_WAIT_CBS3",
1659 "SRCU_SIZE_WAIT_CBS4",
1660 "SRCU_SIZE_BIG",
1661 "SRCU_SIZE_???",
1662 };
1663
1664 void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1665 {
1666 int cpu;
1667 int idx;
1668 unsigned long s0 = 0, s1 = 0;
1669 int ss_state = READ_ONCE(ssp->srcu_size_state);
1670 int ss_state_idx = ss_state;
1671
1672 idx = ssp->srcu_idx & 0x1;
1673 if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
1674 ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
1675 pr_alert("%s%s Tree SRCU g%ld state %d (%s)",
1676 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), ss_state,
1677 srcu_size_state_name[ss_state_idx]);
1678 if (!ssp->sda) {
1679 // Called after cleanup_srcu_struct(), perhaps.
1680 pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
1681 } else {
1682 pr_cont(" per-CPU(idx=%d):", idx);
1683 for_each_possible_cpu(cpu) {
1684 unsigned long l0, l1;
1685 unsigned long u0, u1;
1686 long c0, c1;
1687 struct srcu_data *sdp;
1688
1689 sdp = per_cpu_ptr(ssp->sda, cpu);
1690 u0 = data_race(sdp->srcu_unlock_count[!idx]);
1691 u1 = data_race(sdp->srcu_unlock_count[idx]);
1692
1693 /*
1694 * Make sure that a lock is always counted if the corresponding
1695 * unlock is counted.
1696 */
1697 smp_rmb();
1698
1699 l0 = data_race(sdp->srcu_lock_count[!idx]);
1700 l1 = data_race(sdp->srcu_lock_count[idx]);
1701
1702 c0 = l0 - u0;
1703 c1 = l1 - u1;
1704 pr_cont(" %d(%ld,%ld %c)",
1705 cpu, c0, c1,
1706 "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
1707 s0 += c0;
1708 s1 += c1;
1709 }
1710 pr_cont(" T(%ld,%ld)\n", s0, s1);
1711 }
1712 if (SRCU_SIZING_IS_TORTURE())
1713 srcu_transition_to_big(ssp);
1714 }
1715 EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
1716
1717 static int __init srcu_bootup_announce(void)
1718 {
1719 pr_info("Hierarchical SRCU implementation.\n");
1720 if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
1721 pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
1722 if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
1723 pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
1724 if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
1725 pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
1726 pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
1727 return 0;
1728 }
1729 early_initcall(srcu_bootup_announce);
1730
1731 void __init srcu_init(void)
1732 {
1733 struct srcu_struct *ssp;
1734
1735 /* Decide on srcu_struct-size strategy. */
1736 if (SRCU_SIZING_IS(SRCU_SIZING_AUTO)) {
1737 if (nr_cpu_ids >= big_cpu_lim) {
1738 convert_to_big = SRCU_SIZING_INIT; // Don't bother waiting for contention.
1739 pr_info("%s: Setting srcu_struct sizes to big.\n", __func__);
1740 } else {
1741 convert_to_big = SRCU_SIZING_NONE | SRCU_SIZING_CONTEND;
1742 pr_info("%s: Setting srcu_struct sizes based on contention.\n", __func__);
1743 }
1744 }
1745
1746 /*
1747 * Once srcu_init_done is set, call_srcu() can follow the normal path
1748 * and queue delayed work. This must follow the creation of the RCU
1749 * workqueues and the initialization of timers.
1750 */
1751 srcu_init_done = true;
1752 while (!list_empty(&srcu_boot_list)) {
1753 ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
1754 work.work.entry);
1755 list_del_init(&ssp->work.work.entry);
1756 if (SRCU_SIZING_IS(SRCU_SIZING_INIT) && ssp->srcu_size_state == SRCU_SIZE_SMALL)
1757 ssp->srcu_size_state = SRCU_SIZE_ALLOC;
1758 queue_work(rcu_gp_wq, &ssp->work.work);
1759 }
1760 }
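
/*
 * Note: the sizing policy chosen above can also be overridden at boot time
 * via module parameters, for example "srcutree.convert_to_big=1" to
 * transition every srcu_struct at init_srcu_struct() time, or
 * "srcutree.big_cpu_lim=64" to change the CPU-count threshold used by the
 * automatic policy.
 */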
1761
1762 #ifdef CONFIG_MODULES
1763
1764 /* Initialize any global-scope srcu_struct structures used by this module. */
1765 static int srcu_module_coming(struct module *mod)
1766 {
1767 int i;
1768 struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1769 int ret;
1770
1771 for (i = 0; i < mod->num_srcu_structs; i++) {
1772 ret = init_srcu_struct(*(sspp++));
1773 if (WARN_ON_ONCE(ret))
1774 return ret;
1775 }
1776 return 0;
1777 }
1778
1779 /* Clean up any global-scope srcu_struct structures used by this module. */
1780 static void srcu_module_going(struct module *mod)
1781 {
1782 int i;
1783 struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1784
1785 for (i = 0; i < mod->num_srcu_structs; i++)
1786 cleanup_srcu_struct(*(sspp++));
1787 }
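
/*
 * Illustrative sketch (hypothetical names): a module normally gets an entry
 * in ->srcu_struct_ptrs[] by declaring its srcu_struct with DEFINE_SRCU()
 * or DEFINE_STATIC_SRCU(), after which srcu_module_coming() and
 * srcu_module_going() above initialize and clean it up automatically.
 */
#if 0	/* example only */
DEFINE_STATIC_SRCU(my_module_srcu);

static void my_module_read_side(void)
{
	int idx = srcu_read_lock(&my_module_srcu);
	/* ... access module data protected by my_module_srcu ... */
	srcu_read_unlock(&my_module_srcu, idx);
}
#endif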
1788
1789 /* Handle one module, either coming or going. */
1790 static int srcu_module_notify(struct notifier_block *self,
1791 unsigned long val, void *data)
1792 {
1793 struct module *mod = data;
1794 int ret = 0;
1795
1796 switch (val) {
1797 case MODULE_STATE_COMING:
1798 ret = srcu_module_coming(mod);
1799 break;
1800 case MODULE_STATE_GOING:
1801 srcu_module_going(mod);
1802 break;
1803 default:
1804 break;
1805 }
1806 return ret;
1807 }
1808
1809 static struct notifier_block srcu_module_nb = {
1810 .notifier_call = srcu_module_notify,
1811 .priority = 0,
1812 };
1813
1814 static __init int init_srcu_module_notifier(void)
1815 {
1816 int ret;
1817
1818 ret = register_module_notifier(&srcu_module_nb);
1819 if (ret)
1820 pr_warn("Failed to register srcu module notifier\n");
1821 return ret;
1822 }
1823 late_initcall(init_srcu_module_notifier);
1824
1825 #endif /* #ifdef CONFIG_MODULES */
1826