Lines matching refs: rcu_data
78 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
151 static void rcu_report_exp_rdp(struct rcu_data *rdp);
153 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
154 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
233 static bool rcu_rdp_cpu_online(struct rcu_data *rdp) in rcu_rdp_cpu_online()
254 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_get_n_cbs_cpu()
315 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) in rcu_dynticks_in_eqs_since()
354 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false); in rcu_momentary_dyntick_idle()
500 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
573 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_irq_work_resched()
636 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in __rcu_irq_enter_check_tick()
683 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) && in rcu_needs_cpu()
684 !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data)); in rcu_needs_cpu()
692 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) in rcu_disable_urgency_upon_qs()
740 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true); in rcu_request_urgent_qs_task()
760 struct rcu_data *rdp; in rcu_lockdep_current_cpu_online()
766 rdp = this_cpu_ptr(&rcu_data); in rcu_lockdep_current_cpu_online()
790 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf()
805 static int dyntick_save_progress_counter(struct rcu_data *rdp) in dyntick_save_progress_counter()
822 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) in rcu_implicit_dynticks_qs()
940 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp()
964 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, in rcu_start_this_gp()
1042 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_future_gp_cleanup()
1091 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_accelerate_cbs()
1138 struct rcu_data *rdp) in rcu_accelerate_cbs_unlocked()
1167 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_advance_cbs()
1191 struct rcu_data *rdp) in rcu_advance_cbs_nowake()
1221 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) in __note_gp_changes()
1270 static void note_gp_changes(struct rcu_data *rdp) in note_gp_changes()
1442 struct rcu_data *rdp; in rcu_gp_init()
1549 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_init()
1722 struct rcu_data *rdp; in rcu_gp_cleanup()
1763 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
1771 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_gp_cleanup()
1790 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2006 rcu_report_qs_rdp(struct rcu_data *rdp) in rcu_report_qs_rdp()
2072 rcu_check_quiescent_state(struct rcu_data *rdp) in rcu_check_quiescent_state()
2105 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_dying_cpu()
2170 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_dead_cpu()
2188 static void rcu_do_batch(struct rcu_data *rdp) in rcu_do_batch()
2331 WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock))); in rcu_sched_clock_irq()
2332 __this_cpu_write(rcu_data.last_sched_clock, j); in rcu_sched_clock_irq()
2336 raw_cpu_inc(rcu_data.ticks_this_gp); in rcu_sched_clock_irq()
2338 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { in rcu_sched_clock_irq()
2344 __this_cpu_write(rcu_data.rcu_urgent_qs, false); in rcu_sched_clock_irq()
2363 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) in force_qs_rnp()
2368 struct rcu_data *rdp; in force_qs_rnp()
2393 rdp = per_cpu_ptr(&rcu_data, cpu); in force_qs_rnp()
2421 rnp = raw_cpu_read(rcu_data.mynode); in rcu_force_quiescent_state()
2459 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); in rcu_core()
2546 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1); in invoke_rcu_core_kthread()
2547 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task); in invoke_rcu_core_kthread()
2549 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status)); in invoke_rcu_core_kthread()
2568 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; in rcu_cpu_kthread_park()
2573 return __this_cpu_read(rcu_data.rcu_cpu_has_work); in rcu_cpu_kthread_should_run()
2583 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); in rcu_cpu_kthread()
2584 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); in rcu_cpu_kthread()
2585 unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity); in rcu_cpu_kthread()
2615 .store = &rcu_data.rcu_cpu_kthread_task,
2631 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0; in rcu_spawn_core_kthreads()
2642 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, in __call_rcu_core()
2697 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) in check_cb_ovld_locked()
2720 static void check_cb_ovld(struct rcu_data *rdp) in check_cb_ovld()
2777 struct rcu_data *rdp; in call_rcu()
2800 rdp = this_cpu_ptr(&rcu_data); in call_rcu()
3532 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent) in synchronize_rcu()
3609 struct rcu_data *rdp; in start_poll_synchronize_rcu_common()
3614 rdp = this_cpu_ptr(&rcu_data); in start_poll_synchronize_rcu_common()
3819 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_pending()
3895 static void rcu_barrier_entrain(struct rcu_data *rdp) in rcu_barrier_entrain()
3924 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier_handler()
3947 struct rcu_data *rdp; in rcu_barrier()
3986 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
4028 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
4073 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_boot_init_percpu_data()
4104 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_prepare_cpu()
4151 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_affinity_setting()
4163 struct rcu_data *rdp; in rcutree_online_cpu()
4166 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_online_cpu()
4188 struct rcu_data *rdp; in rcutree_offline_cpu()
4191 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_offline_cpu()
4219 struct rcu_data *rdp; in rcu_cpu_starting()
4223 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_starting()
4275 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_report_dead()
4315 struct rcu_data *my_rdp; in rcutree_migrate_callbacks()
4317 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_migrate_callbacks()
4327 my_rdp = this_cpu_ptr(&rcu_data); in rcutree_migrate_callbacks()
4435 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_spawn_gp_kthread()
4579 per_cpu_ptr(&rcu_data, i)->mynode = rnp; in rcu_init_one()
4797 if (!(per_cpu_ptr(&rcu_data, cpu)->mynode->exp_seq_poll_rq & 0x1)) in rcu_init()
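The listing shows the recurring pattern around rcu_data: it is declared once as a per-CPU variable (line 78, DEFINE_PER_CPU_SHARED_ALIGNED) and every other reference reaches a particular CPU's copy through the kernel's per-CPU accessors, either by explicit CPU number (per_cpu_ptr(&rcu_data, cpu)) or for the currently running CPU (this_cpu_ptr(), raw_cpu_ptr(), __this_cpu_read()/__this_cpu_write(), raw_cpu_read()/raw_cpu_write()). The sketch below is a minimal userspace analogue of that access pattern, not kernel code: NR_CPUS, demo_rcu_data, demo_per_cpu_ptr(), and demo_this_cpu_ptr() are illustration-only names, and the two fields are borrowed from entries in the listing (rcu_urgent_qs on line 740, ticks_this_gp on line 2336).

/*
 * Hypothetical userspace analogue of the per-CPU rcu_data access pattern.
 * In the kernel, per_cpu_ptr()/this_cpu_ptr() resolve to a per-CPU memory
 * area; here an ordinary array indexed by CPU id stands in for it.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4   /* illustration only; the kernel value is a config option */

struct demo_rcu_data {
	bool rcu_urgent_qs;           /* stands in for rcu_data.rcu_urgent_qs   */
	unsigned long ticks_this_gp;  /* stands in for rcu_data.ticks_this_gp   */
};

/* One instance per CPU, like DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data). */
static struct demo_rcu_data demo_rcu_data[NR_CPUS];

/* Counterpart of per_cpu_ptr(&rcu_data, cpu): address an explicit CPU's copy. */
static struct demo_rcu_data *demo_per_cpu_ptr(int cpu)
{
	return &demo_rcu_data[cpu];
}

/* Counterpart of this_cpu_ptr(&rcu_data): address the current CPU's copy. */
static struct demo_rcu_data *demo_this_cpu_ptr(int current_cpu)
{
	return &demo_rcu_data[current_cpu];
}

int main(void)
{
	int cpu;

	/* Some other context asks CPU 2 for an urgent quiescent state. */
	demo_per_cpu_ptr(2)->rcu_urgent_qs = true;

	/* Each CPU updates its own counters and checks its own flag. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		struct demo_rcu_data *rdp = demo_this_cpu_ptr(cpu);

		rdp->ticks_this_gp++;
		if (rdp->rcu_urgent_qs)
			printf("cpu %d: urgent quiescent state requested\n", cpu);
	}
	return 0;
}

The point of the pattern is that each CPU normally touches only its own rcu_data instance (cheap, cache-local, no locking), while cross-CPU code such as rcu_barrier() or the CPU-hotplug paths in the listing uses per_cpu_ptr() with an explicit CPU argument to reach another CPU's copy.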