/linux-6.6.21/kernel/rcu/ |
D | tree_exp.h |
     13  static int rcu_print_task_exp_stall(struct rcu_node *rnp);
     14  static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp);
     83  struct rcu_node *rnp;  in sync_exp_reset_tree_hotplug() local
     95  rcu_for_each_leaf_node(rnp) {  in sync_exp_reset_tree_hotplug()
     96  raw_spin_lock_irqsave_rcu_node(rnp, flags);  in sync_exp_reset_tree_hotplug()
     97  if (rnp->expmaskinit == rnp->expmaskinitnext) {  in sync_exp_reset_tree_hotplug()
     98  raw_spin_unlock_irqrestore_rcu_node(rnp, flags);  in sync_exp_reset_tree_hotplug()
    103  oldmask = rnp->expmaskinit;  in sync_exp_reset_tree_hotplug()
    104  rnp->expmaskinit = rnp->expmaskinitnext;  in sync_exp_reset_tree_hotplug()
    105  raw_spin_unlock_irqrestore_rcu_node(rnp, flags);  in sync_exp_reset_tree_hotplug()
    [all …]
|
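The tree_exp.h hits above come from sync_exp_reset_tree_hotplug(), which walks every leaf rcu_node, skips nodes whose CPU-hotplug mask has not changed, and otherwise copies ->expmaskinitnext into ->expmaskinit under the node lock. Below is a minimal user-space sketch of that shape; the toy_node type, the pthread mutex (standing in for the kernel's raw rcu_node spinlock with interrupts disabled), and NUM_LEAVES are illustrative assumptions, not the kernel's definitions.

#include <pthread.h>
#include <stdint.h>

#define NUM_LEAVES 4    /* toy tree: leaves only */

struct toy_node {
        pthread_mutex_t lock;
        uint64_t expmaskinit;           /* mask as of the last reset */
        uint64_t expmaskinitnext;       /* mask maintained by CPU hotplug */
};

/* GNU range initializer, as used throughout kernel code. */
static struct toy_node leaves[NUM_LEAVES] = {
        [0 ... NUM_LEAVES - 1] = { .lock = PTHREAD_MUTEX_INITIALIZER },
};

static void toy_sync_exp_reset_tree_hotplug(void)
{
        for (int i = 0; i < NUM_LEAVES; i++) {
                struct toy_node *rnp = &leaves[i];

                pthread_mutex_lock(&rnp->lock);
                if (rnp->expmaskinit == rnp->expmaskinitnext) {
                        pthread_mutex_unlock(&rnp->lock);
                        continue;       /* no hotplug change: fast path */
                }
                /* The kernel also propagates the change up the tree here. */
                rnp->expmaskinit = rnp->expmaskinitnext;
                pthread_mutex_unlock(&rnp->lock);
        }
}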
D | tree_plugin.h |
    105  static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
    151  static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)  in rcu_preempt_ctxt_queue() argument
    152  __releases(rnp->lock) /* But leaves rrupts disabled. */  in rcu_preempt_ctxt_queue()
    154  int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +  in rcu_preempt_ctxt_queue()
    155  (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +  in rcu_preempt_ctxt_queue()
    156  (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +  in rcu_preempt_ctxt_queue()
    157  (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);  in rcu_preempt_ctxt_queue()
    160  raw_lockdep_assert_held_rcu_node(rnp);  in rcu_preempt_ctxt_queue()
    161  WARN_ON_ONCE(rdp->mynode != rnp);  in rcu_preempt_ctxt_queue()
    162  WARN_ON_ONCE(!rcu_is_leaf_node(rnp));  in rcu_preempt_ctxt_queue()
    [all …]
|
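Lines 154–157 above build blkd_state by encoding four independent conditions as disjoint bits, so the '+' operators are effectively bitwise OR. A hedged stand-alone sketch of that computation follows; the bit values and the toy_rnp/toy_rdp structs are placeholders (the real constants and fields live in tree_plugin.h and tree.h).

#include <stdio.h>

/* Placeholder bit values; the real constants are defined in tree_plugin.h. */
#define RCU_GP_TASKS    0x8
#define RCU_EXP_TASKS   0x4
#define RCU_GP_BLKD     0x2
#define RCU_EXP_BLKD    0x1

/* Toy stand-ins for the rcu_node and rcu_data fields the excerpt reads. */
struct toy_rnp {
        void *gp_tasks;         /* non-NULL: tasks block the normal GP */
        void *exp_tasks;        /* non-NULL: tasks block the expedited GP */
        unsigned long qsmask;   /* CPUs still owing a normal QS */
        unsigned long expmask;  /* CPUs still owing an expedited QS */
};

struct toy_rdp {
        unsigned long grpmask;  /* this CPU's bit within its leaf node */
};

/* Same arithmetic as the blkd_state computation quoted above: each
 * condition contributes one disjoint bit, so '+' acts as '|'. */
static int toy_blkd_state(struct toy_rnp *rnp, struct toy_rdp *rdp)
{
        return (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
               (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
               (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
               (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
}

int main(void)
{
        struct toy_rnp rnp = { .qsmask = 0x2, .expmask = 0x0 };
        struct toy_rdp rdp = { .grpmask = 0x2 };

        printf("blkd_state = %#x\n", toy_blkd_state(&rnp, &rdp)); /* 0x2 */
        return 0;
}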
D | tree.c |
    146  static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
    148  static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
    152  static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
    731  static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)  in rcu_gpnum_ovf() argument
    733  raw_lockdep_assert_held_rcu_node(rnp);  in rcu_gpnum_ovf()
    735  rnp->gp_seq))  in rcu_gpnum_ovf()
    737  if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))  in rcu_gpnum_ovf()
    738  rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;  in rcu_gpnum_ovf()
    771  struct rcu_node *rnp = rdp->mynode;  in rcu_implicit_dynticks_qs() local
    783  rcu_gpnum_ovf(rnp, rdp);  in rcu_implicit_dynticks_qs()
    [all …]
|
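rcu_gpnum_ovf() above guards against ->gp_seq counter wrap: a per-CPU snapshot that lags the node's ->gp_seq by more than ULONG_MAX / 4 is treated as possibly wrapped. The key ingredient is the wrap-tolerant comparison ULONG_CMP_LT(). A minimal sketch, assuming the macro keeps its kernel/rcu/rcu.h definition:

#include <limits.h>
#include <stdio.h>

/* Wrap-tolerant "a < b" for free-running counters: a is "before" b
 * when the unsigned distance a - b lands in the upper half of the
 * value space. */
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
        unsigned long before = ULONG_MAX - 1;   /* just short of the wrap */
        unsigned long after = 2;                /* logically later, post-wrap */

        printf("plain <  : %d\n", after < before);               /* 1: misleading */
        printf("cyclic < : %d\n", ULONG_CMP_LT(before, after));  /* 1: correct */
        return 0;
}

Plain '<' says the post-wrap counter is "older"; the cyclic comparison gets the order right, which is why the excerpt compares snapshots this way instead of directly.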
D | tree_stall.h |
    221  struct rcu_node *rnp;  in rcu_iw_handler() local
    224  rnp = rdp->mynode;  in rcu_iw_handler()
    225  raw_spin_lock_rcu_node(rnp);  in rcu_iw_handler()
    227  rdp->rcu_iw_gp_seq = rnp->gp_seq;  in rcu_iw_handler()
    230  raw_spin_unlock_rcu_node(rnp);  in rcu_iw_handler()
    243  static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)  in rcu_print_detail_task_stall_rnp() argument
    248  raw_spin_lock_irqsave_rcu_node(rnp, flags);  in rcu_print_detail_task_stall_rnp()
    249  if (!rcu_preempt_blocked_readers_cgp(rnp)) {  in rcu_print_detail_task_stall_rnp()
    250  raw_spin_unlock_irqrestore_rcu_node(rnp, flags);  in rcu_print_detail_task_stall_rnp()
    253  t = list_entry(rnp->gp_tasks->prev,  in rcu_print_detail_task_stall_rnp()
    [all …]
|
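rcu_print_detail_task_stall_rnp() above follows a "lock, bail out early if nothing blocks the grace period, otherwise walk the blocked-task list" shape. A hedged sketch of that control flow, using a toy singly linked list and a pthread mutex in place of the kernel's list_head/task_struct machinery and raw node lock:

#include <pthread.h>
#include <stdio.h>

struct toy_task {
        const char *comm;
        struct toy_task *next;
};

struct toy_rnp {
        pthread_mutex_t lock;
        struct toy_task *gp_tasks;      /* first task blocking the GP, or NULL */
};

static void toy_print_detail_task_stall_rnp(struct toy_rnp *rnp)
{
        pthread_mutex_lock(&rnp->lock);
        if (!rnp->gp_tasks) {   /* mirrors !rcu_preempt_blocked_readers_cgp() */
                pthread_mutex_unlock(&rnp->lock);
                return;
        }
        for (struct toy_task *t = rnp->gp_tasks; t; t = t->next)
                printf("stalled reader: %s\n", t->comm);
        pthread_mutex_unlock(&rnp->lock);
}

int main(void)
{
        struct toy_task t2 = { "task-2", NULL };
        struct toy_task t1 = { "task-1", &t2 };
        struct toy_rnp rnp = { PTHREAD_MUTEX_INITIALIZER, &t1 };

        toy_print_detail_task_stall_rnp(&rnp);
        return 0;
}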
D | rcu.h |
    371  #define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)  argument
    374  #define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])  argument
    381  #define _rcu_for_each_node_breadth_first(sp, rnp) \  argument
    382  for ((rnp) = &(sp)->node[0]; \
    383  (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
    384  #define rcu_for_each_node_breadth_first(rnp) \  argument
    385  _rcu_for_each_node_breadth_first(&rcu_state, rnp)
    386  #define srcu_for_each_node_breadth_first(ssp, rnp) \  argument
    387  _rcu_for_each_node_breadth_first(ssp->srcu_sup, rnp)
    395  #define rcu_for_each_leaf_node(rnp) \  argument
    [all …]
|
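These traversal macros work because the whole rcu_node tree is flattened into a single array with the root at index 0 and shallower levels stored before deeper ones, so a plain pointer sweep is a breadth-first walk. A stand-alone illustration of that idiom; the toy_node type and the 1+2+4 layout are assumptions for the example only:

#include <stdio.h>

#define NUM_NODES 7     /* toy tree: 1 root + 2 inner + 4 leaves */

struct toy_node { int level; };

static struct toy_node nodes[NUM_NODES];

/* Same shape as _rcu_for_each_node_breadth_first(): a linear scan of
 * the array visits every node parent-before-child. */
#define toy_for_each_node_breadth_first(rnp) \
        for ((rnp) = &nodes[0]; (rnp) < &nodes[NUM_NODES]; (rnp)++)

int main(void)
{
        struct toy_node *rnp;

        nodes[0].level = 0;                             /* root */
        for (int i = 1; i < 3; i++) nodes[i].level = 1; /* inner */
        for (int i = 3; i < 7; i++) nodes[i].level = 2; /* leaves */

        toy_for_each_node_breadth_first(rnp)
                printf("node %td at level %d\n", rnp - nodes, rnp->level);
        return 0;
}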
D | tree.h |
    147  #define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))  argument
    448  static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
    450  static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
    452  static int rcu_print_task_exp_stall(struct rcu_node *rnp);
    453  static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
    455  static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
    456  static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
    457  static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
    460  static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
    461  static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
    [all …]
|
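The one non-declaration hit here, leaf_node_cpu_bit() on line 147, maps a CPU number to its bit inside a leaf's masks: the bit index is the CPU's offset from the leaf's lowest covered CPU (->grplo). A small worked example, with a toy_leaf struct standing in for rcu_node:

#include <stdio.h>

#define BIT(n) (1UL << (n))    /* as in include/linux/bits.h */

struct toy_leaf {
        int grplo, grphi;       /* lowest/highest CPU covered by this leaf */
        unsigned long qsmask;   /* per-CPU bits within this leaf */
};

/* Same arithmetic as leaf_node_cpu_bit(). */
#define toy_leaf_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))

int main(void)
{
        struct toy_leaf leaf = { .grplo = 16, .grphi = 31 };

        leaf.qsmask |= toy_leaf_cpu_bit(&leaf, 18);     /* CPU 18 -> bit 2 */
        printf("qsmask = %#lx\n", leaf.qsmask);         /* prints 0x4 */
        return 0;
}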
D | tree_nocb.h |
    204  static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)  in rcu_nocb_gp_get() argument
    206  return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];  in rcu_nocb_gp_get()
    209  static void rcu_init_one_nocb(struct rcu_node *rnp)  in rcu_init_one_nocb() argument
    211  init_swait_queue_head(&rnp->nocb_gp_wq[0]);  in rcu_init_one_nocb()
    212  init_swait_queue_head(&rnp->nocb_gp_wq[1]);  in rcu_init_one_nocb()
    690  struct rcu_node *rnp;  in nocb_gp_wait() local
    753  rnp = rdp->mynode;  in nocb_gp_wait()
    760  rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {  in nocb_gp_wait()
    761  raw_spin_lock_rcu_node(rnp); /* irqs disabled. */  in nocb_gp_wait()
    762  needwake_gp = rcu_advance_cbs(rnp, rdp);  in nocb_gp_wait()
    [all …]
|
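rcu_nocb_gp_get() on line 206 picks one of the two ->nocb_gp_wq wait queues by the parity of the grace-period counter, so waiters on adjacent grace periods never share a queue. A hedged sketch of that indexing; it assumes the rcu.h layout where the low two bits of ->gp_seq hold state flags and rcu_seq_ctr() shifts them away:

#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT 2     /* state bits packed below the counter */

static unsigned long toy_seq_ctr(unsigned long s)
{
        return s >> RCU_SEQ_CTR_SHIFT;  /* same extraction as rcu_seq_ctr() */
}

int main(void)
{
        const char *wq[2] = { "nocb_gp_wq[0]", "nocb_gp_wq[1]" };

        /* Same indexing as rcu_nocb_gp_get(): adjacent grace periods
         * alternate between the two queues. */
        for (unsigned long gp_seq = 0; gp_seq < (4 << RCU_SEQ_CTR_SHIFT);
             gp_seq += 1 << RCU_SEQ_CTR_SHIFT)
                printf("gp %lu -> %s\n", toy_seq_ctr(gp_seq),
                       wq[toy_seq_ctr(gp_seq) & 0x1]);
        return 0;
}

Two queues suffice because at most the current and the next grace period can have waiters at once; rcu_init_one_nocb() accordingly initializes exactly nocb_gp_wq[0] and nocb_gp_wq[1].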
/linux-6.6.21/Documentation/RCU/Design/Memory-Ordering/ |
D | Tree-RCU-Memory-Ordering.rst |
     84  5 raw_spin_lock_rcu_node(rnp);
     87  8 raw_spin_unlock_rcu_node(rnp);
     92  13 raw_spin_lock_rcu_node(rnp);
     95  16 raw_spin_unlock_rcu_node(rnp);
    206  5 struct rcu_node *rnp;
    232  31 rnp = rdp->mynode;
    233  32 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
    234  33 needwake = rcu_accelerate_cbs(rnp, rdp);
    235  34 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
|
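The second excerpt (doc lines 232–235) shows the recurring "lock the leaf node, compute whether a wakeup is needed, unlock, then wake" pattern: the wakeup is deferred so it never runs while the rcu_node lock is held. Note that raw_spin_lock_rcu_node() also adds smp_mb__after_unlock_lock() to chain lock acquisitions into full barriers, which the pthread sketch below deliberately does not model; the toy_* names are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_rnp { pthread_mutex_t lock; };
struct toy_rdp { int n_new_cbs; };

/* Stand-in for rcu_accelerate_cbs(): assign GP numbers to new callbacks
 * and report whether the GP kthread needs a kick. */
static bool toy_accelerate_cbs(struct toy_rnp *rnp, struct toy_rdp *rdp)
{
        return rdp->n_new_cbs > 0;
}

static void toy_note_new_cbs(struct toy_rnp *rnp, struct toy_rdp *rdp)
{
        bool needwake;

        pthread_mutex_lock(&rnp->lock); /* kernel: irqs already disabled */
        needwake = toy_accelerate_cbs(rnp, rdp);
        pthread_mutex_unlock(&rnp->lock);
        if (needwake)
                printf("wake GP kthread\n"); /* kernel: rcu_gp_kthread_wake() */
}

int main(void)
{
        struct toy_rnp rnp = { PTHREAD_MUTEX_INITIALIZER };
        struct toy_rdp rdp = { .n_new_cbs = 1 };

        toy_note_new_cbs(&rnp, &rdp);
        return 0;
}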
/linux-6.6.21/Documentation/RCU/Design/Data-Structures/ |
D | Data-Structures.rst |
   1106  6 #define rcu_for_each_node_breadth_first(rsp, rnp) \
   1107  7 for ((rnp) = &(rsp)->node[0]; \
   1108  8 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
   1110  10 #define rcu_for_each_leaf_node(rsp, rnp) \
   1111  11 for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
   1112  12 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
|
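This documentation excerpt quotes the older two-argument form of the macros (taking an explicit rcu_state pointer, dropped once the kernel went to a single rcu_state). The leaf variant starts at ->level[NUM_RCU_LVLS - 1], the first leaf in the flat array, and runs to the array's end. A self-contained sketch of that layout; the toy_state struct and the 1+2+4 node split are assumptions for the example:

#include <stdio.h>

#define NUM_RCU_NODES 7         /* toy: 1 root + 2 inner + 4 leaves */
#define NUM_RCU_LVLS  3

struct toy_node { int id; };

struct toy_state {
        struct toy_node node[NUM_RCU_NODES];    /* root first, leaves last */
        struct toy_node *level[NUM_RCU_LVLS];   /* first node of each level */
};

/* Same shape as the quoted rcu_for_each_leaf_node(): start at the
 * first leaf and sweep to the end of the flat array. */
#define toy_for_each_leaf_node(rsp, rnp) \
        for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
             (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)

int main(void)
{
        struct toy_state s;
        struct toy_node *rnp;

        for (int i = 0; i < NUM_RCU_NODES; i++)
                s.node[i].id = i;
        s.level[0] = &s.node[0];        /* root */
        s.level[1] = &s.node[1];        /* inner nodes */
        s.level[2] = &s.node[3];        /* leaves start here */

        toy_for_each_leaf_node(&s, rnp)
                printf("leaf %d\n", rnp->id);   /* prints 3..6 */
        return 0;
}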