
Searched refs: data_race (results 1 – 25 of 52), sorted by relevance


/linux-6.6.21/kernel/rcu/
tree_stall.h
    519  data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,  in print_cpu_stall_info()
    538  data_race(READ_ONCE(rcu_state.gp_flags)),  in rcu_check_gp_kthread_starvation()
    540  data_race(READ_ONCE(rcu_state.gp_state)),  in rcu_check_gp_kthread_starvation()
    541  gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);  in rcu_check_gp_kthread_starvation()
    581  data_race(rcu_state.gp_flags),  in rcu_check_gp_kthread_expired_fqs_timer()
    583  data_race(READ_ONCE(gpk->__state)));  in rcu_check_gp_kthread_expired_fqs_timer()
    642  gpa = data_race(READ_ONCE(rcu_state.gp_activity));  in print_other_cpu_stall()
    645  data_race(READ_ONCE(jiffies_till_next_fqs)),  in print_other_cpu_stall()
    646  data_race(READ_ONCE(rcu_get_root()->qsmask)));  in print_other_cpu_stall()
    849  if (data_race(READ_ONCE(rnp->qsmask))) {  in rcu_check_boost_fail()
    [all …]
tasks.h
    229  int i = data_race(rtp->gp_state); // Let KCSAN detect update races  in tasks_gp_state_getname()
    281  data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust);  in cblist_init_generic()
    699  if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))  in show_rcu_tasks_generic_gp_kthread()
    701  if (data_race(rtpcp->urgent_gp))  in show_rcu_tasks_generic_gp_kthread()
    703  if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))  in show_rcu_tasks_generic_gp_kthread()
    710  tasks_gp_state_getname(rtp), data_race(rtp->gp_state),  in show_rcu_tasks_generic_gp_kthread()
    711  jiffies - data_race(rtp->gp_jiffies),  in show_rcu_tasks_generic_gp_kthread()
    712  data_race(rcu_seq_current(&rtp->tasks_gp_seq)),  in show_rcu_tasks_generic_gp_kthread()
    713  data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),  in show_rcu_tasks_generic_gp_kthread()
    714  ".k"[!!data_race(rtp->kthread_ptr)],  in show_rcu_tasks_generic_gp_kthread()
    [all …]
tree_exp.h
    645  "D."[!!data_race(rdp->cpu_no_qs.b.exp)]);  in synchronize_rcu_expedited_wait()
    650  data_race(rnp_root->expmask),  in synchronize_rcu_expedited_wait()
    651  ".T"[!!data_race(rnp_root->exp_tasks)]);  in synchronize_rcu_expedited_wait()
    661  data_race(rnp->expmask),  in synchronize_rcu_expedited_wait()
    662  ".T"[!!data_race(rnp->exp_tasks)]);  in synchronize_rcu_expedited_wait()
srcutree.c
    1862  u0 = data_race(atomic_long_read(&sdp->srcu_unlock_count[!idx]));  in srcu_torture_stats_print()
    1863  u1 = data_race(atomic_long_read(&sdp->srcu_unlock_count[idx]));  in srcu_torture_stats_print()
    1871  l0 = data_race(atomic_long_read(&sdp->srcu_lock_count[!idx]));  in srcu_torture_stats_print()
    1872  l1 = data_race(atomic_long_read(&sdp->srcu_lock_count[idx]));  in srcu_torture_stats_print()
/linux-6.6.21/include/linux/
srcutiny.h
    88  idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;  in srcu_torture_stats_print()
    91  data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),  in srcu_torture_stats_print()
    92  data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),  in srcu_torture_stats_print()
    93  data_race(READ_ONCE(ssp->srcu_idx)),  in srcu_torture_stats_print()
    94  data_race(READ_ONCE(ssp->srcu_idx_max)));  in srcu_torture_stats_print()
compiler.h
    195  #define data_race(expr) \  (macro)
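For orientation, the data_race() macro defined above evaluates its expression with KCSAN's race checking suppressed, marking the access as an intentional, tolerated data race. The following is only a simplified sketch of that idea; kcsan_disable_current()/kcsan_enable_current() are real kernel helpers, but this macro body is illustrative and is not the exact definition from compiler.h:

    /* Simplified sketch, NOT the kernel's exact data_race() definition. */
    #define data_race_sketch(expr)						\
    ({									\
    	kcsan_disable_current();	/* suppress KCSAN race reporting */	\
    	__auto_type __v = (expr);	/* evaluate the racy expression  */	\
    	kcsan_enable_current();		/* re-enable KCSAN race reporting */	\
    	__v;				/* yield the value of the expression */	\
    })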
console.h
    260  return data_race(READ_ONCE(con->flags));  in console_srcu_read_flags()
/linux-6.6.21/tools/memory-model/Documentation/
access-marking.txt
    19  2. Data-race marking, for example, "data_race(a = b);"
    31  WRITE_ONCE(a, b + data_race(c + d) + READ_ONCE(e));
    33  Neither plain C-language accesses nor data_race() (#1 and #2 above) place
    39  preferable to data_race(), which in turn is usually preferable to plain
    41  data_race(READ_ONCE(a)), which will both restrict compiler optimizations
    46  race with one of data_race(), READ_ONCE(), or WRITE_ONCE(), will prevent
    50  ill-considered additions of data_race(), READ_ONCE(), and WRITE_ONCE()
    54  data_race() and even plain C-language accesses is preferable to
    58  Use of the data_race() Macro
    61  Here are some situations where data_race() should be used instead of
    [all …]
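The guidance excerpted above reduces to: use data_race() for reads whose value only feeds diagnostics (statistics, debug output), and READ_ONCE()/WRITE_ONCE() when the loaded or stored value affects the algorithm itself. A minimal, hypothetical sketch of that pattern follows; the counter, lock, and functions are invented for illustration and are not from the kernel tree:

    /* Hypothetical illustration of the marking guidance above. */
    static unsigned long nr_events;
    static DEFINE_SPINLOCK(events_lock);

    void record_event(void)
    {
    	/* All updates are serialized by events_lock, so a plain write is
    	 * fine: the only concurrent readers are the marked diagnostic
    	 * reads below. */
    	spin_lock(&events_lock);
    	nr_events++;
    	spin_unlock(&events_lock);
    }

    void show_event_stats(void)
    {
    	/* Lockless read used only for a debug printout; data_race() tells
    	 * KCSAN (and human readers) the race is intentional and harmless. */
    	pr_info("events so far: %lu\n", data_race(nr_events));
    }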
/linux-6.6.21/security/tomoyo/
util.c
    1069  perm = data_race(container_of(ptr, struct tomoyo_path_acl, head)->perm);  in tomoyo_domain_quota_is_ok()
    1072  perm = data_race(container_of(ptr, struct tomoyo_path2_acl, head)->perm);  in tomoyo_domain_quota_is_ok()
    1075  perm = data_race(container_of(ptr, struct tomoyo_path_number_acl, head)  in tomoyo_domain_quota_is_ok()
    1079  perm = data_race(container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);  in tomoyo_domain_quota_is_ok()
    1082  perm = data_race(container_of(ptr, struct tomoyo_inet_acl, head)->perm);  in tomoyo_domain_quota_is_ok()
    1085  perm = data_race(container_of(ptr, struct tomoyo_unix_acl, head)->perm);  in tomoyo_domain_quota_is_ok()
/linux-6.6.21/arch/powerpc/kernel/
interrupt.c
    167  if (!data_race(warned)) {  in check_return_regs_valid()
    168  data_race(warned = true);  in check_return_regs_valid()
/linux-6.6.21/fs/btrfs/
block-rsv.h
    101  return data_race(rsv->full);  in btrfs_block_rsv_full()
/linux-6.6.21/kernel/locking/
osq_lock.c
    161  if (data_race(prev->next) == node &&  in osq_lock()
locktorture.c
    905  long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;  in __torture_print_stats()
    910  if (data_race(statp[i].n_lock_fail))  in __torture_print_stats()
    912  cur = data_race(statp[i].n_lock_acquired);  in __torture_print_stats()
/linux-6.6.21/mm/
page_counter.c
    127  data_race(c->failcnt++);  in page_counter_try_charge()
page_io.c
    381  if (data_race(sis->flags & SWP_FS_OPS))  in __swap_writepage()
    519  } else if (data_race(sis->flags & SWP_FS_OPS)) {  in swap_readpage()
swap.c
    656  if (data_race(folio_batch_count(fbatch))) {  in lru_add_drain_cpu()
    796  data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||  in cpu_needs_drain()
/linux-6.6.21/lib/
group_cpus.c
    383  cpumask_copy(npresmsk, data_race(cpu_present_mask));  in group_cpus_evenly()
/linux-6.6.21/drivers/net/wireless/ath/ath9k/
wmi.c
    173  if (!data_race(priv->tx.initialized))  in ath9k_wmi_event_tasklet()
/linux-6.6.21/kernel/irq/
irqdesc.c
    975  return data_race(desc->tot_count);  in kstat_irqs()
    978  sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu));  in kstat_irqs()
proc.c
    493  any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j));  in show_interrupts()
/linux-6.6.21/mm/kfence/
core.c
    1185  distance = addr - data_race(meta->addr + meta->size);  in kfence_handle_page_fault()
    1191  if (!to_report || distance > data_race(meta->addr) - addr)  in kfence_handle_page_fault()
/linux-6.6.21/kernel/cgroup/
rstat.c
    42  if (data_race(cgroup_rstat_cpu(cgrp, cpu)->updated_next))  in cgroup_rstat_updated()
/linux-6.6.21/net/9p/
trans_fd.c
    843  data_race(ts->rd->f_flags |= O_NONBLOCK);  in p9_fd_open()
    849  data_race(ts->wr->f_flags |= O_NONBLOCK);  in p9_fd_open()
/linux-6.6.21/fs/jbd2/
transaction.c
    363  if (!data_race(journal->j_running_transaction)) {  in start_this_handle()
    1510  if (data_race(jh->b_transaction != transaction &&  in jbd2_journal_dirty_metadata()
    1519  if (data_race(jh->b_transaction == transaction &&  in jbd2_journal_dirty_metadata()
/linux-6.6.21/kernel/time/
timekeeping.c
    536  return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_boot)));  in ktime_get_boot_fast_ns()
    553  return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_tai)));  in ktime_get_tai_fast_ns()
    638  snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot));  in ktime_get_fast_timestamps()
