/linux-6.1.9/kernel/rcu/ |
D | tree_stall.h |
     30  int cpu_stall_timeout = READ_ONCE(rcu_exp_cpu_stall_timeout);  in rcu_exp_jiffies_till_stall_check()
     59  int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);  in rcu_jiffies_till_stall_check()
    103  return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);  in rcu_gp_might_be_stalled()
    175  rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);  in record_gp_stall_check_time()
    194  if (!READ_ONCE(rcu_kick_kthreads))  in rcu_stall_kick_kthreads()
    196  j = READ_ONCE(rcu_state.jiffies_kick_kthreads);  in rcu_stall_kick_kthreads()
    198  (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {  in rcu_stall_kick_kthreads()
    403  unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);  in rcu_is_gp_kthread_starving()
    424  j = jiffies - READ_ONCE(rdp->rcuc_activity);  in rcu_is_rcuc_kthread_starving()
    501  data_race(READ_ONCE(rcu_state.gp_flags)),  in rcu_check_gp_kthread_starvation()
    [all …]
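The RCU stall-warning code above reads timeout tunables and grace-period state with READ_ONCE() because other CPUs may update them concurrently and no lock is held at the read site. As a minimal sketch of what the annotation buys (a simplified userspace approximation of the real macro in include/asm-generic/rwonce.h; the tunable name below is invented for illustration):

    /* Simplified approximation: a volatile access forces exactly one load,
     * which the compiler may neither tear, refetch, nor hoist out of loops. */
    #define READ_ONCE(x)  (*(const volatile typeof(x) *)&(x))

    /* Hypothetical tunable written from sysfs on one CPU, polled on another. */
    static int stall_timeout_ms = 21 * 1000;

    static int rcu_stall_delay(void)
    {
            int t = READ_ONCE(stall_timeout_ms);    /* single, untorn load */

            return t < 3 ? 3 : t;                   /* clamp, as the real code does */
    }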
|
D | sync.c |
     78  WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);  in rcu_sync_func()
     79  WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);  in rcu_sync_func()
    155  wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);  in rcu_sync_enter()
    170  WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);  in rcu_sync_exit()
    171  WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);  in rcu_sync_exit()
    193  WARN_ON_ONCE(READ_ONCE(rsp->gp_count));  in rcu_sync_dtor()
    194  WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);  in rcu_sync_dtor()
|
D | srcutiny.c |
     99  int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;  in __srcu_read_unlock()
    102  if (!newval && READ_ONCE(ssp->srcu_gp_waiting) && in_task())  in __srcu_read_unlock()
    120  if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))  in srcu_drive_gp()
    133  swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));  in srcu_drive_gp()
    153  if (ULONG_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))  in srcu_drive_gp()
    163  if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))  in srcu_gp_start_if_needed()
    166  if (!READ_ONCE(ssp->srcu_gp_running)) {  in srcu_gp_start_if_needed()
    216  ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;  in get_state_synchronize_srcu()
    243  unsigned long cur_s = READ_ONCE(ssp->srcu_idx);  in poll_state_synchronize_srcu()
|
D | rcu_segcblist.h |
     15  return READ_ONCE(rclp->len);  in rcu_cblist_n_cbs()
     43  return !READ_ONCE(rsclp->head);  in rcu_segcblist_empty()
     52  return READ_ONCE(rsclp->len);  in rcu_segcblist_n_cbs()
     71  return READ_ONCE(rsclp->flags) & flags;  in rcu_segcblist_test_flags()
    112  return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));  in rcu_segcblist_restempty()
|
/linux-6.1.9/Documentation/translations/ko_KR/ |
D | memory-barriers.txt |
    259  Q = READ_ONCE(P); D = READ_ONCE(*Q);
    266  READ_ONCE() is also made to emit a memory-barrier instruction, so that a DEC Alpha CPU
    271  whether running on DEC Alpha or not, READ_ONCE() also protects against ill effects from the compiler
    277  a = READ_ONCE(*X); WRITE_ONCE(*X, b);
    285  WRITE_ONCE(*X, c); d = READ_ONCE(*X);
    296  (*) the compiler, for memory accesses that are not protected by READ_ONCE() or WRITE_ONCE(),
    577  as of Linux kernel v4.15, smp_mb() was added to the READ_ONCE() code for DEC Alpha,
    579  which means only those writing such dedicated code and those implementing READ_ONCE() itself need to care.
    593  Q = READ_ONCE(P);
    620  Q = READ_ONCE(P);
    [all …]
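The memory-barriers.txt snippets above (quoted from the ko_KR translation) concern address-dependent reads: on DEC Alpha, only READ_ONCE() of the pointer guarantees that a dependent load through it sees the published data, and on every architecture it keeps the compiler from tearing or refetching the access. A minimal sketch of the publish/consume pattern those lines quote, assuming kernel primitives:

    struct foo { int a; };
    static struct foo foo_data;
    static struct foo *P;

    /* CPU 0: initialize the payload, then publish the pointer with release
     * semantics so the two stores cannot be reordered. */
    void publish(void)
    {
            foo_data.a = 42;
            smp_store_release(&P, &foo_data);
    }

    /* CPU 1: READ_ONCE() both marks the racy load for the compiler and, on
     * DEC Alpha, supplies the ordering that makes the dependent load of p->a
     * observe the initialized value (the smp_mb() added in v4.15, see above). */
    int consume(void)
    {
            struct foo *p = READ_ONCE(P);

            return p ? p->a : -1;
    }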
|
/linux-6.1.9/io_uring/ |
D | fs.c |
     60  ren->old_dfd = READ_ONCE(sqe->fd);  in io_renameat_prep()
     61  oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));  in io_renameat_prep()
     62  newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));  in io_renameat_prep()
     63  ren->new_dfd = READ_ONCE(sqe->len);  in io_renameat_prep()
     64  ren->flags = READ_ONCE(sqe->rename_flags);  in io_renameat_prep()
    114  un->dfd = READ_ONCE(sqe->fd);  in io_unlinkat_prep()
    116  un->flags = READ_ONCE(sqe->unlink_flags);  in io_unlinkat_prep()
    120  fname = u64_to_user_ptr(READ_ONCE(sqe->addr));  in io_unlinkat_prep()
    164  mkd->dfd = READ_ONCE(sqe->fd);  in io_mkdirat_prep()
    165  mkd->mode = READ_ONCE(sqe->len);  in io_mkdirat_prep()
    [all …]
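The io_uring prep helpers listed here, and in the files that follow, copy their arguments out of the submission queue entry with READ_ONCE(): the SQE ring is mapped into userspace, so each field is fetched exactly once and validated against that single snapshot rather than being re-read after the check. A hypothetical prep function in the same style (the request struct and flag mask below are invented for illustration; real io_uring keeps per-opcode state inside io_kiocb):

    struct io_example {
            int                 dfd;
            unsigned int        flags;
            const char __user  *path;
    };

    static int io_example_prep(struct io_example *ex, const struct io_uring_sqe *sqe)
    {
            /* One load per field from the user-visible SQE: userspace may keep
             * scribbling on the ring, but the kernel only ever acts on the
             * values captured here. */
            ex->dfd   = READ_ONCE(sqe->fd);
            ex->flags = READ_ONCE(sqe->open_flags);
            ex->path  = u64_to_user_ptr(READ_ONCE(sqe->addr));

            if (ex->flags & ~0x3)           /* hypothetical supported-flags mask */
                    return -EINVAL;
            return 0;
    }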
|
D | sync.c |
     32  sync->off = READ_ONCE(sqe->off);  in io_sfr_prep()
     33  sync->len = READ_ONCE(sqe->len);  in io_sfr_prep()
     34  sync->flags = READ_ONCE(sqe->sync_range_flags);  in io_sfr_prep()
     59  sync->flags = READ_ONCE(sqe->fsync_flags);  in io_fsync_prep()
     63  sync->off = READ_ONCE(sqe->off);  in io_fsync_prep()
     64  sync->len = READ_ONCE(sqe->len);  in io_fsync_prep()
     91  sync->off = READ_ONCE(sqe->off);  in io_fallocate_prep()
     92  sync->len = READ_ONCE(sqe->addr);  in io_fallocate_prep()
     93  sync->mode = READ_ONCE(sqe->len);  in io_fallocate_prep()
|
D | advise.c |
     39  ma->addr = READ_ONCE(sqe->addr);  in io_madvise_prep()
     40  ma->len = READ_ONCE(sqe->len);  in io_madvise_prep()
     41  ma->advice = READ_ONCE(sqe->fadvise_advice);  in io_madvise_prep()
     72  fa->offset = READ_ONCE(sqe->off);  in io_fadvise_prep()
     73  fa->len = READ_ONCE(sqe->len);  in io_fadvise_prep()
     74  fa->advice = READ_ONCE(sqe->fadvise_advice);  in io_fadvise_prep()
|
D | splice.c |
     32  sp->len = READ_ONCE(sqe->len);  in __io_splice_prep()
     33  sp->flags = READ_ONCE(sqe->splice_flags);  in __io_splice_prep()
     36  sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);  in __io_splice_prep()
     42  if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))  in io_tee_prep()
     83  sp->off_in = READ_ONCE(sqe->splice_off_in);  in io_splice_prep()
     84  sp->off_out = READ_ONCE(sqe->off);  in io_splice_prep()
|
D | statx.c |
     33  sx->dfd = READ_ONCE(sqe->fd);  in io_statx_prep()
     34  sx->mask = READ_ONCE(sqe->len);  in io_statx_prep()
     35  path = u64_to_user_ptr(READ_ONCE(sqe->addr));  in io_statx_prep()
     36  sx->buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));  in io_statx_prep()
     37  sx->flags = READ_ONCE(sqe->statx_flags);  in io_statx_prep()
|
D | xattr.c |
     56  name = u64_to_user_ptr(READ_ONCE(sqe->addr));  in __io_getxattr_prep()
     57  ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));  in __io_getxattr_prep()
     58  ix->ctx.size = READ_ONCE(sqe->len);  in __io_getxattr_prep()
     59  ix->ctx.flags = READ_ONCE(sqe->xattr_flags);  in __io_getxattr_prep()
     96  path = u64_to_user_ptr(READ_ONCE(sqe->addr3));  in io_getxattr_prep()
    162  name = u64_to_user_ptr(READ_ONCE(sqe->addr));  in __io_setxattr_prep()
    163  ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));  in __io_setxattr_prep()
    165  ix->ctx.size = READ_ONCE(sqe->len);  in __io_setxattr_prep()
    166  ix->ctx.flags = READ_ONCE(sqe->xattr_flags);  in __io_setxattr_prep()
    192  path = u64_to_user_ptr(READ_ONCE(sqe->addr3));  in io_setxattr_prep()
|
D | openclose.c |
     49  open->dfd = READ_ONCE(sqe->fd);  in __io_openat_prep()
     50  fname = u64_to_user_ptr(READ_ONCE(sqe->addr));  in __io_openat_prep()
     58  open->file_slot = READ_ONCE(sqe->file_index);  in __io_openat_prep()
     70  u64 mode = READ_ONCE(sqe->len);  in io_openat_prep()
     71  u64 flags = READ_ONCE(sqe->open_flags);  in io_openat_prep()
     84  how = u64_to_user_ptr(READ_ONCE(sqe->addr2));  in io_openat2_prep()
     85  len = READ_ONCE(sqe->len);  in io_openat2_prep()
    204  close->fd = READ_ONCE(sqe->fd);  in io_close_prep()
    205  close->file_slot = READ_ONCE(sqe->file_index);  in io_close_prep()
|
/linux-6.1.9/include/net/ |
D | busy_poll.h |
     36  return READ_ONCE(sysctl_net_busy_poll);  in net_busy_loop_on()
     41  return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);  in sk_can_busy_loop()
     76  unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);  in busy_loop_timeout()
     92  unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);  in sk_busy_loop_timeout()
    107  unsigned int napi_id = READ_ONCE(sk->sk_napi_id);  in sk_busy_loop()
    111  READ_ONCE(sk->sk_prefer_busy_poll),  in sk_busy_loop()
    112  READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);  in sk_busy_loop()
    133  if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))  in sk_mark_napi_id()
    155  if (!READ_ONCE(sk->sk_napi_id))  in __sk_mark_napi_id_once()
|
/linux-6.1.9/include/linux/ |
D | srcutiny.h |
     63  idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;  in __srcu_read_lock()
     64  WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);  in __srcu_read_lock()
     84  idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;  in srcu_torture_stats_print()
     87  data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),  in srcu_torture_stats_print()
     88  data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),  in srcu_torture_stats_print()
     89  data_race(READ_ONCE(ssp->srcu_idx)),  in srcu_torture_stats_print()
     90  data_race(READ_ONCE(ssp->srcu_idx_max)));  in srcu_torture_stats_print()
|
/linux-6.1.9/arch/arm64/include/asm/ |
D | preempt.h |
     13  return READ_ONCE(current_thread_info()->preempt.count);  in preempt_count()
     47  u32 pc = READ_ONCE(current_thread_info()->preempt.count);  in __preempt_count_add()
     54  u32 pc = READ_ONCE(current_thread_info()->preempt.count);  in __preempt_count_sub()
     62  u64 pc = READ_ONCE(ti->preempt_count);  in __preempt_count_dec_and_test()
     74  return !pc || !READ_ONCE(ti->preempt_count);  in __preempt_count_dec_and_test()
     79  u64 pc = READ_ONCE(current_thread_info()->preempt_count);  in should_resched()
|
/linux-6.1.9/arch/arm64/mm/ |
D | mmu.c |
    177  pte_t old_pte = READ_ONCE(*ptep);  in init_pte()
    186  READ_ONCE(pte_val(*ptep))));  in init_pte()
    201  pmd_t pmd = READ_ONCE(*pmdp);  in alloc_init_cont_pte()
    213  pmd = READ_ONCE(*pmdp);  in alloc_init_cont_pte()
    242  pmd_t old_pmd = READ_ONCE(*pmdp);  in init_pmd()
    256  READ_ONCE(pmd_val(*pmdp))));  in init_pmd()
    262  pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));  in init_pmd()
    276  pud_t pud = READ_ONCE(*pudp);  in alloc_init_cont_pmd()
    291  pud = READ_ONCE(*pudp);  in alloc_init_cont_pmd()
    319  p4d_t p4d = READ_ONCE(*p4dp);  in alloc_init_pud()
    [all …]
|
/linux-6.1.9/drivers/powercap/ |
D | idle_inject.c |
    114  duration_us = READ_ONCE(ii_dev->run_duration_us);  in idle_inject_timer_fn()
    115  duration_us += READ_ONCE(ii_dev->idle_duration_us);  in idle_inject_timer_fn()
    144  play_idle_precise(READ_ONCE(ii_dev->idle_duration_us) * NSEC_PER_USEC,  in idle_inject_fn()
    145  READ_ONCE(ii_dev->latency_us) * NSEC_PER_USEC);  in idle_inject_fn()
    172  *run_duration_us = READ_ONCE(ii_dev->run_duration_us);  in idle_inject_get_duration()
    173  *idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);  in idle_inject_get_duration()
    198  unsigned int idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);  in idle_inject_start()
    199  unsigned int run_duration_us = READ_ONCE(ii_dev->run_duration_us);  in idle_inject_start()
|
/linux-6.1.9/arch/s390/lib/ |
D | spinlock.c |
    131  old = READ_ONCE(lp->lock);  in arch_spin_lock_queued()
    168  while (READ_ONCE(node->prev) != NULL) {  in arch_spin_lock_queued()
    182  old = READ_ONCE(lp->lock);  in arch_spin_lock_queued()
    202  while ((next = READ_ONCE(node->next)) == NULL)  in arch_spin_lock_queued()
    218  owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);  in arch_spin_lock_classic()
    258  owner = READ_ONCE(lp->lock);  in arch_spin_trylock_retry()
    272  while (READ_ONCE(rw->cnts) & 0x10000)  in arch_read_lock_wait()
    284  while (READ_ONCE(rw->cnts) & 0x10000)  in arch_read_lock_wait()
    301  old = READ_ONCE(rw->cnts);  in arch_write_lock_wait()
    317  cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;  in arch_spin_relax()
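The s390 spinlock slow paths above spin on lock words and queue-node pointers with READ_ONCE(), which prevents the compiler from hoisting the load out of the wait loop and spinning on a stale register copy. A generic, hypothetical busy-wait in the same spirit:

    /* Without READ_ONCE() the compiler could legally load *lock once and loop
     * on the cached value forever; the marked access forces a fresh load on
     * every iteration. */
    static void wait_for_unlock(int *lock)
    {
            while (READ_ONCE(*lock) != 0)
                    cpu_relax();            /* architecture busy-wait hint */
    }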
|
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx4/ |
D | en_port.c |
    165  packets += READ_ONCE(ring->packets);  in mlx4_en_fold_software_stats()
    166  bytes += READ_ONCE(ring->bytes);  in mlx4_en_fold_software_stats()
    176  packets += READ_ONCE(ring->packets);  in mlx4_en_fold_software_stats()
    177  bytes += READ_ONCE(ring->bytes);  in mlx4_en_fold_software_stats()
    254  sw_rx_dropped += READ_ONCE(ring->dropped);  in mlx4_en_DUMP_ETH_STATS()
    255  priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);  in mlx4_en_DUMP_ETH_STATS()
    256  priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);  in mlx4_en_DUMP_ETH_STATS()
    257  priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);  in mlx4_en_DUMP_ETH_STATS()
    258  priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);  in mlx4_en_DUMP_ETH_STATS()
    259  priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);  in mlx4_en_DUMP_ETH_STATS()
    [all …]
|
/linux-6.1.9/arch/s390/kernel/ |
D | idle.c |
     81  idle_count = READ_ONCE(idle->idle_count);  in show_idle_count()
     82  if (READ_ONCE(idle->clock_idle_enter))  in show_idle_count()
     98  idle_time = READ_ONCE(idle->idle_time);  in show_idle_time()
     99  idle_enter = READ_ONCE(idle->clock_idle_enter);  in show_idle_time()
    100  idle_exit = READ_ONCE(idle->clock_idle_exit);  in show_idle_time()
    124  idle_enter = READ_ONCE(idle->clock_idle_enter);  in arch_cpu_idle_time()
    125  idle_exit = READ_ONCE(idle->clock_idle_exit);  in arch_cpu_idle_time()
|
/linux-6.1.9/arch/s390/include/asm/ |
D | preempt.h |
     17  return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;  in preempt_count()
     25  old = READ_ONCE(S390_lowcore.preempt_count);  in preempt_count_set()
     44  return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);  in test_preempt_need_resched()
     74  return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==  in should_resched()
     84  return READ_ONCE(S390_lowcore.preempt_count);  in preempt_count()
|
/linux-6.1.9/kernel/cgroup/ |
D | misc.c |
    148  if (!(valid_type(type) && cg && READ_ONCE(misc_res_capacity[type])))  in misc_cg_try_charge()
    158  if (new_usage > READ_ONCE(res->max) ||  in misc_cg_try_charge()
    159  new_usage > READ_ONCE(misc_res_capacity[type])) {  in misc_cg_try_charge()
    215  if (READ_ONCE(misc_res_capacity[i])) {  in misc_cg_max_show()
    216  max = READ_ONCE(cg->res[i].max);  in misc_cg_max_show()
    281  if (READ_ONCE(misc_res_capacity[type]))  in misc_cg_max_write()
    305  if (READ_ONCE(misc_res_capacity[i]) || usage)  in misc_cg_current_show()
    328  cap = READ_ONCE(misc_res_capacity[i]);  in misc_cg_capacity_show()
    343  if (READ_ONCE(misc_res_capacity[i]) || events)  in misc_events_show()
|
/linux-6.1.9/tools/memory-model/litmus-tests/ |
D | SB+rfionceonce-poonceonces.litmus |
     17  r1 = READ_ONCE(*x);
     18  r2 = READ_ONCE(*y);
     27  r3 = READ_ONCE(*y);
     28  r4 = READ_ONCE(*x);
|
/linux-6.1.9/tools/memory-model/Documentation/ |
D | control-dependencies.txt |
     15  q = READ_ONCE(a);
     17  p = READ_ONCE(b);
     25  q = READ_ONCE(a);
     28  p = READ_ONCE(b);
     35  q = READ_ONCE(a);
     40  of ordering. But please note that neither the READ_ONCE() nor the
     41  WRITE_ONCE() are optional. Without the READ_ONCE(), the compiler might
     55  So don't leave out either the READ_ONCE() or the WRITE_ONCE().
     56  In particular, although READ_ONCE() does force the compiler to emit a
     62  q = READ_ONCE(a);
    [all …]
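The control-dependencies.txt excerpts make the point that both the load and the conditional store must be marked for a control dependency to provide any ordering. The shape being discussed is roughly:

    q = READ_ONCE(a);
    if (q)
            WRITE_ONCE(b, 1);

    /* The store to 'b' is ordered after the load of 'a' because its very
     * execution depends on the loaded value.  Drop the READ_ONCE() and the
     * compiler may fuse or refetch the load; drop the WRITE_ONCE() and it may
     * hoist or duplicate the store, and either transformation can destroy the
     * ordering the code was relying on. */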
|
/linux-6.1.9/tools/lib/perf/ |
D | mmap.c |
    410  seq = READ_ONCE(pc->lock);  in perf_mmap__read_self()
    413  count->ena = READ_ONCE(pc->time_enabled);  in perf_mmap__read_self()
    414  count->run = READ_ONCE(pc->time_running);  in perf_mmap__read_self()
    418  time_mult = READ_ONCE(pc->time_mult);  in perf_mmap__read_self()
    419  time_shift = READ_ONCE(pc->time_shift);  in perf_mmap__read_self()
    420  time_offset = READ_ONCE(pc->time_offset);  in perf_mmap__read_self()
    423  time_cycles = READ_ONCE(pc->time_cycles);  in perf_mmap__read_self()
    424  time_mask = READ_ONCE(pc->time_mask);  in perf_mmap__read_self()
    428  idx = READ_ONCE(pc->index);  in perf_mmap__read_self()
    429  cnt = READ_ONCE(pc->offset);  in perf_mmap__read_self()
    [all …]
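perf_mmap__read_self() above reads the mmapped perf_event user page, which the kernel updates concurrently; pc->lock acts as a sequence counter, so every field is loaded with READ_ONCE() inside a retry loop. A heavily abbreviated sketch of that read side (assuming pc points at a struct perf_event_mmap_page; the real code also folds in the hardware counter and scales the enabled/running times):

    u32 seq;
    u64 ena, run;

    do {
            seq = READ_ONCE(pc->lock);
            barrier();                          /* order the field loads after 'lock' */

            ena = READ_ONCE(pc->time_enabled);
            run = READ_ONCE(pc->time_running);

            barrier();                          /* ...and before the re-check */
    } while (READ_ONCE(pc->lock) != seq);       /* kernel updated the page: retry */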
|