/linux-6.1.9/tools/memory-model/ |
D | linux-kernel.def |
     14  smp_load_acquire(X)              __load{acquire}(*X)
     33  xchg_acquire(X,V)                __xchg{acquire}(X,V)
     36  cmpxchg_acquire(X,V,W)           __cmpxchg{acquire}(X,V,W)
     70  atomic_add_return_acquire(V,X)   __atomic_op_return{acquire}(X,+,V)
     74  atomic_fetch_add_acquire(V,X)    __atomic_fetch_op{acquire}(X,+,V)
     79  atomic_inc_return_acquire(X)     __atomic_op_return{acquire}(X,+,1)
     83  atomic_fetch_inc_acquire(X)      __atomic_fetch_op{acquire}(X,+,1)
     88  atomic_sub_return_acquire(V,X)   __atomic_op_return{acquire}(X,-,V)
     92  atomic_fetch_sub_acquire(V,X)    __atomic_fetch_op{acquire}(X,-,V)
     97  atomic_dec_return_acquire(X)     __atomic_op_return{acquire}(X,-,1)
     [all …]
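These definitions map the kernel's acquire-flavoured operations onto LKMM's internal primitives. As a rough illustration of how a couple of them pair up in ordinary kernel C, here is a minimal test-and-set style lock sketch; it is not taken from any of the files listed here, and demo_lock and both function names are made up.

    #include <linux/atomic.h>
    #include <linux/processor.h>        /* cpu_relax() */

    static atomic_t demo_lock = ATOMIC_INIT(0);     /* hypothetical lock word */

    static void demo_lock_acquire(void)
    {
            /* atomic_cmpxchg_acquire(): the acquire flavour orders the
             * critical section after a successful 0 -> 1 transition,
             * matching cmpxchg_acquire(X,V,W) above. */
            while (atomic_cmpxchg_acquire(&demo_lock, 0, 1) != 0)
                    cpu_relax();
    }

    static void demo_lock_release(void)
    {
            /* The release store pairs with the acquire RMW above. */
            atomic_set_release(&demo_lock, 0);
    }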
|
D | linux-kernel.bell |
     18  'acquire (*smp_load_acquire*) ||
     20  instructions R[{'once,'acquire,'noreturn}]
     22  instructions RMW[{'once,'acquire,'release}]
|
/linux-6.1.9/tools/memory-model/litmus-tests/ |
D | ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus |
      6  * This litmus test demonstrates that a release-acquire chain suffices
      8  * that the release-acquire chain suffices is because in all but one
     11  * (AKA non-rf) link, so release-acquire is all that is needed.
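For readers unfamiliar with the ISA2 shape, the pattern this test exercises is roughly the following three-CPU chain, shown as a kernel-C sketch rather than litmus syntax; x, y, z and the cpuN() wrappers are illustrative only.

    #include <linux/compiler.h>     /* READ_ONCE()/WRITE_ONCE() */
    #include <asm/barrier.h>        /* smp_store_release()/smp_load_acquire() */

    static int x, y, z;

    static void cpu0(void)
    {
            WRITE_ONCE(x, 1);
            smp_store_release(&y, 1);       /* release: keeps the store to x ordered */
    }

    static void cpu1(void)
    {
            int r1 = smp_load_acquire(&y);  /* acquire: pairs with cpu0()'s release */

            smp_store_release(&z, 1);       /* release: extends the chain to cpu2() */
            (void)r1;
    }

    static void cpu2(void)
    {
            int r2 = smp_load_acquire(&z);  /* acquire: pairs with cpu1()'s release */
            int r3 = READ_ONCE(x);

            /* If r1 == 1 and r2 == 1, the chain guarantees r3 == 1. */
            (void)r2;
            (void)r3;
    }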
|
D | README |
     46  and load-acquire replaced with READ_ONCE().
     49  Can a release-acquire chain order a prior store against
     58  Does a release-acquire pair suffice for the load-buffering
     64  and load-acquire replaced with READ_ONCE().
     75  in one process, and use an acquire load followed by a pair of
     80  acquire load followed by a pair of spin_is_locked() calls
     91  As below, but with a release-acquire chain.
    134  As below, but without the smp_wmb() and acquire load.
    137  Can a smp_wmb(), instead of a release, and an acquire order
    157  Is the ordering provided by a release-acquire chain sufficient
    [all …]
|
D | S+fencewmbonceonce+poacquireonce.litmus | 6 * Can a smp_wmb(), instead of a release, and an acquire order a prior
|
D | LB+poacquireonce+pooncerelease.litmus | 6 * Does a release-acquire pair suffice for the load-buffering litmus
|
D | S+poonceonces.litmus | 6 * Starting with a two-process release-acquire chain ordering P0()'s
|
D | ISA2+poonceonces.litmus | 6 * Given a release-acquire chain ordering the first process's store
|
D | MP+polockmbonce+poacquiresilsil.litmus | 8 * state, ordered by acquire? Note that when the first spin_is_locked()
|
/linux-6.1.9/Documentation/litmus-tests/atomic/ |
D | Atomic-RMW+mb__after_atomic-is-stronger-than-acquire.litmus |
      1  C Atomic-RMW+mb__after_atomic-is-stronger-than-acquire
      7  * stronger than a normal acquire: both the read and write parts of
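The property being tested can be pictured with a sketch like the one below (variable names invented): a non-value-returning atomic_inc() implies no ordering by itself, but following it with smp_mb__after_atomic() upgrades it to a full barrier, so both the read and the write half of the RMW are ordered before the later store, which is strictly stronger than acquire ordering.

    #include <linux/atomic.h>
    #include <linux/compiler.h>

    static atomic_t counter = ATOMIC_INIT(0);
    static int flag;

    static void writer_side(void)
    {
            atomic_inc(&counter);       /* non-value-returning RMW: no ordering alone */
            smp_mb__after_atomic();     /* full barrier: both halves of the RMW are
                                         * ordered before the store to flag */
            WRITE_ONCE(flag, 1);
    }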
|
/linux-6.1.9/Documentation/locking/ |
D | futex-requeue-pi.rst |
     91  to be able to acquire the rt_mutex before returning to user space.
     93  acquire the rt_mutex as it would open a race window between the
     99  allow the requeue code to acquire an uncontended rt_mutex on behalf
    115  requeueing, futex_requeue() attempts to acquire the requeue target
    127  tasks as it can acquire the lock for, which in the majority of cases
    129  either pthread_cond_broadcast() or pthread_cond_signal() acquire the
|
D | ww-mutex-design.rst |
     64  trying to acquire locks doesn't grab a new reservation id, but keeps the one it
     66  acquire context. Furthermore the acquire context keeps track of debugging state
     67  to catch w/w mutex interface abuse. An acquire context is representing a
     71  w/w mutexes, since it is required to initialize the acquire context. The lock
     74  Furthermore there are three different class of w/w lock acquire functions:
     99  * Functions to only acquire a single w/w mutex, which results in the exact same
    103  Again this is not strictly required. But often you only want to acquire a
    104  single lock in which case it's pointless to set up an acquire context (and so
    119  Three different ways to acquire locks within the same w/w class. Common
    344  (1) Waiters with an acquire context are sorted by stamp order; waiters
    [all …]
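As a condensed reminder of the acquire-context flow the document describes, a sketch along these lines (my_ww_class, struct my_obj and lock_pair() are invented; a real user would back off and retry with ww_mutex_lock_slow() on -EDEADLK, as the document shows):

    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(my_ww_class);

    struct my_obj {
            struct ww_mutex lock;
            /* ... */
    };

    static int lock_pair(struct my_obj *a, struct my_obj *b)
    {
            struct ww_acquire_ctx ctx;
            int ret;

            ww_acquire_init(&ctx, &my_ww_class);    /* one context for the whole set */

            ret = ww_mutex_lock(&a->lock, &ctx);
            if (ret)
                    goto out_fini;                  /* e.g. -EDEADLK: back off */

            ret = ww_mutex_lock(&b->lock, &ctx);
            if (ret) {
                    ww_mutex_unlock(&a->lock);
                    goto out_fini;
            }

            ww_acquire_done(&ctx);                  /* no further locks in this context */
            /* ... operate on a and b ... */
            ww_mutex_unlock(&b->lock);
            ww_mutex_unlock(&a->lock);

    out_fini:
            ww_acquire_fini(&ctx);
            return ret;
    }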
|
/linux-6.1.9/drivers/net/ethernet/broadcom/bnx2x/ |
D | bnx2x_vfpf.c |
     226  struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;   in bnx2x_vfpf_acquire()
    1362  struct vfpf_acquire_tlv *acquire)   in bnx2x_vf_mbx_is_windows_vm() argument
    1369  if (!acquire->bulletin_addr ||   in bnx2x_vf_mbx_is_windows_vm()
    1370  acquire->resc_request.num_mc_filters == 32 ||   in bnx2x_vf_mbx_is_windows_vm()
    1371  ((acquire->vfdev_info.vf_os & VF_OS_MASK) ==   in bnx2x_vf_mbx_is_windows_vm()
    1390  if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))   in bnx2x_vf_mbx_acquire_chk_dorq()
    1400  struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;   in bnx2x_vf_mbx_acquire() local
    1405  vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,   in bnx2x_vf_mbx_acquire()
    1406  acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,   in bnx2x_vf_mbx_acquire()
    1407  acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,   in bnx2x_vf_mbx_acquire()
    [all …]
|
/linux-6.1.9/drivers/net/ethernet/intel/igc/ |
D | igc_i225.c |
    200  status = hw->nvm.ops.acquire(hw);   in igc_read_nvm_srrd_i225()
    298  status = hw->nvm.ops.acquire(hw);   in igc_write_nvm_srwr_i225()
    324  status = hw->nvm.ops.acquire(hw);   in igc_validate_nvm_checksum_i225()
    419  ret_val = hw->nvm.ops.acquire(hw);   in igc_update_nvm_checksum_i225()
    478  nvm->ops.acquire = igc_acquire_nvm_i225;   in igc_init_nvm_params_i225()
|
/linux-6.1.9/Documentation/litmus-tests/ |
D | README |
     15  Atomic-RMW+mb__after_atomic-is-stronger-than-acquire.litmus
     17  stronger than a normal acquire: both the read and write parts of
|
/linux-6.1.9/drivers/net/ethernet/intel/e1000e/ |
D | phy.c |
    250  ret_val = hw->phy.ops.acquire(hw);   in e1000e_read_phy_reg_m88()
    275  ret_val = hw->phy.ops.acquire(hw);   in e1000e_write_phy_reg_m88()
    322  if (!hw->phy.ops.acquire)   in __e1000e_read_phy_reg_igp()
    325  ret_val = hw->phy.ops.acquire(hw);   in __e1000e_read_phy_reg_igp()
    389  if (!hw->phy.ops.acquire)   in __e1000e_write_phy_reg_igp()
    392  ret_val = hw->phy.ops.acquire(hw);   in __e1000e_write_phy_reg_igp()
    457  if (!hw->phy.ops.acquire)   in __e1000_read_kmrn_reg()
    460  ret_val = hw->phy.ops.acquire(hw);   in __e1000_read_kmrn_reg()
    530  if (!hw->phy.ops.acquire)   in __e1000_write_kmrn_reg()
    533  ret_val = hw->phy.ops.acquire(hw);   in __e1000_write_kmrn_reg()
    [all …]
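All of these hits follow one shape: the PHY register access is bracketed by the hardware-specific acquire and release callbacks. A simplified sketch of that pattern, not actual code from phy.c (the register access itself is elided, hw->phy.ops.release is assumed to be the matching callback as elsewhere in the driver, and the e1000e driver's own headers are assumed for s32/struct e1000_hw):

    static s32 demo_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
    {
            s32 ret_val;

            if (!hw->phy.ops.acquire)               /* some parts need no arbitration */
                    return 0;

            ret_val = hw->phy.ops.acquire(hw);      /* take the hardware semaphore */
            if (ret_val)
                    return ret_val;

            /* ... PHY register access for 'offset' into '*data' ... */

            hw->phy.ops.release(hw);                /* always drop the semaphore */
            return ret_val;
    }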
|
D | ich8lan.c |
     216  hw->phy.ops.acquire(hw);   in e1000_phy_is_accessible_pchlan()
     307  ret_val = hw->phy.ops.acquire(hw);   in e1000_init_phy_workarounds_pchlan()
     836  ret_val = hw->phy.ops.acquire(hw);   in e1000_set_eee_pchlan()
     922  ret_val = hw->phy.ops.acquire(hw);   in e1000_k1_workaround_lpt_lp()
    1141  ret_val = hw->phy.ops.acquire(hw);   in e1000_enable_ulp_lpt_lp()
    1304  ret_val = hw->phy.ops.acquire(hw);   in e1000_disable_ulp_lpt_lp()
    1451  ret_val = hw->phy.ops.acquire(hw);   in e1000_check_for_copper_link_ich8lan()
    1492  ret_val = hw->phy.ops.acquire(hw);   in e1000_check_for_copper_link_ich8lan()
    1517  ret_val = hw->phy.ops.acquire(hw);   in e1000_check_for_copper_link_ich8lan()
    2146  ret_val = hw->phy.ops.acquire(hw);   in e1000_sw_lcd_config_ich8lan()
    [all …]
|
/linux-6.1.9/Documentation/networking/ |
D | xfrm_sysctl.rst | 11 default 30 - hard timeout in seconds for acquire requests
|
/linux-6.1.9/drivers/media/dvb-frontends/ |
D | as102_fe.h | 14 int (*stream_ctrl)(void *priv, int acquire, uint32_t elna_cfg);
|
/linux-6.1.9/drivers/net/ethernet/intel/igb/ |
D | e1000_i210.c |
    200  if (!(hw->nvm.ops.acquire(hw))) {   in igb_read_nvm_srrd_i210()
    300  if (!(hw->nvm.ops.acquire(hw))) {   in igb_write_nvm_srwr_i210()
    543  if (!(hw->nvm.ops.acquire(hw))) {   in igb_validate_nvm_checksum_i210()
    589  if (!(hw->nvm.ops.acquire(hw))) {   in igb_update_nvm_checksum_i210()
    797  nvm->ops.acquire = igb_acquire_nvm_i210;   in igb_init_nvm_params_i210()
|
/linux-6.1.9/Documentation/filesystems/ |
D | directory-locking.rst |
     11  always acquire the locks in order by increasing address. We'll call
     75  attempts to acquire lock on B, A will remain the parent of B until we
     76  acquire the lock on B. (Proof: only cross-directory rename can change
     91  attempt to acquire some lock and already holds at least one lock. Let's
    121  try to acquire lock on descendent before the lock on ancestor.
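Line 11 is the classic trick of imposing a global order on same-type locks by comparing object addresses. A generic sketch of the idea (not the VFS code itself; struct foo and lock_two() are invented):

    #include <linux/minmax.h>       /* swap() */
    #include <linux/mutex.h>

    struct foo {
            struct mutex lock;
            /* ... */
    };

    /* Lock two objects of the same type in order of increasing address,
     * so any two tasks locking the same pair nest the locks the same way. */
    static void lock_two(struct foo *a, struct foo *b)
    {
            if (a == b) {
                    mutex_lock(&a->lock);
                    return;
            }
            if (b < a)
                    swap(a, b);
            mutex_lock(&a->lock);
            mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
    }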
|
/linux-6.1.9/drivers/gpu/drm/nouveau/include/nvkm/core/ |
D | memory.h |
     36  void __iomem *(*acquire)(struct nvkm_memory *);   member
     70  #define nvkm_kmap(o) (o)->func->acquire(o)
|
D | gpuobj.h | 27 void *(*acquire)(struct nvkm_gpuobj *); member
|
/linux-6.1.9/tools/memory-model/Documentation/ |
D | glossary.txt |
     31  An example special acquire operation is smp_load_acquire(),
     33  acquire loads.
     35  When an acquire load returns the value stored by a release store
     36  to that same variable, (in other words, the acquire load "reads
     38  store "happen before" any operations following that load acquire.
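Lines 35-38 describe the reads-from guarantee; in code it is the familiar message-passing pairing, sketched below with made-up variable names.

    #include <linux/bug.h>
    #include <asm/barrier.h>

    static int data;
    static int ready;

    static void producer(void)
    {
            data = 42;
            smp_store_release(&ready, 1);           /* release store */
    }

    static void consumer(void)
    {
            if (smp_load_acquire(&ready) == 1) {    /* reads from the release store */
                    /* Everything before the release "happens before" everything
                     * after this acquire load, so data is guaranteed to be 42. */
                    BUG_ON(data != 42);
            }
    }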
|
/linux-6.1.9/Documentation/RCU/ |
D | UP.rst |
     60  callback function must acquire this same lock. In this case, if
    120  like spin_lock_bh() to acquire the lock. Please note that
    131  callbacks acquire locks directly. However, a great many RCU
    132  callbacks do acquire locks *indirectly*, for example, via
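The situation described at lines 60 and 120 looks roughly like this; a sketch with invented names showing why the updater side uses spin_lock_bh(): the callback runs from softirq context and takes the same lock.

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);      /* taken by both updater and callback */

    struct demo {
            struct rcu_head rcu;
            /* ... */
    };

    static void demo_reclaim(struct rcu_head *head)     /* runs in softirq context */
    {
            struct demo *p = container_of(head, struct demo, rcu);

            spin_lock(&demo_lock);
            /* ... final bookkeeping protected by demo_lock ... */
            spin_unlock(&demo_lock);
            kfree(p);
    }

    static void demo_retire(struct demo *old)           /* process context */
    {
            spin_lock_bh(&demo_lock);       /* _bh keeps the softirq-run callback
                                             * from deadlocking against us */
            /* ... unlink 'old' from the data structure ... */
            spin_unlock_bh(&demo_lock);
            call_rcu(&old->rcu, demo_reclaim);
    }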
|