
Searched refs:W (Results 1 – 25 of 365) sorted by relevance


/linux-5.19.10/arch/powerpc/crypto/
sha1-powerpc-asm.S
34 #define W(t) (((t)%16)+16) macro
37 LWZ(W(t),(t)*4,r4)
46 add r14,r0,W(t); \
47 LWZ(W((t)+4),((t)+4)*4,r4); \
58 xor r5,W((t)+4-3),W((t)+4-8); \
60 xor W((t)+4),W((t)+4-16),W((t)+4-14); \
61 add r0,r0,W(t); \
62 xor W((t)+4),W((t)+4),r5; \
64 rotlwi W((t)+4),W((t)+4),1
73 add r0,r0,W(t); \
[all …]
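
The W(t) macro above maps schedule word t onto one of sixteen rotating PowerPC registers (r16-r31), so the SHA-1 message schedule never touches memory. A minimal C sketch of the same circular-window recurrence (ROL32, W_SLOT and sha1_next_w are illustrative names, not from the kernel; valid for t >= 16):

#include <stdint.h>

#define ROL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
#define W_SLOT(t)   ((t) & 15)	/* (t) % 16, as in the asm macro */

/* SHA-1 expansion: W[t] = rol32(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1) */
static uint32_t sha1_next_w(uint32_t w[16], int t)
{
	uint32_t x = w[W_SLOT(t - 3)] ^ w[W_SLOT(t - 8)] ^
		     w[W_SLOT(t - 14)] ^ w[W_SLOT(t - 16)];

	return w[W_SLOT(t)] = ROL32(x, 1);
}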
/linux-5.19.10/lib/crypto/
sha256.c
55 static inline void LOAD_OP(int I, u32 *W, const u8 *input) in LOAD_OP() argument
57 W[I] = get_unaligned_be32((__u32 *)input + I); in LOAD_OP()
60 static inline void BLEND_OP(int I, u32 *W) in BLEND_OP() argument
62 W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]; in BLEND_OP()
67 t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i]; \
73 static void sha256_transform(u32 *state, const u8 *input, u32 *W) in sha256_transform() argument
80 LOAD_OP(i + 0, W, input); in sha256_transform()
81 LOAD_OP(i + 1, W, input); in sha256_transform()
82 LOAD_OP(i + 2, W, input); in sha256_transform()
83 LOAD_OP(i + 3, W, input); in sha256_transform()
[all …]
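
BLEND_OP() above is the SHA-256 message expansion from FIPS 180-4. Written out as a standalone loop (a sketch: ROR32, s0, s1 and sha256_expand are local names restating the kernel's macros, not its API):

#include <stdint.h>

#define ROR32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))

static uint32_t s0(uint32_t x) { return ROR32(x, 7) ^ ROR32(x, 18) ^ (x >> 3); }
static uint32_t s1(uint32_t x) { return ROR32(x, 17) ^ ROR32(x, 19) ^ (x >> 10); }

/* Expand the 16 loaded words into the full 64-entry schedule. */
static void sha256_expand(uint32_t W[64])
{
	int i;

	for (i = 16; i < 64; i++)
		W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
}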
/linux-5.19.10/Documentation/translations/zh_CN/loongarch/
introduction.rst
205 ADD.W SUB.W ADDI.W ADD.D SUB.D ADDI.D
208 MUL.W MULH.W MULH.WU DIV.W DIV.WU MOD.W MOD.WU
211 LU12I.W LU32I.D LU52I.D ADDU16I.D
215 SLL.W SRL.W SRA.W ROTR.W SLLI.W SRLI.W SRAI.W ROTRI.W
220 EXT.W.B EXT.W.H CLO.W CLO.D CLZ.W CLZ.D CTO.W CTO.D CTZ.W CTZ.D
221 BYTEPICK.W BYTEPICK.D BSTRINS.W BSTRINS.D BSTRPICK.W BSTRPICK.D
222 REVB.2H REVB.4H REVB.2W REVB.D REVH.2W REVH.D BITREV.4B BITREV.8B BITREV.W BITREV.D
231 LD.B LD.BU LD.H LD.HU LD.W LD.WU LD.D ST.B ST.H ST.W ST.D
232 LDX.B LDX.BU LDX.H LDX.HU LDX.W LDX.WU LDX.D STX.B STX.H STX.W STX.D
233 LDPTR.W LDPTR.D STPTR.W STPTR.D
[all …]
/linux-5.19.10/arch/x86/kernel/
uprobes.c
46 #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\ macro
89 W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
90 W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
91 W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
92 W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
93 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
94 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
95 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
96 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
97 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
[all …]
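
Here W() is a table-building macro: each row packs sixteen 0/1 flags, one per opcode, into a bitmap of instructions that uprobes may single-step. A rough sketch of the same packing pattern (ROW, good_insns and opcode_ok are hypothetical names; the real macro's layout differs in detail):

#include <stdint.h>

/* Bit i of a row means "opcode (row_base + i) is good". */
#define ROW(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)  \
	((uint16_t)((b0) << 0  | (b1) << 1  | (b2) << 2  | (b3) << 3  |      \
		    (b4) << 4  | (b5) << 5  | (b6) << 6  | (b7) << 7  |      \
		    (b8) << 8  | (b9) << 9  | (ba) << 10 | (bb) << 11 |      \
		    (bc) << 12 | (bd) << 13 | (be) << 14 | (bf) << 15))

static const uint16_t good_insns[16] = {
	ROW(1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1),	/* 0x00 */
	/* ... rows 0x10 through 0xf0 ... */
};

static int opcode_ok(uint8_t op)
{
	return (good_insns[op >> 4] >> (op & 0xf)) & 1;
}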
/linux-5.19.10/arch/x86/crypto/
sha1_ssse3_asm.S
311 .set W, W0 define
319 .set W_minus_32, W
330 .set W_minus_04, W
331 .set W, W_minus_32 define
352 movdqa W_TMP1, W
374 movdqa W_minus_12, W
375 palignr $8, W_minus_16, W # w[i-14]
378 pxor W_minus_08, W
381 pxor W_TMP1, W
382 movdqa W, W_TMP2
[all …]
sha512-ssse3-asm.S
98 # W[t]+K[t] (stack frame)
125 add WK_2(idx), T1 # W[t] + K[t] from message scheduler
129 add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h
131 add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
154 # Two rounds are computed based on the values for K[t-2]+W[t-2] and
155 # K[t-1]+W[t-1] which were previously stored at WK_2 by the message
162 # Eg. XMM2=W[t-2] really means XMM2={W[t-2]|W[t-1]}
171 movdqa W_t(idx), %xmm2 # XMM2 = W[t-2]
174 movdqa %xmm2, %xmm0 # XMM0 = W[t-2]
179 movdqu W_t(idx), %xmm5 # XMM5 = W[t-15]
[all …]
sha512-avx-asm.S
77 # W[t] + K[t] | W[t+1] + K[t+1]
100 # W[t]+K[t] (stack frame)
131 add WK_2(idx), T1 # W[t] + K[t] from message scheduler
135 add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h
137 add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e)
159 # Two rounds are computed based on the values for K[t-2]+W[t-2] and
160 # K[t-1]+W[t-1] which were previously stored at WK_2 by the message
167 # Eg. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]}
172 vmovdqa W_t(idx), %xmm4 # XMM4 = W[t-2]
174 vmovdqu W_t(idx), %xmm5 # XMM5 = W[t-15]
[all …]
sha512-avx2-asm.S
166 MY_VPALIGNR YTMP0, Y_3, Y_2, 8 # YTMP0 = W[-7]
168 vpaddq Y_0, YTMP0, YTMP0 # YTMP0 = W[-7] + W[-16]
170 MY_VPALIGNR YTMP1, Y_1, Y_0, 8 # YTMP1 = W[-15]
177 vpor YTMP2, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1
179 vpsrlq $7, YTMP1, YTMP4 # YTMP4 = W[-15] >> 7
223 vpor YTMP2, YTMP1, YTMP1 # YTMP1 = W[-15] ror 8
225 vpxor YTMP4, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 ^ W[-15] >> 7
230 vpaddq YTMP1, YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0
232 vperm2f128 $0x0, YTMP0, YTMP0, Y_0 # Y_0 = W[-16] + W[-7] + s0 {BABA}
234 vpand MASK_YMM_LO(%rip), YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 {DC00}
[all …]
sha256-ssse3-asm.S
149 ## compute W[-16] + W[-7] 4 at a time
154 palignr $4, X2, XTMP0 # XTMP0 = W[-7]
162 paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16]
167 palignr $4, X0, XTMP1 # XTMP1 = W[-15]
171 movdqa XTMP1, XTMP2 # XTMP2 = W[-15]
175 movdqa XTMP1, XTMP3 # XTMP3 = W[-15]
186 por XTMP2, XTMP1 # XTMP1 = W[-15] ror 7
191 movdqa XTMP3, XTMP2 # XTMP2 = W[-15]
194 movdqa XTMP3, XTMP4 # XTMP4 = W[-15]
211 psrld $3, XTMP4 # XTMP4 = W[-15] >> 3
[all …]
/linux-5.19.10/tools/bpf/bpftool/bash-completion/
bpftool
22 COMPREPLY+=( $( compgen -W "$w" -- "$cur" ) )
44 COMPREPLY+=( $( compgen -W "$*" -- "$cur" ) )
49 COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
57 COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
64 COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
72 COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
79 COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
85 COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
91 COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
97 COMPREPLY+=( $( compgen -W "$( bpftool -jp btf 2>&1 | \
[all …]
/linux-5.19.10/arch/arm/crypto/
sha1-armv7-neon.S
88 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ argument
90 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
94 pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
98 pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
102 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ argument
104 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
108 pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
111 pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
115 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \ argument
117 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
[all …]
/linux-5.19.10/crypto/
sha512_generic.c
88 static inline void LOAD_OP(int I, u64 *W, const u8 *input) in LOAD_OP() argument
90 W[I] = get_unaligned_be64((__u64 *)input + I); in LOAD_OP()
93 static inline void BLEND_OP(int I, u64 *W) in BLEND_OP() argument
95 W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]); in BLEND_OP()
104 u64 W[16]; in sha512_transform() local
118 LOAD_OP(i + j, W, input); in sha512_transform()
121 BLEND_OP(i + j, W); in sha512_transform()
126 t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[(i & 15)]; in sha512_transform()
128 t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1]; in sha512_transform()
130 t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2]; in sha512_transform()
[all …]
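
Unlike lib/crypto/sha256.c above, this BLEND_OP() keeps only a 16-word window, indexed with & 15, and rewrites it in place. The same rolling update as standalone C (a sketch; ROR64, sig0, sig1 and sha512_blend are local restatements of the kernel's s0/s1 macros):

#include <stdint.h>

#define ROR64(x, n) (((x) >> (n)) | ((x) << (64 - (n))))

static uint64_t sig0(uint64_t x) { return ROR64(x, 1) ^ ROR64(x, 8) ^ (x >> 7); }
static uint64_t sig1(uint64_t x) { return ROR64(x, 19) ^ ROR64(x, 61) ^ (x >> 6); }

/* On entry W[I & 15] still holds W[I-16]; afterwards it holds W[I]. */
static void sha512_blend(int I, uint64_t W[16])
{
	W[I & 15] += sig1(W[(I - 2) & 15]) + W[(I - 7) & 15] +
		     sig0(W[(I - 15) & 15]);
}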
sm3.c
63 #define I(i) (W[i] = get_unaligned_be32(data + i * 4))
64 #define W1(i) (W[i & 0x0f])
65 #define W2(i) (W[i & 0x0f] = \
66 P1(W[i & 0x0f] \
67 ^ W[(i-9) & 0x0f] \
68 ^ rol32(W[(i-3) & 0x0f], 15)) \
69 ^ rol32(W[(i-13) & 0x0f], 7) \
70 ^ W[(i-6) & 0x0f])
72 static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16]) in sm3_transform()
170 u8 const *data, int blocks, u32 W[16]) in sm3_block()
[all …]
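
SM3 uses the same 16-word rolling window; the W2() macro routes the expansion through the P1 permutation. The recurrence behind those macros, spelled out (a sketch; ROL32 and sm3_expand_word are local names, P1 is the standard SM3 permutation):

#include <stdint.h>

#define ROL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))

static uint32_t P1(uint32_t x) { return x ^ ROL32(x, 15) ^ ROL32(x, 23); }

/* On entry W[i & 15] still holds W[i-16]; replace it with W[i]. */
static uint32_t sm3_expand_word(uint32_t W[16], int i)
{
	return W[i & 15] = P1(W[i & 15] ^ W[(i - 9) & 15] ^
			      ROL32(W[(i - 3) & 15], 15)) ^
			   ROL32(W[(i - 13) & 15], 7) ^ W[(i - 6) & 15];
}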
/linux-5.19.10/Documentation/loongarch/
introduction.rst
238 ADD.W SUB.W ADDI.W ADD.D SUB.D ADDI.D
241 MUL.W MULH.W MULH.WU DIV.W DIV.WU MOD.W MOD.WU
244 LU12I.W LU32I.D LU52I.D ADDU16I.D
248 SLL.W SRL.W SRA.W ROTR.W SLLI.W SRLI.W SRAI.W ROTRI.W
253 EXT.W.B EXT.W.H CLO.W CLO.D CLZ.W CLZ.D CTO.W CTO.D CTZ.W CTZ.D
254 BYTEPICK.W BYTEPICK.D BSTRINS.W BSTRINS.D BSTRPICK.W BSTRPICK.D
255 REVB.2H REVB.4H REVB.2W REVB.D REVH.2W REVH.D BITREV.4B BITREV.8B BITREV.W BITREV.D
264 LD.B LD.BU LD.H LD.HU LD.W LD.WU LD.D ST.B ST.H ST.W ST.D
265 LDX.B LDX.BU LDX.H LDX.HU LDX.W LDX.WU LDX.D STX.B STX.H STX.W STX.D
266 LDPTR.W LDPTR.D STPTR.W STPTR.D
[all …]
/linux-5.19.10/tools/memory-model/
linux-kernel.def
34 cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
35 cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
36 cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
37 cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
108 atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
109 atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
110 atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
111 atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
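
The .def file maps each cmpxchg variant onto an ordering flavor: {mb} is fully ordered, {once} relaxed, plus acquire and release forms. A rough C11-atomics analogue of the four flavors (illustrative only; LKMM ordering is defined by the model itself and is not identical to C11):

#include <stdatomic.h>
#include <stdbool.h>

static bool cas_mb(atomic_int *x, int *exp, int new_val)
{
	return atomic_compare_exchange_strong_explicit(x, exp, new_val,
			memory_order_seq_cst, memory_order_seq_cst);
}

static bool cas_relaxed(atomic_int *x, int *exp, int new_val)
{
	return atomic_compare_exchange_strong_explicit(x, exp, new_val,
			memory_order_relaxed, memory_order_relaxed);
}

static bool cas_acquire(atomic_int *x, int *exp, int new_val)
{
	return atomic_compare_exchange_strong_explicit(x, exp, new_val,
			memory_order_acquire, memory_order_acquire);
}

static bool cas_release(atomic_int *x, int *exp, int new_val)
{
	/* C11 forbids release as a failure order, so relaxed here. */
	return atomic_compare_exchange_strong_explicit(x, exp, new_val,
			memory_order_release, memory_order_relaxed);
}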
/linux-5.19.10/arch/arm/lib/
memmove.S
82 6: W(nop)
83 W(ldr) r3, [r1, #-4]!
84 W(ldr) r4, [r1, #-4]!
85 W(ldr) r5, [r1, #-4]!
86 W(ldr) r6, [r1, #-4]!
87 W(ldr) r8, [r1, #-4]!
88 W(ldr) r9, [r1, #-4]!
89 W(ldr) lr, [r1, #-4]!
93 W(nop)
94 W(str) r3, [r0, #-4]!
[all …]
/linux-5.19.10/lib/
sha1.c
39 #define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
41 #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
43 #define setW(x, val) (W(x) = (val))
47 #define W(x) (array[(x)&15]) macro
54 #define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
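
The three setW() definitions above are build-time alternatives with one job: keep GCC from caching the 16-word circular buffer in an oversized stack frame (the volatile store and the asm memory clobber are two ways of forcing the write). How the pieces combine for rounds t >= 16, restated with local names (a sketch, not the kernel's round macro):

#include <stdint.h>

#define ROL32(x, n)  (((x) << (n)) | ((x) >> (32 - (n))))
#define W(x)	     (array[(x) & 15])	/* 16-word circular window */
#define setW(x, val) (*(volatile uint32_t *)&W(x) = (val))

static uint32_t sha1_mix(uint32_t array[16], int t)
{
	/* SHA_MIX(t): rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1) */
	uint32_t tmp = ROL32(W(t + 13) ^ W(t + 8) ^ W(t + 2) ^ W(t), 1);

	setW(t, tmp);
	return tmp;
}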
/linux-5.19.10/arch/m68k/fpsp040/
slogn.S
436 |--LET V=U*U, W=V*V, CALCULATE
438 |--U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] )
443 fmulx %fp1,%fp1 | ...FP1 IS W
448 fmulx %fp1,%fp3 | ...W*B5
449 fmulx %fp1,%fp2 | ...W*B4
451 faddd LOGB3,%fp3 | ...B3+W*B5
452 faddd LOGB2,%fp2 | ...B2+W*B4
454 fmulx %fp3,%fp1 | ...W*(B3+W*B5), FP3 RELEASED
456 fmulx %fp0,%fp2 | ...V*(B2+W*B4)
458 faddd LOGB1,%fp1 | ...B1+W*(B3+W*B5)
[all …]
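
The comment block describes the evaluation scheme: with V = U*U and W = V*V, the series is split into two shorter chains, [B1 + W*(B3 + W*B5)] and V*(B2 + W*B4), which the FPU can overlap. The same expression in C (a sketch; the coefficient values live in slogn.S as LOGB1..LOGB5 and are passed in here rather than invented):

/* b[0..4] correspond to LOGB1..LOGB5 in the source (values elided). */
static double log_poly(double u, const double b[5])
{
	double v = u * u;
	double w = v * v;

	/* U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] ) */
	return u + u * v * ((b[0] + w * (b[2] + w * b[4])) +
			    v * (b[1] + w * b[3]));
}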
/linux-5.19.10/arch/arm/boot/compressed/
head.S
217 W(b) 1f
1018 W(b) __armv4_mmu_cache_on
1019 W(b) __armv4_mmu_cache_off
1025 W(b) __armv3_mpu_cache_on
1026 W(b) __armv3_mpu_cache_off
1027 W(b) __armv3_mpu_cache_flush
1031 W(b) __armv4_mpu_cache_on
1032 W(b) __armv4_mpu_cache_off
1033 W(b) __armv4_mpu_cache_flush
1037 W(b) __arm926ejs_mmu_cache_on
[all …]
/linux-5.19.10/arch/mips/n64/
init.c
51 #define W 320 macro
82 .width = W, in n64_platform_init()
84 .stride = W * 2, in n64_platform_init()
122 orig = kzalloc(W * H * 2 + 63, GFP_DMA | GFP_KERNEL); in n64_platform_init()
141 res[0].end = phys + W * H * 2 - 1; in n64_platform_init()
149 #undef W
/linux-5.19.10/arch/arm/kernel/
entry-armv.S
1132 3: W(b) . + 4
1300 W(b) vector_rst
1301 W(b) vector_und
1304 W(ldr) pc, .
1305 W(b) vector_pabt
1306 W(b) vector_dabt
1307 W(b) vector_addrexcptn
1308 W(b) vector_irq
1309 W(b) vector_fiq
1313 W(b) vector_rst
[all …]
/linux-5.19.10/tools/memory-model/Documentation/
cheatsheet.txt
3 C Self R W RMW Self R W DR DW RMW SV
11 Successful *_release() C Y Y Y W Y
13 smp_wmb() Y W Y Y W
28 W: Write, for example, WRITE_ONCE(), or write portion of RMW
/linux-5.19.10/drivers/atm/
Kconfig
86 when going from 8W to 16W bursts.
89 bool "Enable 16W TX bursts (discouraged)"
96 bool "Enable 8W TX bursts (recommended)"
103 bool "Enable 4W TX bursts (optional)"
107 this if you have disabled 8W bursts. Enabling 4W if 8W is also set
111 bool "Enable 2W TX bursts (optional)"
115 this if you have disabled 4W and 8W bursts. Enabling 2W if 4W or 8W
119 bool "Enable 16W RX bursts (discouraged)"
126 bool "Enable 8W RX bursts (discouraged)"
134 bool "Enable 4W RX bursts (recommended)"
[all …]
/linux-5.19.10/
CREDITS
5 (W), PGP key ID and fingerprint (P), description (D), and
29 W: http://www.arbornet.org/~dragos
37 W: https://alumnus.caltech.edu/~madler/
49 W: http://www.csn.ul.ie/~airlied
57 W: http://www.moses.uklinux.net/patches
65 W: https://www.almesberger.net/
80 W: http://www-stu.christs.cam.ac.uk/~aia21/
88 W: http://www.pdos.lcs.mit.edu/~cananian
99 W: https://www.codepoet.org/
109 W: http://www.subcarrier.org/mang
[all …]
/linux-5.19.10/drivers/gpu/drm/i915/gt/
intel_reset.h
72 #define intel_wedge_on_timeout(W, GT, TIMEOUT) \ argument
73 for (__intel_init_wedge((W), (GT), (TIMEOUT), __func__); \
74 (W)->gt; \
75 __intel_fini_wedge((W)))
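
intel_wedge_on_timeout() is a scoped for-loop macro: the init call runs before the caller's block, the condition ((W)->gt) limits the body to a single pass, and the fini call runs on the way out. The pattern in miniature (a self-contained sketch, not the i915 API):

#include <stdio.h>

struct scope { int active; };

static void scope_init(struct scope *s) { s->active = 1; puts("init"); }
static void scope_fini(struct scope *s) { s->active = 0; puts("fini"); }

/* Body runs exactly once: init arms the flag, fini clears it, and the
 * second condition check then terminates the loop. */
#define WITH_SCOPE(s) \
	for (scope_init(s); (s)->active; scope_fini(s))

int main(void)
{
	struct scope s;

	WITH_SCOPE(&s)
		puts("body");

	return 0;
}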
