/linux-6.1.9/lib/crypto/ |
D | utils.c |
      17  void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)  in __crypto_xor() argument
      24  ((unsigned long)dst ^ (unsigned long)src2)) &  in __crypto_xor()
      36  *dst++ = *src1++ ^ *src2++;  in __crypto_xor()
      44  get_unaligned((u64 *)src2);  in __crypto_xor()
      47  *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;  in __crypto_xor()
      51  src2 += 8;  in __crypto_xor()
      58  get_unaligned((u32 *)src2);  in __crypto_xor()
      61  *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;  in __crypto_xor()
      65  src2 += 4;  in __crypto_xor()
      72  get_unaligned((u16 *)src2);  in __crypto_xor()
      [all …]
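
The matches above show the shape of __crypto_xor(): a byte-at-a-time loop when dst, src1 and src2 are mutually misaligned, and 8-, 4- and 2-byte chunks via get_unaligned() otherwise. A minimal userspace sketch of the same word-at-a-time idea; xor_blocks() is a hypothetical name, not the kernel symbol:

        #include <stdint.h>
        #include <stddef.h>
        #include <string.h>

        static void xor_blocks(uint8_t *dst, const uint8_t *src1,
                               const uint8_t *src2, size_t len)
        {
                /* Process 8 bytes per iteration; memcpy keeps the loads and
                 * stores well-defined even at unaligned addresses (the kernel
                 * uses get_unaligned()/put_unaligned() for the same reason). */
                while (len >= 8) {
                        uint64_t a, b;

                        memcpy(&a, src1, 8);
                        memcpy(&b, src2, 8);
                        a ^= b;
                        memcpy(dst, &a, 8);
                        dst += 8; src1 += 8; src2 += 8; len -= 8;
                }
                while (len--)           /* byte-granular tail */
                        *dst++ = *src1++ ^ *src2++;
        }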
|
/linux-6.1.9/include/linux/ |
D | bitmap.h |
     141  const unsigned long *src2,
     321  const unsigned long *src2, unsigned int nbits)  in bitmap_and() argument
     324  return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;  in bitmap_and()
     325  return __bitmap_and(dst, src1, src2, nbits);  in bitmap_and()
     329  const unsigned long *src2, unsigned int nbits)  in bitmap_or() argument
     332  *dst = *src1 | *src2;  in bitmap_or()
     334  __bitmap_or(dst, src1, src2, nbits);  in bitmap_or()
     338  const unsigned long *src2, unsigned int nbits)  in bitmap_xor() argument
     341  *dst = *src1 ^ *src2;  in bitmap_xor()
     343  __bitmap_xor(dst, src1, src2, nbits);  in bitmap_xor()
     [all …]
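
The bitmap_and() fast path above covers bitmaps that fit in a single unsigned long: it ANDs the words and then masks off the bits past nbits with BITMAP_LAST_WORD_MASK(), so stale high bits never leak into the result. A standalone sketch of that masking trick; LAST_WORD_MASK is a local stand-in for the kernel macro:

        #include <stdio.h>

        #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
        #define LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

        int main(void)
        {
                unsigned long a = 0xf0f0UL, b = 0xffffUL, dst;
                unsigned int nbits = 12;

                dst = a & b & LAST_WORD_MASK(nbits);    /* bits >= 12 discarded */
                printf("%#lx\n", dst);                  /* prints 0xf0 */
                return 0;
        }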
|
D | linkmode.h |
      36  const unsigned long *src2)  in linkmode_andnot() argument
      38  return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);  in linkmode_andnot()
      75  const unsigned long *src2)  in linkmode_equal() argument
      77  return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);  in linkmode_equal()
      81  const unsigned long *src2)  in linkmode_intersects() argument
      83  return bitmap_intersects(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);  in linkmode_intersects()
      87  const unsigned long *src2)  in linkmode_subset() argument
      89  return bitmap_subset(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);  in linkmode_subset()
|
D | nodemask.h |
     162  #define nodes_and(dst, src1, src2) \  argument
     163  __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
     170  #define nodes_or(dst, src1, src2) \  argument
     171  __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
     178  #define nodes_xor(dst, src1, src2) \  argument
     179  __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
     186  #define nodes_andnot(dst, src1, src2) \  argument
     187  __nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
     202  #define nodes_equal(src1, src2) \  argument
     203  __nodes_equal(&(src1), &(src2), MAX_NUMNODES)
     [all …]
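
nodes_and() and friends above illustrate a wrapper pattern: the node bitmap lives inside a typedef'd struct for type safety, and each macro takes the masks as lvalues and forwards their addresses to a fixed-width helper (in the kernel, __nodes_and() wraps bitmap_and()). A reduced sketch of that pattern; MAX_NODES and the simplified helper body are illustrative, and this version assumes the width is a whole number of words:

        #define MAX_NODES 128                   /* illustrative width */
        #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

        typedef struct {
                unsigned long bits[MAX_NODES / BITS_PER_LONG];
        } nodemask_t;

        static void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
                                const nodemask_t *src2p, unsigned int nbits)
        {
                unsigned int i;

                for (i = 0; i < nbits / BITS_PER_LONG; i++)
                        dstp->bits[i] = src1p->bits[i] & src2p->bits[i];
        }

        /* the macro hides the address-of, so callers write nodes_and(d, a, b) */
        #define nodes_and(dst, src1, src2) \
                __nodes_and(&(dst), &(src1), &(src2), MAX_NODES)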
|
/linux-6.1.9/tools/include/linux/ |
D | bitmap.h |
      72  const unsigned long *src2, unsigned int nbits)  in bitmap_or() argument
      75  *dst = *src1 | *src2;  in bitmap_or()
      77  __bitmap_or(dst, src1, src2, nbits);  in bitmap_or()
     150  const unsigned long *src2, unsigned int nbits)  in bitmap_and() argument
     153  return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;  in bitmap_and()
     154  return __bitmap_and(dst, src1, src2, nbits);  in bitmap_and()
     166  const unsigned long *src2, unsigned int nbits)  in bitmap_equal() argument
     169  return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));  in bitmap_equal()
     172  return !memcmp(src1, src2, nbits / 8);  in bitmap_equal()
     173  return __bitmap_equal(src1, src2, nbits);  in bitmap_equal()
     [all …]
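
This tools/ copy of bitmap.h shows bitmap_equal() using the XOR-and-mask trick for single-word bitmaps: XOR exposes any differing bits, and the mask confines the test to the first nbits. A standalone illustration, reusing the same LAST_WORD_MASK stand-in as above:

        #include <stdio.h>

        #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
        #define LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

        int main(void)
        {
                unsigned long a = 0x10ffUL, b = 0x20ffUL;

                /* equal in the low 12 bits, different above them */
                printf("%d\n", !((a ^ b) & LAST_WORD_MASK(12)));  /* 1 */
                printf("%d\n", !((a ^ b) & LAST_WORD_MASK(16)));  /* 0 */
                return 0;
        }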
|
/linux-6.1.9/arch/powerpc/include/asm/ |
D | kvm_fpu.h |
      18  extern void fps_fadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
      19  extern void fps_fdivs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
      20  extern void fps_fmuls(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
      21  extern void fps_fsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
      23  extern void fps_fmadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
      25  extern void fps_fmsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
      27  extern void fps_fnmadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
      29  extern void fps_fnmsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
      31  extern void fps_fsel(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
      37  u64 *dst, u64 *src1, u64 *src2);
      [all …]
|
/linux-6.1.9/arch/arm64/lib/ |
D | memcmp.S |
      21  #define src2 x1  macro
      40  ldr data2, [src2], 8
      48  ldr data2, [src2, limit]
      53  ldr data2, [src2], 8
      71  sub src2, src2, tmp1
      79  ldp data2, data2h, [src2], 16
      95  add src2, src2, limit
      97  ldp data2, data2h, [src2]
     122  ldr data2w, [src2], 4
     131  ldrb data2w, [src2], 1
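
The ldr/ldp sequences above read the inputs 8 or 16 bytes at a time, dropping to ldrb only once a difference or the tail is reached. A hedged C rendering of that strategy, not the arm64 routine itself; memcmp_words() is a hypothetical name:

        #include <stdint.h>
        #include <stddef.h>
        #include <string.h>

        static int memcmp_words(const void *p1, const void *p2, size_t n)
        {
                const uint8_t *s1 = p1, *s2 = p2;

                while (n >= 8) {
                        uint64_t a, b;

                        memcpy(&a, s1, 8);
                        memcpy(&b, s2, 8);
                        if (a != b)
                                break;          /* locate the byte below */
                        s1 += 8; s2 += 8; n -= 8;
                }
                while (n--) {                   /* tail, or the differing word */
                        if (*s1 != *s2)
                                return *s1 - *s2;
                        s1++; s2++;
                }
                return 0;
        }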
|
D | strncmp.S |
      25  #define src2 x1  macro
      63  eor tmp1, src1, src2
      76  ldr data2, [src2], #8
     165  bic src2, src2, #7
     168  ldr data2, [src2], #8
     187  ldrb data2w, [src2], #1
     206  ldrb data2w, [src2], #1
     233  lsl offset, src2, #3
     234  bic src2, src2, #0xf
     238  ldp tmp1, tmp2, [src2], #16
     [all …]
|
D | strcmp.S |
      24  #define src2 x1  macro
      57  sub off2, src2, src1
     114  neg shift, src2, lsl 3  /* Bits to alignment -64. */
     127  ldrb data2w, [src2], 1
     135  neg shift, src2, lsl 3
     136  bic src2, src2, 7
     137  ldr data3, [src2], 8
     148  sub off1, src2, src1
|
/linux-6.1.9/arch/arc/kernel/ |
D | unaligned.c |
     139  set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);  in fixup_load()
     142  state->src2 = 0;  in fixup_load()
     146  get32_unaligned_check(val, state->src1 + state->src2);  in fixup_load()
     148  get16_unaligned_check(val, state->src1 + state->src2);  in fixup_load()
     167  set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);  in fixup_store()
     173  set_reg(state->wb_reg, state->src2 + (state->src3 << 1),  in fixup_store()
     176  set_reg(state->wb_reg, state->src2 + (state->src3 << 2),  in fixup_store()
     185  put32_unaligned_check(state->src1, state->src2 + state->src3);  in fixup_store()
     187  put16_unaligned_check(state->src1, state->src2 + state->src3);  in fixup_store()
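
fixup_load() above computes the effective address as src1 + src2 and replays the access with get32/get16_unaligned_check(), i.e. byte loads that are safe at any alignment. The basic idea, minus the fault checking, looks like this; load32_unaligned() is a hypothetical name and little-endian byte order is assumed:

        #include <stdint.h>

        static uint32_t load32_unaligned(const uint8_t *p)
        {
                /* assemble a 32-bit value from four byte loads */
                return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
                       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
        }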
|
D | disasm.c |
     117  state->src2 = FIELD_s9(state->words[0]);  in disasm_instr()
     140  state->src2 = state->words[1];  in disasm_instr()
     142  state->src2 = get_reg(state->wb_reg, regs, cregs);  in disasm_instr()
     224  state->src2 = FIELD_C(state->words[0]);  in disasm_instr()
     225  if (state->src2 == REG_LIMM) {  in disasm_instr()
     227  state->src2 = state->words[1];  in disasm_instr()
     229  state->src2 = get_reg(state->src2, regs,  in disasm_instr()
     286  state->src2 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);  in disasm_instr()
     329  state->src2 = FIELD_S_u7(state->words[0]);  in disasm_instr()
     347  state->src2 = FIELD_S_u6(state->words[0]);  in disasm_instr()
     [all …]
|
/linux-6.1.9/arch/m68k/math-emu/ |
D | multi_arith.h |
     115  struct fp_ext *src2)  in fp_submant() argument
     119  : "g,d" (src2->lowmant), "0,0" (src1->lowmant));  in fp_submant()
     121  : "d" (src2->mant.m32[1]), "0" (src1->mant.m32[1]));  in fp_submant()
     123  : "d" (src2->mant.m32[0]), "0" (src1->mant.m32[0]));  in fp_submant()
     126  #define fp_mul64(desth, destl, src1, src2) ({ \  argument
     128  : "dm" (src1), "0" (src2)); \
     133  #define fp_add64(dest1, dest2, src1, src2) ({ \  argument
     135  : "dm,d" (src2), "0,0" (dest2)); \
     166  struct fp_ext *src2)  in fp_multiplymant() argument
     170  fp_mul64(dest->m32[0], dest->m32[1], src1->mant.m32[0], src2->mant.m32[0]);  in fp_multiplymant()
     [all …]
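
fp_mul64() and fp_add64() above wrap m68k mulu.l/addx instructions to provide a 32x32 -> 64 widening multiply and a two-word add that propagates the carry. A portable sketch of the same primitives; mul64() and add64() are hypothetical names:

        #include <stdint.h>

        static void mul64(uint32_t *hi, uint32_t *lo, uint32_t a, uint32_t b)
        {
                uint64_t p = (uint64_t)a * b;   /* widening multiply */

                *hi = (uint32_t)(p >> 32);
                *lo = (uint32_t)p;
        }

        static void add64(uint32_t *hi, uint32_t *lo,
                          uint32_t addhi, uint32_t addlo)
        {
                uint32_t old = *lo;

                *lo += addlo;
                *hi += addhi + (*lo < old);     /* carry out of the low word */
        }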
|
D | fp_log.c |
      31  struct fp_ext tmp, src2;  in fp_fsqrt() local
      64  fp_copy_ext(&src2, dest);  in fp_fsqrt()
      89  fp_copy_ext(&tmp, &src2);  in fp_fsqrt()
|
/linux-6.1.9/arch/ia64/lib/ |
D | copy_page.S |
      30  #define src2 r21  macro
      60  adds src2=8,in1
      73  (p[0]) ld8 t2[0]=[src2],16
      79  (p[0]) ld8 t4[0]=[src2],16
      84  (p[0]) ld8 t6[0]=[src2],16
      89  (p[0]) ld8 t8[0]=[src2],16
|
D | memcpy.S |
      31  # define src2 r17  macro
     189  adds src2=7,src  // src2 = (src + 7)
     192  and src2=-8,src2  // src2 = (src + 7) & ~7
     196  ld8 t1=[src2]  // t1 = 1st source word if src is 8-byte aligned, 2nd otherwise
     227  and src2=-8,src  // align source pointer
     254  (p6) ld8 val[1]=[src2],8  // prime the pump...
     279  (p[0]) ld8 val[0]=[src2],8; \
|
D | copy_user.S |
      68  #define src2 r26  macro
     130  and src2=0x7,src1  // src offset
     160  sub t1=8,src2
     161  mov t2=src2
     171  cmp.leu p14,p15=src2,dst2
     172  sub t1=dst2,src2
     175  (p14) sub word1=8,src2  // (8 - src offset)
     369  adds src2=8,src1
     378  (p16) ld8 val2[0]=[src2],16
|
/linux-6.1.9/arch/xtensa/lib/ |
D | umulsidi3.S |
     197  .macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2
     199  1: add \tmp1, \src2, \dst
     203  do_addx2 \tmp1, \src2, \dst, \tmp1
     207  do_addx4 \tmp1, \src2, \dst, \tmp1
     211  do_addx8 \tmp1, \src2, \dst, \tmp1
     216  slli \src2, \src2, 4
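
mul_mulsi3_body above is a shift-and-add multiply: each loop round tests four bits of one operand, folds in src2 scaled by 1, 2, 4 or 8 via addx2/addx4/addx8, then shifts src2 left by 4. The same algorithm one bit at a time, in C for clarity; mulsi3() is a hypothetical name:

        #include <stdint.h>

        static uint32_t mulsi3(uint32_t a, uint32_t b)
        {
                uint32_t acc = 0;

                while (a) {
                        if (a & 1)
                                acc += b;       /* add the shifted multiplicand */
                        a >>= 1;
                        b <<= 1;
                }
                return acc;
        }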
|
/linux-6.1.9/arch/x86/include/asm/ |
D | mpspec.h |
      95  #define physids_and(dst, src1, src2) \  argument
      96  bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_LOCAL_APIC)
      98  #define physids_or(dst, src1, src2) \  argument
      99  bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_LOCAL_APIC)
|
/linux-6.1.9/arch/sparc/kernel/ |
D | visemul.c |
     604  s16 src2 = (rs2 >> (byte * 16)) & 0xffff;  in pmul() local
     605  u32 prod = src1 * src2;  in pmul()
     621  s16 src2;  in pmul() local
     627  src2 = rs2 >> (opf == FMUL8x16AU_OPF ? 16 : 0);  in pmul()
     630  u32 prod = src1 * src2;  in pmul()
     654  s16 src2;  in pmul() local
     659  src2 = ((rs2 >> (16 * byte)) & 0xffff);  in pmul()
     660  prod = src1 * src2;  in pmul()
     684  s16 src2;  in pmul() local
     689  src2 = ((rs2 >> (16 * byte)) & 0xffff);  in pmul()
     [all …]
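
pmul() above emulates VIS partitioned multiplies: each 16-bit lane is pulled out of the packed 64-bit register, multiplied, and the scaled product written back into the corresponding lane. A simplified lane-wise model that keeps the high half of each 16x16 -> 32 product; pmul16_hi() is a hypothetical name, and the real opcodes differ in operand widths and rounding:

        #include <stdint.h>

        static uint64_t pmul16_hi(uint64_t rs1, uint64_t rs2)
        {
                uint64_t rd = 0;
                int lane;

                for (lane = 0; lane < 4; lane++) {
                        int16_t a = (int16_t)(uint16_t)(rs1 >> (lane * 16));
                        int16_t b = (int16_t)(uint16_t)(rs2 >> (lane * 16));
                        uint32_t prod = (uint32_t)((int32_t)a * b);

                        /* repack the high 16 bits of this lane's product */
                        rd |= (uint64_t)((prod >> 16) & 0xffff) << (lane * 16);
                }
                return rd;
        }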
|
/linux-6.1.9/include/crypto/ |
D | algapi.h |
     150  void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
     171  static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,  in crypto_xor_cpy() argument
     179  unsigned long *s2 = (unsigned long *)src2;  in crypto_xor_cpy()
     188  __crypto_xor(dst, src1, src2, size);  in crypto_xor_cpy()
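
crypto_xor_cpy() above is the public wrapper: when the compiler can see the length is a whole number of machine words it XORs inline, otherwise it falls back to the out-of-line __crypto_xor(). A sketch of that constant-folding pattern; xor_cpy() is a stand-in, and the real inline is additionally gated on the platform supporting efficient unaligned access:

        static inline void xor_cpy(unsigned char *dst, const unsigned char *src1,
                                   const unsigned char *src2, unsigned int size)
        {
                if (__builtin_constant_p(size) &&
                    size % sizeof(unsigned long) == 0) {
                        /* size known at compile time: loop fully unrollable */
                        unsigned long *d = (unsigned long *)dst;
                        const unsigned long *s1 = (const unsigned long *)src1;
                        const unsigned long *s2 = (const unsigned long *)src2;

                        while (size > 0) {
                                *d++ = *s1++ ^ *s2++;
                                size -= sizeof(unsigned long);
                        }
                } else {
                        while (size--)          /* generic byte fallback */
                                *dst++ = *src1++ ^ *src2++;
                }
        }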
|
/linux-6.1.9/arch/powerpc/mm/book3s64/ |
D | slice.c |
     401  const struct slice_mask *src2)  in slice_or_mask() argument
     403  dst->low_slices = src1->low_slices | src2->low_slices;  in slice_or_mask()
     406  bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);  in slice_or_mask()
     411  const struct slice_mask *src2)  in slice_andnot_mask() argument
     413  dst->low_slices = src1->low_slices & ~src2->low_slices;  in slice_andnot_mask()
     416  bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);  in slice_andnot_mask()
|
/linux-6.1.9/arch/parisc/math-emu/ |
D | sgl_float.h |
      28  #define Sgl_and_signs( src1dst, src2) \  argument
      29  Sall(src1dst) = (Sall(src2)|~((unsigned int)1<<31)) & Sall(src1dst)
      30  #define Sgl_or_signs( src1dst, src2) \  argument
      31  Sall(src1dst) = (Sall(src2)&((unsigned int)1<<31)) | Sall(src1dst)
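
Sgl_and_signs() above combines just the sign bits of two single-precision bit patterns: OR-ing src2 with ~(1<<31) yields a mask that is all ones except possibly bit 31, so the final AND can only clear the sign of src1dst. Written out as plain functions; and_signs()/or_signs() are hypothetical names:

        #include <stdint.h>

        #define SIGN_BIT ((uint32_t)1 << 31)

        /* result sign = sign(a) AND sign(b); all other bits of a kept */
        static uint32_t and_signs(uint32_t a, uint32_t b)
        {
                return (b | ~SIGN_BIT) & a;
        }

        /* result sign = sign(a) OR sign(b); all other bits of a kept */
        static uint32_t or_signs(uint32_t a, uint32_t b)
        {
                return (b & SIGN_BIT) | a;
        }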
|
D | dbl_float.h |
      29  #define Dbl_and_signs( src1dst, src2) \  argument
      30  Dallp1(src1dst) = (Dallp1(src2)|~((unsigned int)1<<31)) & Dallp1(src1dst)
      31  #define Dbl_or_signs( src1dst, src2) \  argument
      32  Dallp1(src1dst) = (Dallp1(src2)&((unsigned int)1<<31)) | Dallp1(src1dst)
     718  #define Dbl_copyto_dblext(src1,src2,dest1,dest2,dest3,dest4) \  argument
     719  Dextallp1(dest1) = Dallp1(src1); Dextallp2(dest2) = Dallp2(src2); \
|
/linux-6.1.9/arch/arc/include/asm/ |
D | disasm.h |
      85  int src1, src2, src3, dest, wb_reg;  member
|
/linux-6.1.9/drivers/video/fbdev/core/ |
D | svgalib.c |
     242  u16 __iomem *src2 = src;  in svga_tilecopy() local
     245  fb_writew(fb_readw(src2), dst2);  in svga_tilecopy()
     247  src2 += colstride;  in svga_tilecopy()
|