/linux-6.1.9/include/asm-generic/
  xor.h
    12   const unsigned long * __restrict p2) in xor_8regs_2() argument
    17   p1[0] ^= p2[0]; in xor_8regs_2()
    18   p1[1] ^= p2[1]; in xor_8regs_2()
    19   p1[2] ^= p2[2]; in xor_8regs_2()
    20   p1[3] ^= p2[3]; in xor_8regs_2()
    21   p1[4] ^= p2[4]; in xor_8regs_2()
    22   p1[5] ^= p2[5]; in xor_8regs_2()
    23   p1[6] ^= p2[6]; in xor_8regs_2()
    24   p1[7] ^= p2[7]; in xor_8regs_2()
    26   p2 += 8; in xor_8regs_2()
    [all …]

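The xor_8regs_2() matches above are the portable asm-generic fallback: the loop body is unrolled so eight unsigned longs are XORed per pass before both pointers advance. A minimal stand-alone C sketch of that pattern, assuming the byte count is a multiple of 8 * sizeof(unsigned long) (function and variable names are illustrative, not the kernel's):

#include <stdio.h>

/* XOR src into dst, eight unsigned longs per iteration, mirroring the
 * unrolled p1[n] ^= p2[n] statements in the matches above. */
static void xor_8regs_sketch(unsigned long bytes,
                             unsigned long *dst, const unsigned long *src)
{
        long lines = bytes / (8 * sizeof(unsigned long));

        do {
                dst[0] ^= src[0];
                dst[1] ^= src[1];
                dst[2] ^= src[2];
                dst[3] ^= src[3];
                dst[4] ^= src[4];
                dst[5] ^= src[5];
                dst[6] ^= src[6];
                dst[7] ^= src[7];
                dst += 8;
                src += 8;        /* the "p2 += 8" step in the listing */
        } while (--lines > 0);
}

int main(void)
{
        unsigned long a[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned long b[8] = { 8, 7, 6, 5, 4, 3, 2, 1 };

        xor_8regs_sketch(sizeof(a), a, b);
        printf("a[0] = %lu\n", a[0]);        /* 1 ^ 8 == 9 */
        return 0;
}

The unrolling hands the compiler eight independent loads and stores per iteration to schedule; the kernel chooses between this generic version and the per-architecture variants listed further down by benchmarking them at initialization.
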
/linux-6.1.9/scripts/coccinelle/misc/
  array_size_dup.cocci
    30   position p1, p2;
    36   * array_size(E1, E2)@p2
    40   p2 << as_next.p2;
    43   msg = "WARNING: array_size is used later (line %s) to compute the same size" % (p2[0].line)
    48   p2 << as_next.p2;
    51   msg = "WARNING: array_size is used later (line %s) to compute the same size" % (p2[0].line)
    59   position p1, p2;
    65   * E1 * E2@p2
    69   p2 << as_prev.p2;
    73   coccilib.report.print_report(p2[0], msg)
    [all …]

  ifcol.cocci
    23   position p1,p2;
    30   if (...) S1@p1 S2@p2
    35   p2 << r.p2;
    38   if (p1[0].column == p2[0].column):
    40   cocci.print_secs("after",p2)
    44   p2 << r.p2;
    47   if (p1[0].column == p2[0].column):
    48   msg = "code aligned with following code on line %s" % (p2[0].line)

  cstptr.cocci
    19   position p1,p2;
    26   *PTR_ERR@p2(e)
    30   p2 << r.p2;
    33   cocci.print_main("PTR_ERR",p2)
    38   p2 << r.p2;
    42   coccilib.report.print_report(p2[0],msg)

/linux-6.1.9/drivers/thermal/qcom/
  tsens-v0_1.c
    229   u32 p1[5], p2[5]; in calibrate_8916() local
    249   p2[0] = (qfprom_cdata[0] & MSM8916_S0_P2_MASK) >> MSM8916_S0_P2_SHIFT; in calibrate_8916()
    250   p2[1] = (qfprom_cdata[0] & MSM8916_S1_P2_MASK) >> MSM8916_S1_P2_SHIFT; in calibrate_8916()
    251   p2[2] = (qfprom_cdata[1] & MSM8916_S2_P2_MASK) >> MSM8916_S2_P2_SHIFT; in calibrate_8916()
    252   p2[3] = (qfprom_cdata[1] & MSM8916_S3_P2_MASK) >> MSM8916_S3_P2_SHIFT; in calibrate_8916()
    253   p2[4] = (qfprom_cdata[1] & MSM8916_S4_P2_MASK) >> MSM8916_S4_P2_SHIFT; in calibrate_8916()
    255   p2[i] = ((base1 + p2[i]) << 3); in calibrate_8916()
    270   p2[i] = 780; in calibrate_8916()
    275   compute_intercept_slope(priv, p1, p2, mode); in calibrate_8916()
    285   u32 p1[10], p2[10]; in calibrate_8939() local
    [all …]

  tsens-v1.c
    146   u32 *p1, u32 *p2, u32 mode) in compute_intercept_slope_8976() argument
    172   u32 p1[10], p2[10]; in calibrate_v1() local
    187   p2[0] = (qfprom_cdata[0] & S0_P2_MASK) >> S0_P2_SHIFT; in calibrate_v1()
    188   p2[1] = (qfprom_cdata[0] & S1_P2_MASK) >> S1_P2_SHIFT; in calibrate_v1()
    192   p2[2] = msb << 2 | lsb; in calibrate_v1()
    193   p2[3] = (qfprom_cdata[1] & S3_P2_MASK) >> S3_P2_SHIFT; in calibrate_v1()
    194   p2[4] = (qfprom_cdata[1] & S4_P2_MASK) >> S4_P2_SHIFT; in calibrate_v1()
    195   p2[5] = (qfprom_cdata[2] & S5_P2_MASK) >> S5_P2_SHIFT; in calibrate_v1()
    196   p2[6] = (qfprom_cdata[2] & S6_P2_MASK) >> S6_P2_SHIFT; in calibrate_v1()
    200   p2[7] = msb << 2 | lsb; in calibrate_v1()
    [all …]

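Both tsens calibration routines above follow the same recipe: mask and shift each sensor's P2 code out of a packed QFPROM fuse word, then rebase and scale it ((base1 + p2[i]) << 3) before handing the array to compute_intercept_slope(). A small stand-alone sketch of that decode step; the field layout, fuse value, and base below are made up for illustration and do not correspond to any real MSM8916 register map:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical packing: two 6-bit P2 codes in one 32-bit fuse word. */
#define S0_P2_MASK      0x0000003fu
#define S0_P2_SHIFT     0
#define S1_P2_MASK      0x00000fc0u
#define S1_P2_SHIFT     6

int main(void)
{
        uint32_t qfprom_word = 0x00000a15;      /* made-up calibration fuse */
        uint32_t base1 = 10;                    /* made-up base reading */
        uint32_t p2[2];

        /* Extract each packed field, as in the calibrate_8916() matches... */
        p2[0] = (qfprom_word & S0_P2_MASK) >> S0_P2_SHIFT;
        p2[1] = (qfprom_word & S1_P2_MASK) >> S1_P2_SHIFT;

        /* ...then rebase and scale, as on line 255 of tsens-v0_1.c. */
        for (int i = 0; i < 2; i++)
                p2[i] = (base1 + p2[i]) << 3;

        printf("p2[0] = %u, p2[1] = %u\n", p2[0], p2[1]);
        return 0;
}
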
/linux-6.1.9/arch/powerpc/platforms/pseries/
  of_helpers.c
    49   const __be32 *p2; in of_read_drc_info_cell() local
    67   p2 = (const __be32 *)p; in of_read_drc_info_cell()
    68   data->drc_index_start = be32_to_cpu(*p2); in of_read_drc_info_cell()
    71   p2 = of_prop_next_u32(*prop, p2, &data->drc_name_suffix_start); in of_read_drc_info_cell()
    72   if (!p2) in of_read_drc_info_cell()
    76   p2 = of_prop_next_u32(*prop, p2, &data->num_sequential_elems); in of_read_drc_info_cell()
    77   if (!p2) in of_read_drc_info_cell()
    81   p2 = of_prop_next_u32(*prop, p2, &data->sequential_inc); in of_read_drc_info_cell()
    82   if (!p2) in of_read_drc_info_cell()
    86   p2 = of_prop_next_u32(*prop, p2, &data->drc_power_domain); in of_read_drc_info_cell()
    [all …]

/linux-6.1.9/scripts/coccinelle/api/
  d_find_alias.cocci
    19   position p1, p2;
    37   return @p2 ...;
    39   dent@p2 = E1;
    46   position r.p1,r.p2;
    51   * return@p2 ...;
    53   * dent@p2
    59   p2 << r.p2;
    62   cocci.print_secs("",p2)
    66   position r.p2;
    70   return @p2 ...;
    [all …]

  atomic_as_refcounter.cocci
    15   position p1, p2;
    40   fname@p2(a, ...);
    42   fname2@p2(...);
    44   fname3@p2(...);
    46   fname4@p2(...);
    48   fname5@p2(...);
    50   fname6@p2(...);
    56   p2 << r1.p2;
    59   coccilib.report.print_report(p1[0], msg % (p2[0].line))
    63   position p1, p2;
    [all …]

  kstrdup.cocci
    51   position p1,p2;
    58   * strcpy@p2(to, from);
    64   position p1,p2;
    73   * memcpy@p2(to, from, x);
    77   p2 << r1.p2;
    81   cocci.print_secs("strcpy",p2)
    85   p2 << r2.p2;
    89   cocci.print_secs("memcpy",p2)
    93   p2 << r1.p2;
    96   msg = "WARNING opportunity for kstrdup (strcpy on line %s)" % (p2[0].line)
    [all …]

  check_bq27xxx_data.cocci
    69   position p2;
    72   u8 i2regs@p2[...] = { i2regs_vals, };
    80   p2 << getregs2.p2;
    89   i1regs i2regs (List.hd p2).line in
    105   position p2;
    108   struct bq27xxx_dm_reg i2dmregs@p2[] = { i2dmregs_vals, };
    116   p2 << getdmregs2.p2;
    125   i1dmregs i2dmregs (List.hd p2).line in
    141   position p2;
    144   enum power_supply_property i2props@p2[] = { i2props_vals, };
    [all …]

/linux-6.1.9/arch/arm/include/asm/
  xor.h
    48   const unsigned long * __restrict p2) in xor_arm4regs_2() argument
    62   XOR_BLOCK_4(p2); in xor_arm4regs_2()
    69   const unsigned long * __restrict p2, in xor_arm4regs_3() argument
    84   XOR_BLOCK_4(p2); in xor_arm4regs_3()
    92   const unsigned long * __restrict p2, in xor_arm4regs_4() argument
    104   XOR_BLOCK_2(p2); in xor_arm4regs_4()
    113   const unsigned long * __restrict p2, in xor_arm4regs_5() argument
    126   XOR_BLOCK_2(p2); in xor_arm4regs_5()
    157   const unsigned long * __restrict p2) in xor_neon_2() argument
    160   xor_arm4regs_2(bytes, p1, p2); in xor_neon_2()
    [all …]

/linux-6.1.9/arch/sparc/include/asm/
  xor_32.h
    17   const unsigned long * __restrict p2) in sparc_2() argument
    44   : "r" (p1), "r" (p2) in sparc_2()
    49   p2 += 8; in sparc_2()
    55   const unsigned long * __restrict p2, in sparc_3() argument
    95   : "r" (p1), "r" (p2), "r" (p3) in sparc_3()
    100   p2 += 8; in sparc_3()
    107   const unsigned long * __restrict p2, in sparc_4() argument
    160   : "r" (p1), "r" (p2), "r" (p3), "r" (p4) in sparc_4()
    165   p2 += 8; in sparc_4()
    173   const unsigned long * __restrict p2, in sparc_5() argument
    [all …]

  xor_64.h
    16   const unsigned long * __restrict p2);
    18   const unsigned long * __restrict p2,
    21   const unsigned long * __restrict p2,
    25   const unsigned long * __restrict p2,
    41   const unsigned long * __restrict p2);
    43   const unsigned long * __restrict p2,
    46   const unsigned long * __restrict p2,
    50   const unsigned long * __restrict p2,

/linux-6.1.9/scripts/coccinelle/locks/
  flags.cocci
    18   position p1,p2;
    36   spin_lock_irqsave@p2(lock2,flags)
    38   read_lock_irqsave@p2(lock2,flags)
    40   write_lock_irqsave@p2(lock2,flags)
    46   position r.p1, r.p2;
    58   *spin_lock_irqsave@p2(lock2,flags)
    60   *read_lock_irqsave@p2(lock2,flags)
    62   *write_lock_irqsave@p2(lock2,flags)
    69   p2 << r.p2;
    73   cocci.print_secs("nested lock+irqsave that reuses flags",p2)
    [all …]

/linux-6.1.9/arch/x86/include/asm/
  xor_avx.h
    58   const unsigned long * __restrict p2) in xor_avx_3() argument
    68   asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p2[i / sizeof(*p2)])); \ in xor_avx_3()
    81   p2 = (unsigned long *)((uintptr_t)p2 + 512); in xor_avx_3()
    89   const unsigned long * __restrict p2, in xor_avx_4() argument
    102   "m" (p2[i / sizeof(*p2)])); \ in xor_avx_4()
    115   p2 = (unsigned long *)((uintptr_t)p2 + 512); in xor_avx_4()
    124   const unsigned long * __restrict p2, in xor_avx_5() argument
    140   "m" (p2[i / sizeof(*p2)])); \ in xor_avx_5()
    153   p2 = (unsigned long *)((uintptr_t)p2 + 512); in xor_avx_5()

  kvm_para.h
    63   unsigned long p2) in kvm_hypercall2() argument
    68   return tdx_kvm_hypercall(nr, p1, p2, 0, 0); in kvm_hypercall2()
    72   : "a"(nr), "b"(p1), "c"(p2) in kvm_hypercall2()
    78   unsigned long p2, unsigned long p3) in kvm_hypercall3() argument
    83   return tdx_kvm_hypercall(nr, p1, p2, p3, 0); in kvm_hypercall3()
    87   : "a"(nr), "b"(p1), "c"(p2), "d"(p3) in kvm_hypercall3()
    93   unsigned long p2, unsigned long p3, in kvm_hypercall4() argument
    99   return tdx_kvm_hypercall(nr, p1, p2, p3, p4); in kvm_hypercall4()
    103   : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4) in kvm_hypercall4()
    109   unsigned long p2, unsigned long p3) in kvm_sev_hypercall3() argument
    [all …]

  xor.h
    61   const unsigned long * __restrict p2) in xor_sse_2() argument
    104   [p1] "+r" (p1), [p2] "+r" (p2) in xor_sse_2()
    113   const unsigned long * __restrict p2) in xor_sse_2_pf64() argument
    139   [p1] "+r" (p1), [p2] "+r" (p2) in xor_sse_2_pf64()
    148   const unsigned long * __restrict p2, in xor_sse_3() argument
    199   [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) in xor_sse_3()
    208   const unsigned long * __restrict p2, in xor_sse_3_pf64() argument
    237   [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) in xor_sse_3_pf64()
    246   const unsigned long * __restrict p2, in xor_sse_4() argument
    305   [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) in xor_sse_4()
    [all …]

  xor_32.h
    25   const unsigned long * __restrict p2) in xor_pII_mmx_2() argument
    60   "+r" (p1), "+r" (p2) in xor_pII_mmx_2()
    69   const unsigned long * __restrict p2, in xor_pII_mmx_3() argument
    110   "+r" (p1), "+r" (p2), "+r" (p3) in xor_pII_mmx_3()
    119   const unsigned long * __restrict p2, in xor_pII_mmx_4() argument
    166   "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) in xor_pII_mmx_4()
    176   const unsigned long * __restrict p2, in xor_pII_mmx_5() argument
    237   "+r" (p1), "+r" (p2), "+r" (p3) in xor_pII_mmx_5()
    259   const unsigned long * __restrict p2) in xor_p5_mmx_2() argument
    298   "+r" (p1), "+r" (p2) in xor_p5_mmx_2()
    [all …]

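The xor_avx_3/4/5() matches above are the x86 AVX flavour of the same checksumming loop, streaming 512-byte blocks through the ymm registers with inline assembly (bracketed by kernel_fpu_begin()/kernel_fpu_end(), which is not visible in the excerpt). A rough user-space analogue using AVX2 intrinsics instead of inline asm; the names are illustrative, buffers are assumed to be a multiple of 32 bytes long, and it needs -mavx2 to build:

#include <immintrin.h>
#include <stdio.h>

/* dst ^= p1 ^ p2, 32 bytes (one ymm register) at a time. */
static void xor_avx3_sketch(unsigned long bytes, unsigned long *dst,
                            const unsigned long *p1, const unsigned long *p2)
{
        for (unsigned long i = 0; i < bytes; i += 32) {
                __m256i d = _mm256_loadu_si256((const __m256i *)((const char *)dst + i));
                __m256i a = _mm256_loadu_si256((const __m256i *)((const char *)p1 + i));
                __m256i b = _mm256_loadu_si256((const __m256i *)((const char *)p2 + i));

                d = _mm256_xor_si256(d, _mm256_xor_si256(a, b));
                _mm256_storeu_si256((__m256i *)((char *)dst + i), d);
        }
}

int main(void)
{
        unsigned long d[8] = { 0 };
        unsigned long a[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned long b[8] = { 8, 7, 6, 5, 4, 3, 2, 1 };

        xor_avx3_sketch(sizeof(d), d, a, b);
        printf("d[0] = %lu\n", d[0]);        /* 0 ^ 1 ^ 8 == 9 */
        return 0;
}
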
/linux-6.1.9/scripts/coccinelle/iterators/
  list_entry_update.cocci
    22   position p1,p2;
    25   list_for_each_entry@p1(x,...) { <... x =@p2 E ...> }
    29   position r.p1,r.p2;
    33   *x =@p2 E
    41   position r.p1,r.p2;
    45   x =@p2 E
    51   p2 << r.p2;
    55   cocci.print_secs("update",p2)
    59   p2 << r.p2;
    62   msg = "iterator with update on line %s" % (p2[0].line)

/linux-6.1.9/arch/s390/lib/
  xor.c
    15   const unsigned long * __restrict p2) in xor_xc_2() argument
    32   : : "d" (bytes), "a" (p1), "a" (p2) in xor_xc_2()
    37   const unsigned long * __restrict p2, in xor_xc_3() argument
    59   : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3) in xor_xc_3()
    64   const unsigned long * __restrict p2, in xor_xc_4() argument
    91   : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4) in xor_xc_4()
    96   const unsigned long * __restrict p2, in xor_xc_5() argument
    128   : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4), in xor_xc_5()

/linux-6.1.9/arch/powerpc/lib/
  xor_vmx_glue.c
    16   const unsigned long * __restrict p2) in xor_altivec_2() argument
    20   __xor_altivec_2(bytes, p1, p2); in xor_altivec_2()
    27   const unsigned long * __restrict p2, in xor_altivec_3() argument
    32   __xor_altivec_3(bytes, p1, p2, p3); in xor_altivec_3()
    39   const unsigned long * __restrict p2, in xor_altivec_4() argument
    45   __xor_altivec_4(bytes, p1, p2, p3, p4); in xor_altivec_4()
    52   const unsigned long * __restrict p2, in xor_altivec_5() argument
    59   __xor_altivec_5(bytes, p1, p2, p3, p4, p5); in xor_altivec_5()

/linux-6.1.9/arch/arm64/include/asm/
  xor.h
    20   const unsigned long * __restrict p2) in xor_neon_2() argument
    23   xor_block_inner_neon.do_2(bytes, p1, p2); in xor_neon_2()
    29   const unsigned long * __restrict p2, in xor_neon_3() argument
    33   xor_block_inner_neon.do_3(bytes, p1, p2, p3); in xor_neon_3()
    39   const unsigned long * __restrict p2, in xor_neon_4() argument
    44   xor_block_inner_neon.do_4(bytes, p1, p2, p3, p4); in xor_neon_4()
    50   const unsigned long * __restrict p2, in xor_neon_5() argument
    56   xor_block_inner_neon.do_5(bytes, p1, p2, p3, p4, p5); in xor_neon_5()

/linux-6.1.9/scripts/coccinelle/free/
  pci_free_consistent.cocci
    16   position p1,p2;
    35   return@p2 ...;
    40   p2 << search.p2;
    43   …t; pci_alloc_consistent on line %s and return without freeing on line %s" % (p1[0].line,p2[0].line)
    44   coccilib.report.print_report(p2[0],msg)
    48   p2 << search.p2;
    51   …t; pci_alloc_consistent on line %s and return without freeing on line %s" % (p1[0].line,p2[0].line)
    53   cocci.print_secs("",p2)

/linux-6.1.9/arch/alpha/include/asm/
  xor.h
    10   const unsigned long * __restrict p2);
    13   const unsigned long * __restrict p2,
    17   const unsigned long * __restrict p2,
    22   const unsigned long * __restrict p2,
    29   const unsigned long * __restrict p2);
    32   const unsigned long * __restrict p2,
    36   const unsigned long * __restrict p2,
    41   const unsigned long * __restrict p2,