Home
last modified time | relevance | path

Searched refs:rcx (Results 1 – 25 of 136) sorted by relevance

Pages: 1 2 3 4 5 6

/glibc-2.36/sysdeps/x86_64/multiarch/
wcscpy-ssse3.S:35  mov %rsi, %rcx
38 cmpl $0, (%rcx)
40 cmpl $0, 4(%rcx)
42 cmpl $0, 8(%rcx)
44 cmpl $0, 12(%rcx)
47 lea 16(%rcx), %rsi
51 mov (%rcx), %r9
55 mov 8(%rcx), %r9
59 sub %rcx, %rsi
68 sub %rax, %rcx
[all …]
memmove-avx512-no-vzeroupper.S:53  lea (%rsi, %rdx), %rcx
67 vmovups -0x100(%rcx), %zmm4
68 vmovups -0xC0(%rcx), %zmm5
69 vmovups -0x80(%rcx), %zmm6
70 vmovups -0x40(%rcx), %zmm7
86 vmovups -0x80(%rcx), %zmm2
87 vmovups -0x40(%rcx), %zmm3
99 vmovdqu -0x40(%rcx), %ymm2
100 vmovdqu -0x20(%rcx), %ymm3
111 vmovdqu -0x20(%rcx), %ymm1
[all …]
strcmp-sse2.S:41  lea -16(%rcx, %r11), %r9; \
57 lea -16(%rcx, %r11), %r9; \
113 mov LOCALE_T___LOCALES+LC_CTYPE*LP_SIZE(%rcx), %RAX_LP
115 mov (%rcx), %RAX_LP
134 and $0x3f, %rcx /* rsi alignment in cache line */
215 sub %rcx, %r9
250 mov $16, %rcx
260 movdqa (%rsi, %rcx), %xmm1
261 movdqa (%rdi, %rcx), %xmm2
275 add $16, %rcx
[all …]
memrchr-sse2.S:42  leaq (%rdx, %rdi), %rcx
60 movups -(VEC_SIZE)(%rcx), %xmm1
84 leaq -(VEC_SIZE)(%rcx, %rax), %rax
99 decq %rcx
100 andq $-VEC_SIZE, %rcx
102 movq %rcx, %rdx
107 movaps -(VEC_SIZE)(%rcx), %xmm1
121 movaps -(VEC_SIZE * 2)(%rcx), %xmm1
142 leaq -1(%rcx), %r8
156 leaq (%rdi, %rdx), %rcx
[all …]
strcpy-sse2-unaligned.S:38  movslq (%r11, INDEX, SCALE), %rcx; \
39 lea (%r11, %rcx), %rcx; \
40 _CET_NOTRACK jmp *%rcx
51 mov %rsi, %rcx
58 and $63, %rcx
59 cmp $32, %rcx
63 and $15, %rcx
74 sub %rcx, %r10
78 sub %rcx, %r10
97 movdqu (%rsi, %rcx), %xmm1 /* copy 16 bytes */
[all …]
strcmp-avx2.S:97  # define LOCALE_REG rcx
237 movq %rdx, %rcx
243 shrq $56, %rcx
310 movl (%rdi, %rcx), %edx
312 cmpl (%rsi, %rcx), %edx
318 movzbl (%rdi, %rcx), %eax
319 movzbl (%rsi, %rcx), %ecx
321 TOLOWER_gpr (%rcx, %ecx)
362 TOLOWER_gpr (%rcx, %ecx)
376 cmpq %rcx, %rdx
[all …]
memrchr-avx2.S:76  subq %rcx, %rax
87 subq %rcx, %rax
119 lzcntq %rcx, %rcx
123 subq %rcx, %rax
148 subq %rcx, %rax
153 lzcntq %rcx, %rcx
154 subq %rcx, %rax
187 subq %rcx, %rax
202 subq %rcx, %rax
212 leaq (VEC_SIZE * -3 + 1)(%rcx, %rax), %rax
[all …]
strcpy-avx2.S:62  mov %rsi, %rcx
85 sub %rcx, %r10
89 sub %rcx, %r10
108 vmovdqu (%rsi, %rcx), %ymm2 /* copy VEC_SIZE bytes */
114 sub %rcx, %rdi
116 add %rcx, %r8
117 sbb %rcx, %rcx
118 or %rcx, %r8
120 mov $VEC_SIZE, %rcx
121 vmovdqa (%rsi, %rcx), %ymm2
[all …]
strcpy-evex.S:65  mov %rsi, %rcx
87 sub %rcx, %r10
91 sub %rcx, %r10
110 VMOVU (%rsi, %rcx), %YMM2 /* copy VEC_SIZE bytes */
116 sub %rcx, %rdi
118 add %rcx, %r8
119 sbb %rcx, %rcx
120 or %rcx, %r8
122 mov $VEC_SIZE, %rcx
123 VMOVA (%rsi, %rcx), %YMM2
[all …]
strcmp-evex.S:120  # define LOCALE_REG rcx
315 movl (%rdi, %rcx, SIZE_OF_CHAR), %edx
317 cmpl (%rsi, %rcx, SIZE_OF_CHAR), %edx
323 movzbl (%rdi, %rcx), %eax
324 movzbl (%rsi, %rcx), %ecx
326 TOLOWER_gpr (%rcx, %ecx)
366 TOLOWER_gpr (%rcx, %ecx)
380 cmpq %rcx, %rdx
384 movl VEC_SIZE(%rdi, %rcx, SIZE_OF_CHAR), %edx
386 cmpl VEC_SIZE(%rsi, %rcx, SIZE_OF_CHAR), %edx
[all …]
strcat-sse2.S:34  movq %rdi, %rcx /* Dest. register. */
57 movq (%rax), %rcx /* get double word (= 8 bytes) in question */
60 addq %rcx, %rdx /* add the magic value to the word. We get
64 xorq %rcx, %rdx /* (word+magic)^word */
71 movq (%rax), %rcx /* get double word (= 8 bytes) in question */
74 addq %rcx, %rdx /* add the magic value to the word. We get
78 xorq %rcx, %rdx /* (word+magic)^word */
85 movq (%rax), %rcx /* get double word (= 8 bytes) in question */
88 addq %rcx, %rdx /* add the magic value to the word. We get
92 xorq %rcx, %rdx /* (word+magic)^word */
[all …]
strcmp-sse2-unaligned.S:79  movq %rcx, %rdx
93 subq %rsi, %rcx
94 shrq $6, %rcx
95 movq %rcx, %rsi
141 salq $16, %rcx
144 orq %r8, %rcx
145 orq %rdi, %rcx
147 orq %rsi, %rcx
148 bsfq %rcx, %rcx
149 movzbl (%rax, %rcx), %eax
[all …]
memmove-ssse3.S:82  movq 0(%rsi), %rcx
84 movq %rcx, 0(%rdi)
128 movq %rdi, %rcx
131 subq %rsi, %rcx
132 cmpq %rdx, %rcx
145 addq %rcx, %rsi
167 addq %r9, %rcx
168 jmp * %rcx
172 testq %rcx, %rcx
194 addq %r9, %rcx
[all …]
memrchr-evex.S:71  subq %rcx, %rax
84 subq %rcx, %rax
121 lzcntq %rcx, %rcx
124 subq %rcx, %rax
147 subq %rcx, %rax
160 leaq -(VEC_SIZE * 2)(%rcx, %rax), %rax
192 subq %rcx, %rax
201 subq %rcx, %rax
209 leaq -(VEC_SIZE * 3)(%rcx, %rax), %rax
215 leaq -(VEC_SIZE * 4)(%rcx, %rax), %rax
[all …]
strcmp-sse4_2.S:37  lea -16(%rcx, %r11), %r9; \
129 mov LOCALE_T___LOCALES+LC_CTYPE*LP_SIZE(%rcx), %RAX_LP
131 mov (%rcx), %RAX_LP
147 and $0x3f, %rcx /* rsi alignment in cache line */
230 sub %rcx, %r9
265 mov $16, %rcx
272 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
311 sub %rcx, %r11
314 lea -16(%rdx, %rcx), %rcx
315 movzbl (%rdi, %rcx), %eax
[all …]
strlen-sse2.S:70  salq $16, %rcx; \
72 orq %r8, %rcx; \
73 salq $32, %rcx; \
74 orq %rcx, %rdx;
106 movq %rdi, %rcx
107 andq $4095, %rcx
109 cmpq $4047, %rcx
127 movq %rdi, %rcx; \
128 xorq %rax, %rcx; \
161 salq $16, %rcx
[all …]
memmove-vec-unaligned-erms.S:372  movq -8(%rsi, %rdx), %rcx
375 movq %rcx, -8(%rdi, %rdx)
425 movq %rdi, %rcx
426 subq %rsi, %rcx
430 cmpq %rdx, %rcx
448 leaq (%rcx, %rdx), %r8
451 xorq %rcx, %r8
472 movq %rdi, %rcx
479 subq %rcx, %rsi
485 leaq (VEC_SIZE * -4)(%rcx, %rdx), %rdx
[all …]
strlen-avx2.S:125  leal -(VEC_SIZE * 4 + 1)(%rax, %rcx, 4), %eax
150 leal -(VEC_SIZE * 3 + 1)(%rax, %rcx, 4), %eax
175 leal -(VEC_SIZE * 2 + 1)(%rax, %rcx, 4), %eax
200 leal -(VEC_SIZE * 1 + 1)(%rax, %rcx, 4), %eax
228 leaq (VEC_SIZE * 4 + CHAR_SIZE + 1)(%rdi), %rcx
229 subq %rdx, %rcx
239 subq %rcx, %rsi
275 addq %rcx, %rsi
322 salq $32, %rcx
323 orq %rcx, %rax
[all …]
wcschr-sse2.S:36  mov %rdi, %rcx
40 and $63, %rcx
41 cmp $48, %rcx
67 and $15, %rcx
89 add %rcx, %rax
148 bsf %rdx, %rcx
149 cmp %rcx, %rax
/glibc-2.36/sysdeps/unix/sysv/linux/x86_64/
swapcontext.S:49  movq %rcx, oRCX(%rdi)
53 movq (%rsp), %rcx
54 movq %rcx, oRIP(%rdi)
55 leaq 8(%rsp), %rcx /* Exclude the return address. */
56 movq %rcx, oRSP(%rdi)
61 leaq oFPREGSMEM(%rdi), %rcx
62 movq %rcx, oFPREGS(%rdi)
64 fnstenv (%rcx)
90 movq oFPREGS(%rdx), %rcx
91 fldenv (%rcx)
[all …]
setcontext.S:57  movq oFPREGS(%rdx), %rcx
58 fldenv (%rcx)
95 movq (oSSP + 8)(%rdx), %rcx
96 cmpq %fs:SSP_BASE_OFFSET, %rcx
129 rdsspq %rcx
130 subq %rdi, %rcx
132 negq %rcx
133 shrq $3, %rcx
136 cmpq %rsi, %rcx
137 cmovb %rcx, %rsi
[all …]
getcontext.S:47  movq %rcx, oRCX(%rdi)
51 movq (%rsp), %rcx
52 movq %rcx, oRIP(%rdi)
53 leaq 8(%rsp), %rcx /* Exclude the return address. */
54 movq %rcx, oRSP(%rdi)
109 leaq oFPREGSMEM(%rdi), %rcx
110 movq %rcx, oFPREGS(%rdi)
112 fnstenv (%rcx)
113 fldenv (%rcx)
/glibc-2.36/sysdeps/x86_64/
_mcount.S:32  movq %rcx,8(%rsp)
33 cfi_rel_offset (rcx, 8)
63 movq 8(%rsp),%rcx
64 cfi_restore (rcx)
86 movq %rcx,8(%rsp)
87 cfi_rel_offset (rcx, 8)
117 movq 8(%rsp),%rcx
118 cfi_restore (rcx)
dl-trampoline.h:98  movq %rcx, REGISTER_SAVE_RCX(%rsp)
145 movq REGISTER_SAVE_RCX(%rsp), %rcx
215 movq %rcx, LR_RCX_OFFSET(%rsp)
258 mov %RSP_LP, %RCX_LP # La_x86_64_regs pointer to %rcx.
367 movq LR_RCX_OFFSET(%rsp), %rcx
398 movq 24(%rdi), %rcx # Get back register content.
426 mov %RSP_LP, %RCX_LP # La_x86_64_retval argument to %rcx.
429 movq %rax, LRV_RAX_OFFSET(%rcx)
430 movq %rdx, LRV_RDX_OFFSET(%rcx)
432 VMOVA %xmm0, LRV_XMM0_OFFSET(%rcx)
[all …]
addmul_1.S:26  #define v0 %rcx
45 mul %rcx
53 mul %rcx
68 mul %rcx
75 L(top): mul %rcx
84 L(mid): mul %rcx
96 mul %rcx

Pages: 1 2 3 4 5 6