Lines matching refs: TMP1

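These are the identifier matches for TMP1 in the Linux kernel's AES-NI GCM implementation (arch/x86/crypto/aesni-intel_asm.S). The leading numbers are line numbers in that file, and the matches are grouped by the macros they fall in.
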
195 .macro PRECOMPUTE SUBKEY TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 TMP7
206 movdqa \TMP2, \TMP1
208 psrldq $8, \TMP1
213 pshufd $0x24, \TMP1, \TMP2
220 pshufd $78, \TMP3, \TMP1
221 pxor \TMP3, \TMP1
222 movdqu \TMP1, HashKey_k(%arg2)
224 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
228 pshufd $78, \TMP5, \TMP1
229 pxor \TMP5, \TMP1
230 movdqu \TMP1, HashKey_2_k(%arg2)
232 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
235 pshufd $78, \TMP5, \TMP1
236 pxor \TMP5, \TMP1
237 movdqu \TMP1, HashKey_3_k(%arg2)
239 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
242 pshufd $78, \TMP5, \TMP1
243 pxor \TMP5, \TMP1
244 movdqu \TMP1, HashKey_4_k(%arg2)
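
In PRECOMPUTE, each pshufd $78 / pxor pair builds a HashKey_i_k value: immediate 78 (0x4E) swaps the two 64-bit halves of the key power, so the XOR leaves (hi ^ lo) in both halves, which later feeds the Karatsuba middle pclmulqdq. A minimal C-intrinsics sketch of that one step (the function name is mine, not the kernel's):

    #include <emmintrin.h>                           /* SSE2 */

    /* Mirror of "pshufd $78, H, T ; pxor H, T": both 64-bit halves of
     * the result hold hi(H) ^ lo(H), the value stored as HashKey_i_k. */
    static __m128i ghash_karatsuba_half(__m128i h)
    {
        __m128i swapped = _mm_shuffle_epi32(h, 78);  /* swap qwords */
        return _mm_xor_si128(swapped, h);
    }
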
510 .macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
511 movdqa \GH, \TMP1
516 pclmulqdq $0x11, \HK, \TMP1 # TMP1 = a1*b1
520 pxor \TMP1, \TMP2 # TMP2 = (a1*b0)+(a0*b1)
525 pxor \TMP2, \TMP1 # TMP1:GH holds the result of GH*HK
558 pxor \TMP1, \GH # result is in GH
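
GHASH_MUL is one Karatsuba carry-less multiply: pclmulqdq $0x11 and $0x00 give a1*b1 and a0*b0, a third pclmulqdq on the XORed halves gives the middle term, and pslldq/psrldq split that term across the two 128-bit halves of the 256-bit product ahead of the (128,127,126,121,0) polynomial reduction. A hedged C sketch of everything up to the reduction (which is omitted here):

    #include <emmintrin.h>
    #include <wmmintrin.h>        /* pclmulqdq; compile with -mpclmul */

    /* 128x128 -> 256-bit carry-less multiply, Karatsuba style, matching
     * GHASH_MUL up to "TMP1:GH holds the result of GH*HK"; the
     * reduction that follows in the macro is not shown. */
    static void clmul_karatsuba(__m128i a, __m128i b,
                                __m128i *hi, __m128i *lo)
    {
        __m128i a1b1 = _mm_clmulepi64_si128(a, b, 0x11);
        __m128i a0b0 = _mm_clmulepi64_si128(a, b, 0x00);
        __m128i asum = _mm_xor_si128(a, _mm_shuffle_epi32(a, 78));
        __m128i bsum = _mm_xor_si128(b, _mm_shuffle_epi32(b, 78));
        __m128i mid  = _mm_clmulepi64_si128(asum, bsum, 0x00);

        mid = _mm_xor_si128(mid, _mm_xor_si128(a0b0, a1b1)); /* a1b0^a0b1 */
        *lo = _mm_xor_si128(a0b0, _mm_slli_si128(mid, 8));   /* pslldq $8 */
        *hi = _mm_xor_si128(a1b1, _mm_srli_si128(mid, 8));   /* psrldq $8 */
    }
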
594 .macro CALC_AAD_HASH HASHKEY AAD AADLEN TMP1 TMP2 TMP3 TMP4 TMP5 \
608 GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
621 READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
624 GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
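
CALC_AAD_HASH is plain GHASH absorption: XOR each 16-byte block of additional authenticated data into the running hash and multiply by the hash key, with READ_PARTIAL_BLOCK zero-padding a trailing fragment. A sketch, assuming a ghash_mul() that wraps the multiply-and-reduce above and ignoring the kernel's byte-order shuffles:

    #include <string.h>
    #include <emmintrin.h>

    __m128i ghash_mul(__m128i x, __m128i h);    /* multiply + reduce */

    static __m128i calc_aad_hash(__m128i hash, __m128i h,
                                 const unsigned char *aad, size_t len)
    {
        while (len >= 16) {                     /* one GHASH_MUL per block */
            __m128i blk = _mm_loadu_si128((const __m128i *)aad);
            hash = ghash_mul(_mm_xor_si128(hash, blk), h);
            aad += 16; len -= 16;
        }
        if (len) {                              /* READ_PARTIAL_BLOCK path */
            unsigned char last[16] = { 0 };
            memcpy(last, aad, len);
            hash = ghash_mul(_mm_xor_si128(hash,
                             _mm_loadu_si128((const __m128i *)last)), h);
        }
        return hash;
    }
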
787 .macro INITIAL_BLOCKS_ENC_DEC TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
799 MOVADQ ONE(%RIP),\TMP1
802 paddd \TMP1, \XMM0 # INCR Y0
817 MOVADQ (%r10),\TMP1
819 aesenc \TMP1, %xmm\index
825 MOVADQ (%r10), \TMP1
827 aesenclast \TMP1, %xmm\index # Last Round
830 movdqu (%arg4 , %r11, 1), \TMP1
831 pxor \TMP1, %xmm\index
837 movdqa \TMP1, %xmm\index
849 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
851 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
853 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
856 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
858 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
861 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
871 MOVADQ ONE(%RIP),\TMP1
872 paddd \TMP1, \XMM0 # INCR Y0
876 paddd \TMP1, \XMM0 # INCR Y0
880 paddd \TMP1, \XMM0 # INCR Y0
884 paddd \TMP1, \XMM0 # INCR Y0
888 MOVADQ 0(%arg1),\TMP1
889 pxor \TMP1, \XMM1
890 pxor \TMP1, \XMM2
891 pxor \TMP1, \XMM3
892 pxor \TMP1, \XMM4
894 movaps 0x10*\index(%arg1), \TMP1
895 aesenc \TMP1, \XMM1
896 aesenc \TMP1, \XMM2
897 aesenc \TMP1, \XMM3
898 aesenc \TMP1, \XMM4
901 movaps 0x10*\index(%arg1), \TMP1
902 aesenc \TMP1, \XMM1
903 aesenc \TMP1, \XMM2
904 aesenc \TMP1, \XMM3
905 aesenc \TMP1, \XMM4
928 movdqu 16*0(%arg4 , %r11 , 1), \TMP1
929 pxor \TMP1, \XMM1
932 movdqa \TMP1, \XMM1
934 movdqu 16*1(%arg4 , %r11 , 1), \TMP1
935 pxor \TMP1, \XMM2
938 movdqa \TMP1, \XMM2
940 movdqu 16*2(%arg4 , %r11 , 1), \TMP1
941 pxor \TMP1, \XMM3
944 movdqa \TMP1, \XMM3
946 movdqu 16*3(%arg4 , %r11 , 1), \TMP1
947 pxor \TMP1, \XMM4
950 movdqa \TMP1, \XMM4
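
The tail of INITIAL_BLOCKS_ENC_DEC shows the four-lane CTR pattern: paddd ONE(%RIP) increments the counter once per lane, round key 0 is XORed in, the middle rounds run aesenc across all four lanes, and aesenclast plus a pxor with the input data produces the output blocks. A sketch of the same shape (counter byte-order shuffles and the GHASH of the results are left out; nrounds is 10/12/14 for AES-128/192/256):

    #include <emmintrin.h>
    #include <wmmintrin.h>     /* aesenc/aesenclast; compile with -maes */

    static void ctr4_encrypt(const __m128i *rk, int nrounds, __m128i y0,
                             const __m128i in[4], __m128i out[4])
    {
        const __m128i one = _mm_set_epi32(0, 0, 0, 1);  /* ONE(%RIP) */
        __m128i b[4];
        int i, r;

        for (i = 0; i < 4; i++) {
            y0 = _mm_add_epi32(y0, one);        /* paddd: INCR Y0 */
            b[i] = _mm_xor_si128(y0, rk[0]);    /* round key 0    */
        }
        for (r = 1; r < nrounds; r++)           /* middle rounds  */
            for (i = 0; i < 4; i++)
                b[i] = _mm_aesenc_si128(b[i], rk[r]);
        for (i = 0; i < 4; i++)                 /* last round + XOR data */
            out[i] = _mm_xor_si128(in[i],
                                   _mm_aesenclast_si128(b[i], rk[nrounds]));
    }
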
976 .macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \
1012 movaps 0x10(%arg1), \TMP1
1013 aesenc \TMP1, \XMM1 # Round 1
1014 aesenc \TMP1, \XMM2
1015 aesenc \TMP1, \XMM3
1016 aesenc \TMP1, \XMM4
1017 movaps 0x20(%arg1), \TMP1
1018 aesenc \TMP1, \XMM1 # Round 2
1019 aesenc \TMP1, \XMM2
1020 aesenc \TMP1, \XMM3
1021 aesenc \TMP1, \XMM4
1022 movdqa \XMM6, \TMP1
1026 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
1045 pxor \TMP1, \TMP4
1049 movdqa \XMM7, \TMP1
1056 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1075 pxor \TMP1, \TMP4
1083 movdqa \XMM8, \TMP1
1087 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1134 pxor \TMP4, \TMP1
1137 pxor \TMP1, \TMP2
1143 pxor \TMP2, \TMP1 # accumulate the results in TMP1:XMM5
1173 pxor \TMP1, \XMM5 # result is in XMM5
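
GHASH_4_ENCRYPT_4_PARALLEL_enc interleaves the AES rounds of four counter blocks with four Karatsuba multiplies against HashKey^4..HashKey, XOR-accumulating the unreduced partial products in TMP1:XMM5 so that a single reduction closes the round. The algebra behind this aggregation is the unrolled Horner form of GHASH over four ciphertext blocks, working in GF(2^128):

    Y_4 = (Y_0 \oplus C_1)\,H^4 \oplus C_2\,H^3 \oplus C_3\,H^2 \oplus C_4\,H

XOR is addition in GF(2^128), so the four products can be summed before one shared modular reduction.
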
1184 .macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \
1220 movaps 0x10(%arg1), \TMP1
1221 aesenc \TMP1, \XMM1 # Round 1
1222 aesenc \TMP1, \XMM2
1223 aesenc \TMP1, \XMM3
1224 aesenc \TMP1, \XMM4
1225 movaps 0x20(%arg1), \TMP1
1226 aesenc \TMP1, \XMM1 # Round 2
1227 aesenc \TMP1, \XMM2
1228 aesenc \TMP1, \XMM3
1229 aesenc \TMP1, \XMM4
1230 movdqa \XMM6, \TMP1
1234 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
1253 pxor \TMP1, \TMP4
1257 movdqa \XMM7, \TMP1
1264 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1283 pxor \TMP1, \TMP4
1291 movdqa \XMM8, \TMP1
1295 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1346 pxor \TMP4, \TMP1
1349 pxor \TMP1, \TMP2
1355 pxor \TMP2, \TMP1 # accumulate the results in TMP1:XMM5
1385 pxor \TMP1, \XMM5 # result is in XMM5
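
The _dec variant runs the same multiply/accumulate schedule; the behavioral difference is which block feeds the next GHASH round: encryption hashes the ciphertext it just produced, decryption hashes the ciphertext it just read. A sketch of that per-block difference (names are illustrative):

    #include <emmintrin.h>

    /* Decrypt one block; the return value is what GHASH must absorb
     * next. The enc variant would return the stored output instead. */
    static __m128i dec_block_keep_ct(const __m128i *src, __m128i *dst,
                                     __m128i keystream)
    {
        __m128i ct = _mm_loadu_si128(src);
        _mm_storeu_si128(dst, _mm_xor_si128(ct, keystream));
        return ct;                       /* ciphertext feeds GHASH */
    }
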
1391 .macro GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
1407 # Multiply TMP1 (= XMM2) by HashKey^3 (using Karatsuba)
1409 movdqa \XMM2, \TMP1
1413 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1417 pxor \TMP1, \TMP6
1422 # Multiply TMP1 (= XMM3) by HashKey^2 (using Karatsuba)
1424 movdqa \XMM3, \TMP1
1428 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1432 pxor \TMP1, \TMP6
1436 # Multiply TMP1 (= XMM4) by HashKey (using Karatsuba)
1437 movdqa \XMM4, \TMP1
1441 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1445 pxor \TMP1, \TMP6
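
GHASH_LAST_4 folds the four per-lane hash states into one: each XMMi is multiplied by the matching power of H (XMM1 by HashKey_4 down to XMM4 by HashKey) and the partial products are XOR-accumulated (the pxor \TMP1, \TMP6 lines collect the a1*b1 halves) ahead of one final reduction. Functionally, with a reducing ghash_mul() it collapses to:

    #include <emmintrin.h>

    __m128i ghash_mul(__m128i x, __m128i h);    /* multiply + reduce */

    /* h[0..3] = H^4, H^3, H^2, H; x[0..3] = the four lane states.
     * The assembly defers the reduction until after the XORs, which
     * this simplified version instead performs per product. */
    static __m128i ghash_last_4(const __m128i x[4], const __m128i h[4])
    {
        __m128i acc = ghash_mul(x[0], h[0]);
        acc = _mm_xor_si128(acc, ghash_mul(x[1], h[1]));
        acc = _mm_xor_si128(acc, ghash_mul(x[2], h[2]));
        return _mm_xor_si128(acc, ghash_mul(x[3], h[3]));
    }
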
1492 .macro ENCRYPT_SINGLE_BLOCK XMM0 TMP1
1501 MOVADQ (%r10),\TMP1
1502 aesenc \TMP1,\XMM0
1507 MOVADQ (%r10),\TMP1
1508 aesenclast \TMP1,\XMM0
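
ENCRYPT_SINGLE_BLOCK is a straight AES encryption of one block: the _esb_loop_ runs aesenc over the middle round keys (9, 11, or 13 of them, per the keysize arithmetic in the macro) and aesenclast finishes with the final key. The same shape in C intrinsics, with the key schedule assumed already expanded in rk[]:

    #include <emmintrin.h>
    #include <wmmintrin.h>           /* compile with -maes */

    static __m128i encrypt_single_block(__m128i block, const __m128i *rk,
                                        int nrounds)  /* 10, 12 or 14 */
    {
        int r;
        block = _mm_xor_si128(block, rk[0]);          /* initial whitening */
        for (r = 1; r < nrounds; r++)
            block = _mm_aesenc_si128(block, rk[r]);   /* _esb_loop_ */
        return _mm_aesenclast_si128(block, rk[nrounds]);
    }
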