Lines Matching refs:TMP3

195 .macro PRECOMPUTE SUBKEY TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 TMP7
197 movdqu (%r12), \TMP3
199 pshufb \TMP2, \TMP3
203 movdqa \TMP3, \TMP2
204 psllq $1, \TMP3
209 por \TMP2, \TMP3
216 pxor \TMP2, \TMP3
217 movdqu \TMP3, HashKey(%arg2)
219 movdqa \TMP3, \TMP5
220 pshufd $78, \TMP3, \TMP1
221 pxor \TMP3, \TMP1
224 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
232 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
239 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
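
The PRECOMPUTE lines above byte-swap the raw hash key (pshufb), shift the 128-bit value left by one bit across the qword boundary (psllq/psrlq/pslldq/por), conditionally fold in the GCM polynomial (the pxor against POLY), store the result as HashKey, and then derive the higher key powers with the three GHASH_MUL calls. A minimal C sketch of the shift-and-reduce step, assuming the byte-swapped key is held as two 64-bit halves with h[1] the more significant; hashkey_shift_left1 is a hypothetical helper, not the kernel's API:

    #include <stdint.h>

    /* HashKey<<1 mod poly: the step PRECOMPUTE performs with
     * psllq/psrlq/pslldq/por plus a conditional pxor of
     * POLY = 0xC2000000000000000000000000000001 (hedged sketch). */
    static void hashkey_shift_left1(uint64_t h[2])   /* h[1] = upper 64 bits */
    {
        uint64_t carry = h[1] >> 63;                 /* bit shifted out at the top */

        h[1] = (h[1] << 1) | (h[0] >> 63);           /* 128-bit shift left by one */
        h[0] <<= 1;
        if (carry) {                                 /* reduce if the top bit fell out */
            h[1] ^= 0xC200000000000000ULL;
            h[0] ^= 0x0000000000000001ULL;
        }
    }

The GHASH_MUL calls at source lines 224/232/239 then multiply this value by itself repeatedly to cache HashKey^2..HashKey^4 (together with their pshufd $78 / pxor halves used by the Karatsuba multiplies) for the four-block-parallel path.
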
510 .macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
513 pshufd $78, \HK, \TMP3
515 pxor \HK, \TMP3 # TMP3 = b1+b0
518 pclmulqdq $0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0)
521 movdqa \TMP2, \TMP3
522 pslldq $8, \TMP3 # left shift TMP3 2 DWs
524 pxor \TMP3, \GH
530 movdqa \GH, \TMP3
531 movdqa \GH, \TMP4 # copy GH into TMP2,TMP3 and TMP4
535 pslld $30, \TMP3 # packed left shift << 30
537 pxor \TMP3, \TMP2 # xor the shifted versions
546 movdqa \GH,\TMP2 # copy GH into TMP2,TMP3 and TMP4
549 movdqa \GH,\TMP3
552 psrld $2,\TMP3 # packed right shift >> 2
554 pxor \TMP3,\TMP2 # xor the shifted versions
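
The GHASH_MUL lines above are the multiply half of a Karatsuba carry-less multiplication: pshufd $78 + pxor build the "b1+b0" half of the hash key (the matching "a1+a0" and the a1*b1 / a0*b0 PCLMULQDQs do not touch TMP3 and so are not listed), one PCLMULQDQ forms the middle term, and pslldq/psrldq fold it into the low and high 128-bit halves of the 256-bit product. A minimal intrinsics sketch of that structure; clmul_karatsuba is a hypothetical helper, not the kernel's code:

    #include <wmmintrin.h>   /* PCLMUL intrinsics */
    #include <emmintrin.h>

    /* 128x128 -> 256-bit carry-less multiply with three PCLMULQDQs
     * instead of four (Karatsuba), result returned as lo/hi halves. */
    static void clmul_karatsuba(__m128i a, __m128i b, __m128i *lo, __m128i *hi)
    {
        __m128i a_sum = _mm_xor_si128(a, _mm_shuffle_epi32(a, 78));  /* a1^a0 */
        __m128i b_sum = _mm_xor_si128(b, _mm_shuffle_epi32(b, 78));  /* b1^b0 */

        __m128i p_lo  = _mm_clmulepi64_si128(a, b, 0x00);            /* a0*b0 */
        __m128i p_hi  = _mm_clmulepi64_si128(a, b, 0x11);            /* a1*b1 */
        __m128i p_mid = _mm_clmulepi64_si128(a_sum, b_sum, 0x00);    /* (a0^a1)*(b0^b1) */

        p_mid = _mm_xor_si128(p_mid, p_lo);          /* middle term = a1*b0 ^ a0*b1 */
        p_mid = _mm_xor_si128(p_mid, p_hi);

        *lo = _mm_xor_si128(p_lo, _mm_slli_si128(p_mid, 8));  /* fold low half of mid  */
        *hi = _mm_xor_si128(p_hi, _mm_srli_si128(p_mid, 8));  /* fold high half of mid */
    }

The reduction that follows (the pslld $30 / psrld $2 lines above and in the parallel macros) is sketched after the GHASH_LAST_4 entries at the end of this listing.
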
594 .macro CALC_AAD_HASH HASHKEY AAD AADLEN TMP1 TMP2 TMP3 TMP4 TMP5 \
608 GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
624 GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
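
CALC_AAD_HASH folds the additional authenticated data into the digest one 16-byte block at a time: byte-swap the block, XOR it into the running hash, then GHASH_MUL by the hash key. The same serial fold reappears in INITIAL_BLOCKS_ENC_DEC below as the xmm6/xmm7/xmm8 GHASH_MUL cascade. A minimal sketch of the per-block update, assuming a ghash_mul() assembled from the Karatsuba and reduction helpers sketched elsewhere in this listing, and ignoring byte-swapping and the partial-block tail:

    #include <wmmintrin.h>
    #include <stddef.h>

    /* Assumed helper: one GF(2^128) multiply by the hash key
     * (Karatsuba multiply + two-phase reduction, as in GHASH_MUL). */
    __m128i ghash_mul(__m128i a, __m128i hk);

    /* Serial AAD hashing: hash = (hash ^ block_i) * H per full block. */
    static __m128i calc_aad_hash(__m128i hash, __m128i hk,
                                 const unsigned char *aad, size_t aadlen)
    {
        while (aadlen >= 16) {
            __m128i blk = _mm_loadu_si128((const __m128i *)aad);
            hash = ghash_mul(_mm_xor_si128(hash, blk), hk);
            aad += 16;
            aadlen -= 16;
        }
        return hash;   /* the macro handles a trailing partial block separately */
    }
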
787 .macro INITIAL_BLOCKS_ENC_DEC TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
849 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
851 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
853 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
856 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
858 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
861 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
976 .macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \
1027 movaps 0x30(%arg1), \TMP3
1028 aesenc \TMP3, \XMM1 # Round 3
1029 aesenc \TMP3, \XMM2
1030 aesenc \TMP3, \XMM3
1031 aesenc \TMP3, \XMM4
1033 movaps 0x40(%arg1), \TMP3
1034 aesenc \TMP3, \XMM1 # Round 4
1035 aesenc \TMP3, \XMM2
1036 aesenc \TMP3, \XMM3
1037 aesenc \TMP3, \XMM4
1040 movaps 0x50(%arg1), \TMP3
1041 aesenc \TMP3, \XMM1 # Round 5
1042 aesenc \TMP3, \XMM2
1043 aesenc \TMP3, \XMM3
1044 aesenc \TMP3, \XMM4
1057 movaps 0x60(%arg1), \TMP3
1058 aesenc \TMP3, \XMM1 # Round 6
1059 aesenc \TMP3, \XMM2
1060 aesenc \TMP3, \XMM3
1061 aesenc \TMP3, \XMM4
1063 movaps 0x70(%arg1), \TMP3
1064 aesenc \TMP3, \XMM1 # Round 7
1065 aesenc \TMP3, \XMM2
1066 aesenc \TMP3, \XMM3
1067 aesenc \TMP3, \XMM4
1070 movaps 0x80(%arg1), \TMP3
1071 aesenc \TMP3, \XMM1 # Round 8
1072 aesenc \TMP3, \XMM2
1073 aesenc \TMP3, \XMM3
1074 aesenc \TMP3, \XMM4
1088 movaps 0x90(%arg1), \TMP3
1089 aesenc \TMP3, \XMM1 # Round 9
1090 aesenc \TMP3, \XMM2
1091 aesenc \TMP3, \XMM3
1092 aesenc \TMP3, \XMM4
1101 MOVADQ (%r10),\TMP3
1103 aesenc \TMP3, %xmm\index
1110 MOVADQ (%r10), \TMP3
1111 aesenclast \TMP3, \XMM1 # Round 10
1112 aesenclast \TMP3, \XMM2
1113 aesenclast \TMP3, \XMM3
1114 aesenclast \TMP3, \XMM4
1117 movdqu (%arg4,%r11,1), \TMP3
1118 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
1119 movdqu 16(%arg4,%r11,1), \TMP3
1120 pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK
1121 movdqu 32(%arg4,%r11,1), \TMP3
1122 pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK
1123 movdqu 48(%arg4,%r11,1), \TMP3
1124 pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK
1139 movdqa \TMP2, \TMP3
1140 pslldq $8, \TMP3 # left shift TMP3 2 DWs
1142 pxor \TMP3, \XMM5
1148 movdqa \XMM5, \TMP3
1150 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
1152 pslld $30, \TMP3 # packed left shift << 30
1154 pxor \TMP3, \TMP2 # xor the shifted versions
1163 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
1164 movdqa \XMM5,\TMP3
1167 psrld $2, \TMP3 # packed right shift >> 2
1169 pxor \TMP3,\TMP2 # xor the shifted versions
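
The Round 3..9 lines above show the core pattern of GHASH_4_ENCRYPT_4_PARALLEL_enc: each round key is loaded once from the expanded schedule at %arg1 and applied to four counter blocks back to back, with the PCLMULQDQ-based GHASH work (the TMP2/TMP3 shift-and-xor lines that follow) interleaved between rounds to hide latency. A minimal AES-NI sketch of the four-wide round structure for AES-128, assuming the expanded schedule is already in rk[0..10]; the interleaved GHASH, counter handling and byte-swapping are omitted, and aes128_encrypt_x4 is a hypothetical helper:

    #include <wmmintrin.h>   /* AES-NI intrinsics */

    /* Encrypt four blocks with one shared AES-128 key schedule,
     * mirroring the aesenc/aesenclast pattern of Rounds 1..10 above. */
    static void aes128_encrypt_x4(const __m128i rk[11], __m128i b[4])
    {
        int i, r;

        for (i = 0; i < 4; i++)                  /* whitening: XOR round key 0 */
            b[i] = _mm_xor_si128(b[i], rk[0]);

        for (r = 1; r < 10; r++)                 /* rounds 1..9 */
            for (i = 0; i < 4; i++)
                b[i] = _mm_aesenc_si128(b[i], rk[r]);

        for (i = 0; i < 4; i++)                  /* round 10 */
            b[i] = _mm_aesenclast_si128(b[i], rk[10]);
    }

The MOVADQ/aesenc loop at source lines 1101-1103 covers the extra rounds of the larger key sizes; after the last round the keystream is XORed with the data (the movdqu/pxor lines at 1117-1124).
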
1184 .macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \
1235 movaps 0x30(%arg1), \TMP3
1236 aesenc \TMP3, \XMM1 # Round 3
1237 aesenc \TMP3, \XMM2
1238 aesenc \TMP3, \XMM3
1239 aesenc \TMP3, \XMM4
1241 movaps 0x40(%arg1), \TMP3
1242 aesenc \TMP3, \XMM1 # Round 4
1243 aesenc \TMP3, \XMM2
1244 aesenc \TMP3, \XMM3
1245 aesenc \TMP3, \XMM4
1248 movaps 0x50(%arg1), \TMP3
1249 aesenc \TMP3, \XMM1 # Round 5
1250 aesenc \TMP3, \XMM2
1251 aesenc \TMP3, \XMM3
1252 aesenc \TMP3, \XMM4
1265 movaps 0x60(%arg1), \TMP3
1266 aesenc \TMP3, \XMM1 # Round 6
1267 aesenc \TMP3, \XMM2
1268 aesenc \TMP3, \XMM3
1269 aesenc \TMP3, \XMM4
1271 movaps 0x70(%arg1), \TMP3
1272 aesenc \TMP3, \XMM1 # Round 7
1273 aesenc \TMP3, \XMM2
1274 aesenc \TMP3, \XMM3
1275 aesenc \TMP3, \XMM4
1278 movaps 0x80(%arg1), \TMP3
1279 aesenc \TMP3, \XMM1 # Round 8
1280 aesenc \TMP3, \XMM2
1281 aesenc \TMP3, \XMM3
1282 aesenc \TMP3, \XMM4
1296 movaps 0x90(%arg1), \TMP3
1297 aesenc \TMP3, \XMM1 # Round 9
1298 aesenc \TMP3, \XMM2
1299 aesenc \TMP3, \XMM3
1300 aesenc \TMP3, \XMM4
1309 MOVADQ (%r10),\TMP3
1311 aesenc \TMP3, %xmm\index
1318 MOVADQ (%r10), \TMP3
1319 aesenclast \TMP3, \XMM1 # last round
1320 aesenclast \TMP3, \XMM2
1321 aesenclast \TMP3, \XMM3
1322 aesenclast \TMP3, \XMM4
1325 movdqu (%arg4,%r11,1), \TMP3
1326 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
1328 movdqa \TMP3, \XMM1
1329 movdqu 16(%arg4,%r11,1), \TMP3
1330 pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK
1332 movdqa \TMP3, \XMM2
1333 movdqu 32(%arg4,%r11,1), \TMP3
1334 pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK
1336 movdqa \TMP3, \XMM3
1337 movdqu 48(%arg4,%r11,1), \TMP3
1338 pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK
1340 movdqa \TMP3, \XMM4
1351 movdqa \TMP2, \TMP3
1352 pslldq $8, \TMP3 # left shift TMP3 2 DWs
1354 pxor \TMP3, \XMM5
1360 movdqa \XMM5, \TMP3
1362 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
1364 pslld $30, \TMP3 # packed left shift << 30
1366 pxor \TMP3, \TMP2 # xor the shifted versions
1375 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
1376 movdqa \XMM5,\TMP3
1379 psrld $2, \TMP3 # packed right shift >> 2
1381 pxor \TMP3,\TMP2 # xor the shifted versions
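
The _dec variant runs the same key schedule; the difference visible in this listing is right after the XOR step: the loaded ciphertext (still in TMP3) is copied back into XMM1..XMM4 (the movdqa lines at source lines 1328/1332/1336/1340), because for decryption GHASH must be computed over the ciphertext rather than over the just-produced plaintext. A minimal sketch of that asymmetry; ctr_block_finish is a hypothetical helper, not kernel code:

    #include <wmmintrin.h>

    /* Per-block tail of the parallel loop: XOR the keystream with the
     * input and choose which value feeds the next GHASH update. */
    static __m128i ctr_block_finish(__m128i keystream, __m128i in,
                                    int decrypt, __m128i *out)
    {
        __m128i result = _mm_xor_si128(keystream, in);  /* Ciphertext/Plaintext XOR EK */

        *out = result;
        return decrypt ? in : result;   /* GHASH always hashes the ciphertext */
    }
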
1391 .macro GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
1459 movdqa \XMMDst, \TMP3
1461 # move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently
1463 pslld $30, \TMP3 # packed left shift << 30
1465 pxor \TMP3, \TMP2 # xor the shifted versions
1475 movdqa \XMMDst, \TMP3
1478 psrld $2, \TMP3 # packed right shift >> 2
1480 pxor \TMP3, \TMP2 # xor the shifted versions
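
The pslld $30 and psrld $2 lines here, like the matching lines inside GHASH_MUL and the two parallel macros, belong to the two-phase reduction of the 256-bit carry-less product modulo the GCM polynomial; the copies that do not touch TMP3 are shifted by 31/25 and 1/7 on the surrounding, unlisted lines. A minimal intrinsics sketch that mirrors this shift-and-xor sequence, taking the lo/hi product halves from the Karatsuba sketch above; ghash_reduce is a hypothetical helper:

    #include <emmintrin.h>

    /* Two-phase reduction: fold the low product half with <<31/<<30/<<25,
     * then with >>1/>>2/>>7, and finally xor in the high half. */
    static __m128i ghash_reduce(__m128i lo, __m128i hi)
    {
        __m128i t2, t3, t4, t5;

        t2 = _mm_slli_epi32(lo, 31);          /* first phase */
        t3 = _mm_slli_epi32(lo, 30);
        t4 = _mm_slli_epi32(lo, 25);
        t2 = _mm_xor_si128(_mm_xor_si128(t2, t3), t4);
        t5 = _mm_srli_si128(t2, 4);           /* right shift 1 DW */
        t2 = _mm_slli_si128(t2, 12);          /* left shift 3 DWs */
        lo = _mm_xor_si128(lo, t2);

        t2 = _mm_srli_epi32(lo, 1);           /* second phase */
        t3 = _mm_srli_epi32(lo, 2);
        t4 = _mm_srli_epi32(lo, 7);
        t2 = _mm_xor_si128(_mm_xor_si128(t2, t3), t4);
        t2 = _mm_xor_si128(t2, t5);
        lo = _mm_xor_si128(lo, t2);

        return _mm_xor_si128(lo, hi);         /* fold in the high half */
    }

Together with clmul_karatsuba above, this gives the shape of one full GHASH_MUL: ghash_reduce(lo, hi) after clmul_karatsuba(a, h_shifted, &lo, &hi).
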