/* Function log vectorized with AVX-512. KNL and SKX versions.
   Copyright (C) 2014-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include "svml_d_log_data.h"
#include "svml_d_wrapper_impl.h"

	.text
ENTRY (_ZGVeN8v_log_knl)
/*
   ALGORITHM DESCRIPTION:

     log(x) = -log(Rcp) + log(Rcp*x),
       where Rcp ~ 1/x (accuracy ~9 bits, obtained by
       rounding HW approximation to 1+9 mantissa bits)

     Reduced argument R=Rcp*x-1 is used to approximate log(1+R) as polynomial

     log(Rcp) = exponent_Rcp*log(2) + log(mantissa_Rcp)
       -log(mantissa_Rcp) is obtained from a lookup table,
       accessed by a 9-bit index
 */
	pushq	%rbp
	cfi_adjust_cfa_offset (8)
	cfi_rel_offset (%rbp, 0)
	movq	%rsp, %rbp
	cfi_def_cfa_register (%rbp)
	andq	$-64, %rsp
	subq	$1280, %rsp
	movq	__svml_dlog_data@GOTPCREL(%rip), %rdx
	movq	$-1, %rax

/* isolate exponent bits */
	vpsrlq	$20, %zmm0, %zmm2
	vpsrlq	$32, %zmm2, %zmm3
	vpxord	%zmm2, %zmm2, %zmm2
	kxnorw	%k3, %k3, %k3
	vmovups	_Two10(%rdx), %zmm1
	vmovups	_One(%rdx), %zmm9
	vpmovqd	%zmm3, %ymm4

/* convert biased exponent to DP format */
	vcvtdq2pd %ymm4, %zmm13

/* preserve mantissa, set input exponent to 2^(-10) */
	vpternlogq $248, _ExpMask(%rdx), %zmm0, %zmm1
	vcmppd	$17, _MinNorm(%rdx), %zmm0, %k1

/* reciprocal approximation good to at least 11 bits */
	vrcp28pd %zmm1, %zmm5
	vpbroadcastq %rax, %zmm6{%k1}{z}
	vmovups	_poly_coeff_3(%rdx), %zmm15
	vcmppd	$22, _MaxNorm(%rdx), %zmm0, %k2
	vmovups	_Bias1(%rdx), %zmm14

/* round reciprocal to nearest integer, will have 1+9 mantissa bits */
	vrndscalepd $8, %zmm5, %zmm11
	vpbroadcastq %rax, %zmm7{%k2}{z}

/* argument reduction started: R = Mantissa*Rcp - 1 */
	vfmsub213pd %zmm9, %zmm11, %zmm1

/* calculate index for table lookup */
	vpsrlq	$40, %zmm11, %zmm10
	vgatherqpd _LogRcp_lookup(%rdx,%zmm10), %zmm2{%k3}
	vcmppd	$30, _Threshold(%rdx), %zmm11, %k1

/* combine and get argument value range mask */
	vporq	%zmm7, %zmm6, %zmm8

/* exponent*log(2.0) */
	vmovups	_poly_coeff_1(%rdx), %zmm11
	vmulpd	%zmm1, %zmm1, %zmm10
	vptestmq %zmm8, %zmm8, %k0
	vfmadd213pd _poly_coeff_4(%rdx), %zmm1, %zmm15
	kmovw	%k0, %ecx

/* polynomial computation */
	vfmadd213pd _poly_coeff_2(%rdx), %zmm1, %zmm11
	movzbl	%cl, %ecx
	vpbroadcastq %rax, %zmm12{%k1}{z}
	vfmadd213pd %zmm15, %zmm10, %zmm11
	vpternlogq $248, _Bias(%rdx), %zmm12, %zmm14

/*
   Table stores -log(0.5*mantissa) for larger mantissas,
   adjust exponent accordingly
 */
	vsubpd	%zmm14, %zmm13, %zmm3

/*
   reconstruction:
   (exponent*log(2)) + (LogRcp + (R+poly))
 */
	vfmadd213pd %zmm1, %zmm10, %zmm11
	vaddpd	%zmm2, %zmm11, %zmm1
	vfmadd132pd _L2(%rdx), %zmm1, %zmm3
	testl	%ecx, %ecx
	jne	.LBL_1_3

.LBL_1_2:
	cfi_remember_state
	vmovaps	%zmm3, %zmm0
	movq	%rbp, %rsp
	cfi_def_cfa_register (%rsp)
	popq	%rbp
	cfi_adjust_cfa_offset (-8)
	cfi_restore (%rbp)
	ret

.LBL_1_3:
	cfi_restore_state
	vmovups	%zmm0, 1152(%rsp)
	vmovups	%zmm3, 1216(%rsp)
	je	.LBL_1_2

	xorb	%dl, %dl
	kmovw	%k4, 1048(%rsp)
	xorl	%eax, %eax
	kmovw	%k5, 1040(%rsp)
	kmovw	%k6, 1032(%rsp)
	kmovw	%k7, 1024(%rsp)
	vmovups	%zmm16, 960(%rsp)
	vmovups	%zmm17, 896(%rsp)
	vmovups	%zmm18, 832(%rsp)
	vmovups	%zmm19, 768(%rsp)
	vmovups	%zmm20, 704(%rsp)
	vmovups	%zmm21, 640(%rsp)
	vmovups	%zmm22, 576(%rsp)
	vmovups	%zmm23, 512(%rsp)
	vmovups	%zmm24, 448(%rsp)
	vmovups	%zmm25, 384(%rsp)
	vmovups	%zmm26, 320(%rsp)
	vmovups	%zmm27, 256(%rsp)
	vmovups	%zmm28, 192(%rsp)
	vmovups	%zmm29, 128(%rsp)
	vmovups	%zmm30, 64(%rsp)
	vmovups	%zmm31, (%rsp)
	movq	%rsi, 1064(%rsp)
	movq	%rdi, 1056(%rsp)
	movq	%r12, 1096(%rsp)
	cfi_offset_rel_rsp (12, 1096)
	movb	%dl, %r12b
	movq	%r13, 1088(%rsp)
	cfi_offset_rel_rsp (13, 1088)
	movl	%ecx, %r13d
	movq	%r14, 1080(%rsp)
	cfi_offset_rel_rsp (14, 1080)
	movl	%eax, %r14d
	movq	%r15, 1072(%rsp)
	cfi_offset_rel_rsp (15, 1072)
	cfi_remember_state

.LBL_1_6:
	btl	%r14d, %r13d
	jc	.LBL_1_12

.LBL_1_7:
	lea	1(%r14), %esi
	btl	%esi, %r13d
	jc	.LBL_1_10

.LBL_1_8:
	addb	$1, %r12b
	addl	$2, %r14d
	cmpb	$16, %r12b
	jb	.LBL_1_6

	kmovw	1048(%rsp), %k4
	movq	1064(%rsp), %rsi
	kmovw	1040(%rsp), %k5
	movq	1056(%rsp), %rdi
	kmovw	1032(%rsp), %k6
	movq	1096(%rsp), %r12
	cfi_restore (%r12)
	movq	1088(%rsp), %r13
	cfi_restore (%r13)
	kmovw	1024(%rsp), %k7
	vmovups	960(%rsp), %zmm16
	vmovups	896(%rsp), %zmm17
	vmovups	832(%rsp), %zmm18
	vmovups	768(%rsp), %zmm19
	vmovups	704(%rsp), %zmm20
	vmovups	640(%rsp), %zmm21
	vmovups	576(%rsp), %zmm22
	vmovups	512(%rsp), %zmm23
	vmovups	448(%rsp), %zmm24
	vmovups	384(%rsp), %zmm25
	vmovups	320(%rsp), %zmm26
	vmovups	256(%rsp), %zmm27
	vmovups	192(%rsp), %zmm28
	vmovups	128(%rsp), %zmm29
	vmovups	64(%rsp), %zmm30
	vmovups	(%rsp), %zmm31
	movq	1080(%rsp), %r14
	cfi_restore (%r14)
	movq	1072(%rsp), %r15
	cfi_restore (%r15)
	vmovups	1216(%rsp), %zmm3
	jmp	.LBL_1_2

.LBL_1_10:
	cfi_restore_state
	movzbl	%r12b, %r15d
	shlq	$4, %r15
	vmovsd	1160(%rsp,%r15), %xmm0
	call	JUMPTARGET(log)
	vmovsd	%xmm0, 1224(%rsp,%r15)
	jmp	.LBL_1_8

.LBL_1_12:
	movzbl	%r12b, %r15d
	shlq	$4, %r15
	vmovsd	1152(%rsp,%r15), %xmm0
	call	JUMPTARGET(log)
	vmovsd	%xmm0, 1216(%rsp,%r15)
	jmp	.LBL_1_7
END (_ZGVeN8v_log_knl)

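/* For reference, a scalar C sketch of the algorithm shared by the KNL
   version above and the SKX version below.  It is illustrative only and
   not the exact glibc computation: frexp/nearbyint/fma stand in for the
   AVX-512 mantissa/exponent handling (vpternlogq, vrcp28pd/vrcp14pd,
   vrndscalepd, vfmsub213pd), the Taylor terms stand in for the minimax
   coefficients in _poly_coeff_1.._poly_coeff_4, and calling log () on
   the rounded reciprocal stands in for the 9-bit-indexed _LogRcp_lookup
   table and the _Bias/_Bias1/_L2 exponent fix-up.  Special inputs
   (x <= 0, NaN, out-of-range) go to the scalar fallback in the real
   code and are ignored here.

     #include <math.h>

     static double
     vlog_sketch (double x)
     {
       int e;
       double m = frexp (x, &e);           // x = m * 2^e, m in [0.5, 1)

       // Rcp ~ 1/m, rounded so it keeps only 1+9 mantissa bits.
       double rcp = nearbyint (ldexp (1.0 / m, 9)) * 0x1p-9;

       // Reduced argument R = Rcp*m - 1, |R| <= ~2^-10.
       double r = fma (rcp, m, -1.0);

       // log(1+R) as a short polynomial in R.
       double poly = r + r * r * (-0.5 + r * (1.0 / 3.0
                                  + r * (-0.25 + r * 0.2)));

       // log(x) = e*log(2) - log(Rcp) + log(1+R).
       return e * M_LN2 - log (rcp) + poly;
     }
 */
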
ENTRY (_ZGVeN8v_log_skx)
/*
   ALGORITHM DESCRIPTION:

     log(x) = -log(Rcp) + log(Rcp*x),
       where Rcp ~ 1/x (accuracy ~9 bits,
       obtained by rounding HW approximation to 1+9 mantissa bits)

     Reduced argument R=Rcp*x-1 is used to approximate log(1+R) as polynomial

     log(Rcp) = exponent_Rcp*log(2) + log(mantissa_Rcp)
       -log(mantissa_Rcp) is obtained from a lookup table,
       accessed by a 9-bit index
 */
	pushq	%rbp
	cfi_adjust_cfa_offset (8)
	cfi_rel_offset (%rbp, 0)
	movq	%rsp, %rbp
	cfi_def_cfa_register (%rbp)
	andq	$-64, %rsp
	subq	$1280, %rsp
	movq	__svml_dlog_data@GOTPCREL(%rip), %rax
	vmovaps	%zmm0, %zmm3
	kxnorw	%k3, %k3, %k3
	vmovups	_Two10(%rax), %zmm2
	vmovups	_Threshold(%rax), %zmm14
	vmovups	_One(%rax), %zmm11
	vcmppd	$21, _MinNorm(%rax), %zmm3, %k1
	vcmppd	$18, _MaxNorm(%rax), %zmm3, %k2

/* isolate exponent bits */
	vpsrlq	$20, %zmm3, %zmm4

/* preserve mantissa, set input exponent to 2^(-10) */
	vpternlogq $248, _ExpMask(%rax), %zmm3, %zmm2
	vpternlogd $0xff, %zmm1, %zmm1, %zmm1
	vpsrlq	$32, %zmm4, %zmm6

/* reciprocal approximation good to at least 11 bits */
	vrcp14pd %zmm2, %zmm5

/* exponent*log(2.0) */
	vmovups	_poly_coeff_1(%rax), %zmm4
	vpmovqd	%zmm6, %ymm7

/* round reciprocal to nearest integer, will have 1+9 mantissa bits */
	vrndscalepd $8, %zmm5, %zmm0

/* calculate index for table lookup */
	vpsrlq	$40, %zmm0, %zmm12

/* argument reduction started: R = Mantissa*Rcp - 1 */
	vfmsub213pd %zmm11, %zmm0, %zmm2
	vpmovqd	%zmm12, %ymm13

/* polynomial computation */
	vfmadd213pd _poly_coeff_2(%rax), %zmm2, %zmm4
	vmovaps	%zmm1, %zmm8
	vmovaps	%zmm1, %zmm9
	vpxord	%zmm5, %zmm5, %zmm5
	vgatherdpd _LogRcp_lookup(%rax,%ymm13), %zmm5{%k3}
	vmovups	_Bias1(%rax), %zmm13
	vpandnq	%zmm3, %zmm3, %zmm8{%k1}
	vcmppd	$21, %zmm0, %zmm14, %k1
	vpandnq	%zmm14, %zmm14, %zmm1{%k1}
	vmulpd	%zmm2, %zmm2, %zmm14
	vpternlogq $248, _Bias(%rax), %zmm1, %zmm13
	vmovups	_poly_coeff_3(%rax), %zmm1
	vfmadd213pd _poly_coeff_4(%rax), %zmm2, %zmm1
	vfmadd213pd %zmm1, %zmm14, %zmm4

/*
   reconstruction:
   (exponent*log(2)) + (LogRcp + (R+poly))
 */
	vfmadd213pd %zmm2, %zmm14, %zmm4
	vaddpd	%zmm5, %zmm4, %zmm2
	vpandnq	%zmm3, %zmm3, %zmm9{%k2}

/* combine and get argument value range mask */
	vorpd	%zmm9, %zmm8, %zmm10
	vcmppd	$3, %zmm10, %zmm10, %k0
	kmovw	%k0, %ecx

/* convert biased exponent to DP format */
	vcvtdq2pd %ymm7, %zmm15

/*
   Table stores -log(0.5*mantissa) for larger mantissas,
   adjust exponent accordingly
 */
	vsubpd	%zmm13, %zmm15, %zmm0
	vfmadd132pd _L2(%rax), %zmm2, %zmm0
	testl	%ecx, %ecx
	jne	.LBL_2_3

.LBL_2_2:
	cfi_remember_state
	movq	%rbp, %rsp
	cfi_def_cfa_register (%rsp)
	popq	%rbp
	cfi_adjust_cfa_offset (-8)
	cfi_restore (%rbp)
	ret

.LBL_2_3:
	cfi_restore_state
	vmovups	%zmm3, 1152(%rsp)
	vmovups	%zmm0, 1216(%rsp)
	je	.LBL_2_2

	xorb	%dl, %dl
	xorl	%eax, %eax
	kmovw	%k4, 1048(%rsp)
	kmovw	%k5, 1040(%rsp)
	kmovw	%k6, 1032(%rsp)
	kmovw	%k7, 1024(%rsp)
	vmovups	%zmm16, 960(%rsp)
	vmovups	%zmm17, 896(%rsp)
	vmovups	%zmm18, 832(%rsp)
	vmovups	%zmm19, 768(%rsp)
	vmovups	%zmm20, 704(%rsp)
	vmovups	%zmm21, 640(%rsp)
	vmovups	%zmm22, 576(%rsp)
	vmovups	%zmm23, 512(%rsp)
	vmovups	%zmm24, 448(%rsp)
	vmovups	%zmm25, 384(%rsp)
	vmovups	%zmm26, 320(%rsp)
	vmovups	%zmm27, 256(%rsp)
	vmovups	%zmm28, 192(%rsp)
	vmovups	%zmm29, 128(%rsp)
	vmovups	%zmm30, 64(%rsp)
	vmovups	%zmm31, (%rsp)
	movq	%rsi, 1064(%rsp)
	movq	%rdi, 1056(%rsp)
	movq	%r12, 1096(%rsp)
	cfi_offset_rel_rsp (12, 1096)
	movb	%dl, %r12b
	movq	%r13, 1088(%rsp)
	cfi_offset_rel_rsp (13, 1088)
	movl	%ecx, %r13d
	movq	%r14, 1080(%rsp)
	cfi_offset_rel_rsp (14, 1080)
	movl	%eax, %r14d
	movq	%r15, 1072(%rsp)
	cfi_offset_rel_rsp (15, 1072)
	cfi_remember_state

.LBL_2_6:
	btl	%r14d, %r13d
	jc	.LBL_2_12

.LBL_2_7:
	lea	1(%r14), %esi
	btl	%esi, %r13d
	jc	.LBL_2_10

.LBL_2_8:
	incb	%r12b
	addl	$2, %r14d
	cmpb	$16, %r12b
	jb	.LBL_2_6

	kmovw	1048(%rsp), %k4
	kmovw	1040(%rsp), %k5
	kmovw	1032(%rsp), %k6
	kmovw	1024(%rsp), %k7
	vmovups	960(%rsp), %zmm16
	vmovups	896(%rsp), %zmm17
	vmovups	832(%rsp), %zmm18
	vmovups	768(%rsp), %zmm19
	vmovups	704(%rsp), %zmm20
	vmovups	640(%rsp), %zmm21
	vmovups	576(%rsp), %zmm22
	vmovups	512(%rsp), %zmm23
	vmovups	448(%rsp), %zmm24
	vmovups	384(%rsp), %zmm25
	vmovups	320(%rsp), %zmm26
	vmovups	256(%rsp), %zmm27
	vmovups	192(%rsp), %zmm28
	vmovups	128(%rsp), %zmm29
	vmovups	64(%rsp), %zmm30
	vmovups	(%rsp), %zmm31
	vmovups	1216(%rsp), %zmm0
	movq	1064(%rsp), %rsi
	movq	1056(%rsp), %rdi
	movq	1096(%rsp), %r12
	cfi_restore (%r12)
	movq	1088(%rsp), %r13
	cfi_restore (%r13)
	movq	1080(%rsp), %r14
	cfi_restore (%r14)
	movq	1072(%rsp), %r15
	cfi_restore (%r15)
	jmp	.LBL_2_2

.LBL_2_10:
	cfi_restore_state
	movzbl	%r12b, %r15d
	shlq	$4, %r15
	vmovsd	1160(%rsp,%r15), %xmm0
	vzeroupper
	vmovsd	1160(%rsp,%r15), %xmm0

	call	JUMPTARGET(log)

	vmovsd	%xmm0, 1224(%rsp,%r15)
	jmp	.LBL_2_8

.LBL_2_12:
	movzbl	%r12b, %r15d
	shlq	$4, %r15
	vmovsd	1152(%rsp,%r15), %xmm0
	vzeroupper
	vmovsd	1152(%rsp,%r15), %xmm0

	call	JUMPTARGET(log)

	vmovsd	%xmm0, 1216(%rsp,%r15)
	jmp	.LBL_2_7
END (_ZGVeN8v_log_skx)