x86_64: Fix svml_s_tanf16_core_avx512.S code formatting

This commit contains the following formatting changes (a short
illustrative sketch follows the list):

1. Instructions are preceded by a tab.
2. Instructions less than 8 characters in length have a tab
   between the mnemonic and the first operand.
3. Instructions greater than 7 characters in length have a
   space between the mnemonic and the first operand.
4. Tabs between `#define`d names and their values.
5. 8 spaces at the beginning of a line replaced by a tab.
6. Indent comments with code.
7. Remove the redundant .text section.
8. 1 space between line content and line comment.
9. Space after all commas.
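
For illustration, a minimal before/after sketch of how rules 1-3, 5,
8, and 9 combine. The instructions and comment text are taken from
this file, but the whitespace on both sides is reconstructed by hand
for illustration, not quoted from the patch:

/* Before: 8-space indent, no space after commas, padding spaces
   after a short mnemonic.  */
        vfnmadd231ps {rn-sae},%zmm5,%zmm2,%zmm4
        jne     L(AUX_BRANCH)    /* Go to auxilary branch */

/* After: tab indent (rules 1, 5), a single space after the
   12-character mnemonic (rule 3), a tab after the 3-character
   mnemonic (rule 2), a space after each comma (rule 9), and 1 space
   before the trailing comment (rule 8).  */
	vfnmadd231ps {rn-sae}, %zmm5, %zmm2, %zmm4
	jne	L(AUX_BRANCH) /* Go to auxilary branch */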

Reviewed-by: Noah Goldstein <goldstein.w.n@gmail.com>
commit fa2a051dd9
parent a9f782823f
Author: Sunil K Pandey
Date: 2022-03-07 10:47:14 -08:00

--- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S
@@ -53,8 +53,7 @@
 #include <sysdep.h>
-	.text
-	.section .text.exex512,"ax",@progbits
+	.section .text.exex512, "ax", @progbits
 ENTRY(_ZGVeN16v_tanf_skx)
 	pushq %rbp
 	cfi_def_cfa_offset(16)
@@ -65,10 +64,10 @@ ENTRY(_ZGVeN16v_tanf_skx)
 	subq $192, %rsp
 	xorl %edx, %edx
 	/* Large values check */
 	vmovups _sRangeReductionVal_uisa+__svml_stan_data_internal(%rip), %zmm10
 	/*
 	 *
 	 * Main path
 	 *
@@ -83,7 +82,7 @@ ENTRY(_ZGVeN16v_tanf_skx)
 	vcmpps $22, {sae}, %zmm10, %zmm0, %k6
 	vmovups __svml_stan_data_internal(%rip), %zmm10
 	/*
 	 *
 	 * End of main path
 	 */
@@ -95,16 +94,16 @@ ENTRY(_ZGVeN16v_tanf_skx)
 	vfnmadd231ps {rn-sae}, %zmm5, %zmm2, %zmm4
 	vfnmadd213ps {rn-sae}, %zmm4, %zmm3, %zmm5
 	/* Go to auxilary branch */
 	jne L(AUX_BRANCH)
 	# LOE rbx r12 r13 r14 r15 edx zmm0 zmm5 zmm10 zmm11 k6
 	/* Return from auxilary branch
 	 * for out of main path inputs
 	 */
 L(AUX_BRANCH_RETURN):
 	/* Table lookup */
 	vmovups Th_tbl_uisa+__svml_stan_data_internal(%rip), %zmm3
 	vmovups _sPC3_uisa+__svml_stan_data_internal(%rip), %zmm0
 	vmulps {rn-sae}, %zmm5, %zmm5, %zmm1
@@ -114,14 +113,14 @@ L(AUX_BRANCH_RETURN):
 	vmulps {rn-sae}, %zmm5, %zmm0, %zmm4
 	vfmadd213ps {rn-sae}, %zmm5, %zmm1, %zmm4
 	/*
 	 * Computer Denominator:
 	 * sDenominator - sDlow ~= 1-(sTh+sTl)*(sP+sPlow)
 	 */
 	vmovups _sOne+__svml_stan_data_internal(%rip), %zmm5
 	vmulps {rn-sae}, %zmm4, %zmm3, %zmm7
 	/*
 	 * Compute Numerator:
 	 * sNumerator + sNlow ~= sTh+sTl+sP+sPlow
 	 */
@@ -129,7 +128,7 @@ L(AUX_BRANCH_RETURN):
 	vsubps {rn-sae}, %zmm7, %zmm5, %zmm9
 	vsubps {rn-sae}, %zmm3, %zmm8, %zmm2
 	/*
 	 * Now computes (sNumerator + sNlow)/(sDenominator - sDlow)
 	 * Choose NR iteration instead of hardware division
 	 */
@@ -139,18 +138,18 @@ L(AUX_BRANCH_RETURN):
 	vmulps {rn-sae}, %zmm8, %zmm14, %zmm15
 	vaddps {rn-sae}, %zmm7, %zmm6, %zmm12
 	/* One NR iteration to refine sQuotient */
 	vfmsub213ps {rn-sae}, %zmm8, %zmm15, %zmm9
 	vfnmadd213ps {rn-sae}, %zmm9, %zmm15, %zmm12
 	vsubps {rn-sae}, %zmm13, %zmm12, %zmm0
 	vfnmadd213ps {rn-sae}, %zmm15, %zmm14, %zmm0
 	testl %edx, %edx
 	/* Go to special inputs processing branch */
 	jne L(SPECIAL_VALUES_BRANCH)
 	# LOE rbx r12 r13 r14 r15 edx zmm0 zmm11
 	/* Restore registers
 	 * and exit the function
 	 */
@@ -163,7 +162,7 @@ L(EXIT):
 	cfi_def_cfa(6, 16)
 	cfi_offset(6, -16)
 	/* Branch to process
 	 * special inputs
 	 */
@@ -189,18 +188,18 @@ L(SPECIAL_VALUES_BRANCH):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r15 r12d r13d
 	/* Range mask
 	 * bits check
 	 */
 L(RANGEMASK_CHECK):
 	btl %r12d, %r13d
 	/* Call scalar math function */
 	jc L(SCALAR_MATH_CALL)
 	# LOE rbx r15 r12d r13d
 	/* Special inputs
 	 * processing loop
 	 */
@@ -208,7 +207,7 @@ L(SPECIAL_VALUES_LOOP):
 	incl %r12d
 	cmpl $16, %r12d
 	/* Check bits in range mask */
 	jl L(RANGEMASK_CHECK)
 	# LOE rbx r15 r12d r13d
@@ -220,7 +219,7 @@ L(SPECIAL_VALUES_LOOP):
 	cfi_restore(14)
 	vmovups 128(%rsp), %zmm0
 	/* Go to exit */
 	jmp L(EXIT)
 	/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
 	.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
@@ -230,33 +229,33 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 	/* Scalar math fucntion call
 	 * to process special input
 	 */
 L(SCALAR_MATH_CALL):
 	movl %r12d, %r14d
-	movss 64(%rsp,%r14,4), %xmm0
+	movss 64(%rsp, %r14, 4), %xmm0
 	call tanf@PLT
 	# LOE rbx r14 r15 r12d r13d xmm0
-	movss %xmm0, 128(%rsp,%r14,4)
+	movss %xmm0, 128(%rsp, %r14, 4)
 	/* Process special inputs in loop */
 	jmp L(SPECIAL_VALUES_LOOP)
 	cfi_restore(12)
 	cfi_restore(13)
 	cfi_restore(14)
 	# LOE rbx r15 r12d r13d
 	/* Auxilary branch
 	 * for out of main path inputs
 	 */
 L(AUX_BRANCH):
 	vmovups _sRangeVal+__svml_stan_data_internal(%rip), %zmm6
 	/*
 	 * Get the (2^a / 2pi) mod 1 values from the table.
 	 * Because doesn't have I-type gather, we need a trivial cast
 	 */
@@ -265,7 +264,7 @@ L(AUX_BRANCH):
 	vandps %zmm0, %zmm6, %zmm14
 	vcmpps $0, {sae}, %zmm6, %zmm14, %k0
 	/*
 	 * Break the P_xxx and m into 16-bit chunks ready for
 	 * the long multiplication via 16x16->32 multiplications
 	 */
@@ -282,13 +281,13 @@ L(AUX_BRANCH):
 	vpxord %zmm3, %zmm3, %zmm3
 	vpxord %zmm15, %zmm15, %zmm15
 	vpxord %zmm2, %zmm2, %zmm2
-	vgatherdps (%rax,%zmm4), %zmm3{%k1}
-	vgatherdps 4(%rax,%zmm4), %zmm15{%k2}
-	vgatherdps 8(%rax,%zmm4), %zmm2{%k3}
+	vgatherdps (%rax, %zmm4), %zmm3{%k1}
+	vgatherdps 4(%rax, %zmm4), %zmm15{%k2}
+	vgatherdps 8(%rax, %zmm4), %zmm2{%k3}
 	vpsrld $16, %zmm3, %zmm5
 	vpsrld $16, %zmm2, %zmm13
 	/*
 	 * Also get the significand as an integer
 	 * NB: adding in the integer bit is wrong for denorms!
 	 * To make this work for denorms we should do something slightly different
@@ -302,7 +301,7 @@ L(AUX_BRANCH):
 	vpandd %zmm6, %zmm2, %zmm7
 	vpandd %zmm6, %zmm1, %zmm14
 	/* Now do the big multiplication and carry propagation */
 	vpmulld %zmm9, %zmm8, %zmm4
 	vpmulld %zmm0, %zmm8, %zmm3
 	vpmulld %zmm12, %zmm8, %zmm2
@@ -330,7 +329,7 @@ L(AUX_BRANCH):
 	vpaddd %zmm2, %zmm9, %zmm1
 	vpaddd %zmm1, %zmm0, %zmm8
 	/*
 	 * Now round at the 2^-8 bit position for reduction mod pi/2^7
 	 * instead of the original 2pi (but still with the same 2pi scaling).
 	 * Use a shifter of 2^15 + 2^14.
@@ -349,7 +348,7 @@ L(AUX_BRANCH):
 	vpsrld $16, %zmm15, %zmm12
 	vpaddd %zmm13, %zmm12, %zmm5
 	/* Assemble reduced argument from the pieces */
 	vpandd %zmm6, %zmm14, %zmm9
 	vpandd %zmm6, %zmm15, %zmm7
 	vpslld $16, %zmm5, %zmm6
@@ -358,7 +357,7 @@ L(AUX_BRANCH):
 	vpaddd %zmm9, %zmm5, %zmm9
 	vpsrld $9, %zmm4, %zmm6
 	/*
 	 * We want to incorporate the original sign now too.
 	 * Do it here for convenience in getting the right N value,
 	 * though we could wait right to the end if we were prepared
@@ -369,7 +368,7 @@ L(AUX_BRANCH):
 	vpandd .FLT_21(%rip), %zmm9, %zmm13
 	vpslld $5, %zmm13, %zmm14
 	/*
 	 * Create floating-point high part, implicitly adding integer bit 1
 	 * Incorporate overall sign at this stage too.
 	 */
@@ -379,7 +378,7 @@ L(AUX_BRANCH):
 	vsubps {rn-sae}, %zmm1, %zmm12, %zmm3
 	vsubps {rn-sae}, %zmm3, %zmm2, %zmm7
 	/*
 	 * Create floating-point low and medium parts, respectively
 	 * lo_17, ... lo_0, 0, ..., 0
 	 * hi_8, ... hi_0, lo_31, ..., lo_18
@@ -394,7 +393,7 @@ L(AUX_BRANCH):
 	vandps .FLT_26(%rip), %zmm11, %zmm15
 	vpsrld $18, %zmm9, %zmm6
 	/*
 	 * If the magnitude of the input is <= 2^-20, then
 	 * just pass through the input, since no reduction will be needed and
 	 * the main path will only work accurately if the reduced argument is
@@ -410,12 +409,12 @@ L(AUX_BRANCH):
 	vsubps {rn-sae}, %zmm1, %zmm4, %zmm2
 	vpternlogd $255, %zmm6, %zmm6, %zmm6
 	/* Now add them up into 2 reasonably aligned pieces */
 	vaddps {rn-sae}, %zmm2, %zmm7, %zmm13
 	vsubps {rn-sae}, %zmm13, %zmm7, %zmm7
 	vaddps {rn-sae}, %zmm7, %zmm2, %zmm3
 	/*
 	 * The output is _VRES_R (high) + _VRES_E (low), and the integer part is _VRES_IND
 	 * Set sRp2 = _VRES_R^2 and then resume the original code.
 	 */
@@ -423,10 +422,10 @@ L(AUX_BRANCH):
 	vaddps {rn-sae}, %zmm8, %zmm3, %zmm1
 	vmovups .FLT_25(%rip), %zmm8
 	/* Grab our final N value as an integer, appropriately masked mod 2^8 */
 	vpandd .FLT_19(%rip), %zmm12, %zmm5
 	/*
 	 * Now multiply those numbers all by 2 pi, reasonably accurately.
 	 * (RHi + RLo) * (pi_lead + pi_trail) ~=
 	 * RHi * pi_lead + (RHi * pi_trail + RLo * pi_lead)
@@ -450,7 +449,7 @@ L(AUX_BRANCH):
 	vpsrld $2, %zmm7, %zmm13
 	vpslld $2, %zmm13, %zmm9
 	/*
 	 *
 	 * End of large arguments path
 	 *
@@ -467,7 +466,7 @@ L(AUX_BRANCH):
 	vfmadd213ps {rn-sae}, %zmm6, %zmm9, %zmm0
 	vblendmps %zmm0, %zmm5, %zmm5{%k6}
 	/* Return to main vector processing path */
 	jmp L(AUX_BRANCH_RETURN)
 	# LOE rbx r12 r13 r14 r15 edx zmm5 zmm10 zmm11
 END(_ZGVeN16v_tanf_skx)
@@ -476,123 +475,122 @@ END(_ZGVeN16v_tanf_skx)
 	.align 64
 .FLT_12:
-	.long 0x7f800000,0x7f800000,0x7f800000,0x7f800000,0x7f800000,0x7f800000,0x7f800000,0x7f800000,0x7f800000,0x7f800000,0x7f800000,0x7f800000,0x7f800000,0x7f800000,0x7f800000,0x7f800000
-	.type .FLT_12,@object
-	.size .FLT_12,64
+	.long 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000
+	.type .FLT_12, @object
+	.size .FLT_12, 64
 	.align 64
 .FLT_13:
-	.long 0x007fffff,0x007fffff,0x007fffff,0x007fffff,0x007fffff,0x007fffff,0x007fffff,0x007fffff,0x007fffff,0x007fffff,0x007fffff,0x007fffff,0x007fffff,0x007fffff,0x007fffff,0x007fffff
-	.type .FLT_13,@object
-	.size .FLT_13,64
+	.long 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff
+	.type .FLT_13, @object
+	.size .FLT_13, 64
 	.align 64
 .FLT_14:
-	.long 0x00800000,0x00800000,0x00800000,0x00800000,0x00800000,0x00800000,0x00800000,0x00800000,0x00800000,0x00800000,0x00800000,0x00800000,0x00800000,0x00800000,0x00800000,0x00800000
-	.type .FLT_14,@object
-	.size .FLT_14,64
+	.long 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000
+	.type .FLT_14, @object
+	.size .FLT_14, 64
 	.align 64
 .FLT_15:
-	.long 0x0000ffff,0x0000ffff,0x0000ffff,0x0000ffff,0x0000ffff,0x0000ffff,0x0000ffff,0x0000ffff,0x0000ffff,0x0000ffff,0x0000ffff,0x0000ffff,0x0000ffff,0x0000ffff,0x0000ffff,0x0000ffff
-	.type .FLT_15,@object
-	.size .FLT_15,64
+	.long 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff
+	.type .FLT_15, @object
+	.size .FLT_15, 64
 	.align 64
 .FLT_16:
-	.long 0x80000000,0x80000000,0x80000000,0x80000000,0x80000000,0x80000000,0x80000000,0x80000000,0x80000000,0x80000000,0x80000000,0x80000000,0x80000000,0x80000000,0x80000000,0x80000000
-	.type .FLT_16,@object
-	.size .FLT_16,64
+	.long 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000
+	.type .FLT_16, @object
+	.size .FLT_16, 64
 	.align 64
 .FLT_17:
-	.long 0x3f800000,0x3f800000,0x3f800000,0x3f800000,0x3f800000,0x3f800000,0x3f800000,0x3f800000,0x3f800000,0x3f800000,0x3f800000,0x3f800000,0x3f800000,0x3f800000,0x3f800000,0x3f800000
-	.type .FLT_17,@object
-	.size .FLT_17,64
+	.long 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000
+	.type .FLT_17, @object
+	.size .FLT_17, 64
 	.align 64
 .FLT_18:
-	.long 0x47400000,0x47400000,0x47400000,0x47400000,0x47400000,0x47400000,0x47400000,0x47400000,0x47400000,0x47400000,0x47400000,0x47400000,0x47400000,0x47400000,0x47400000,0x47400000
-	.type .FLT_18,@object
-	.size .FLT_18,64
+	.long 0x47400000, 0x47400000, 0x47400000, 0x47400000, 0x47400000, 0x47400000, 0x47400000, 0x47400000, 0x47400000, 0x47400000, 0x47400000, 0x47400000, 0x47400000, 0x47400000, 0x47400000, 0x47400000
+	.type .FLT_18, @object
+	.size .FLT_18, 64
 	.align 64
 .FLT_19:
-	.long 0x000000ff,0x000000ff,0x000000ff,0x000000ff,0x000000ff,0x000000ff,0x000000ff,0x000000ff,0x000000ff,0x000000ff,0x000000ff,0x000000ff,0x000000ff,0x000000ff,0x000000ff,0x000000ff
-	.type .FLT_19,@object
-	.size .FLT_19,64
+	.long 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff
+	.type .FLT_19, @object
+	.size .FLT_19, 64
 	.align 64
 .FLT_20:
-	.long 0x28800000,0x28800000,0x28800000,0x28800000,0x28800000,0x28800000,0x28800000,0x28800000,0x28800000,0x28800000,0x28800000,0x28800000,0x28800000,0x28800000,0x28800000,0x28800000
-	.type .FLT_20,@object
-	.size .FLT_20,64
+	.long 0x28800000, 0x28800000, 0x28800000, 0x28800000, 0x28800000, 0x28800000, 0x28800000, 0x28800000, 0x28800000, 0x28800000, 0x28800000, 0x28800000, 0x28800000, 0x28800000, 0x28800000, 0x28800000
+	.type .FLT_20, @object
+	.size .FLT_20, 64
 	.align 64
 .FLT_21:
-	.long 0x0003ffff,0x0003ffff,0x0003ffff,0x0003ffff,0x0003ffff,0x0003ffff,0x0003ffff,0x0003ffff,0x0003ffff,0x0003ffff,0x0003ffff,0x0003ffff,0x0003ffff,0x0003ffff,0x0003ffff,0x0003ffff
-	.type .FLT_21,@object
-	.size .FLT_21,64
+	.long 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff
+	.type .FLT_21, @object
+	.size .FLT_21, 64
 	.align 64
 .FLT_22:
-	.long 0x34000000,0x34000000,0x34000000,0x34000000,0x34000000,0x34000000,0x34000000,0x34000000,0x34000000,0x34000000,0x34000000,0x34000000,0x34000000,0x34000000,0x34000000,0x34000000
-	.type .FLT_22,@object
-	.size .FLT_22,64
+	.long 0x34000000, 0x34000000, 0x34000000, 0x34000000, 0x34000000, 0x34000000, 0x34000000, 0x34000000, 0x34000000, 0x34000000, 0x34000000, 0x34000000, 0x34000000, 0x34000000, 0x34000000, 0x34000000
+	.type .FLT_22, @object
+	.size .FLT_22, 64
 	.align 64
 .FLT_23:
-	.long 0x000001ff,0x000001ff,0x000001ff,0x000001ff,0x000001ff,0x000001ff,0x000001ff,0x000001ff,0x000001ff,0x000001ff,0x000001ff,0x000001ff,0x000001ff,0x000001ff,0x000001ff,0x000001ff
-	.type .FLT_23,@object
-	.size .FLT_23,64
+	.long 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff, 0x000001ff
+	.type .FLT_23, @object
+	.size .FLT_23, 64
 	.align 64
 .FLT_24:
-	.long 0x40c90fdb,0x40c90fdb,0x40c90fdb,0x40c90fdb,0x40c90fdb,0x40c90fdb,0x40c90fdb,0x40c90fdb,0x40c90fdb,0x40c90fdb,0x40c90fdb,0x40c90fdb,0x40c90fdb,0x40c90fdb,0x40c90fdb,0x40c90fdb
-	.type .FLT_24,@object
-	.size .FLT_24,64
+	.long 0x40c90fdb, 0x40c90fdb, 0x40c90fdb, 0x40c90fdb, 0x40c90fdb, 0x40c90fdb, 0x40c90fdb, 0x40c90fdb, 0x40c90fdb, 0x40c90fdb, 0x40c90fdb, 0x40c90fdb, 0x40c90fdb, 0x40c90fdb, 0x40c90fdb, 0x40c90fdb
+	.type .FLT_24, @object
+	.size .FLT_24, 64
 	.align 64
 .FLT_25:
-	.long 0xb43bbd2e,0xb43bbd2e,0xb43bbd2e,0xb43bbd2e,0xb43bbd2e,0xb43bbd2e,0xb43bbd2e,0xb43bbd2e,0xb43bbd2e,0xb43bbd2e,0xb43bbd2e,0xb43bbd2e,0xb43bbd2e,0xb43bbd2e,0xb43bbd2e,0xb43bbd2e
-	.type .FLT_25,@object
-	.size .FLT_25,64
+	.long 0xb43bbd2e, 0xb43bbd2e, 0xb43bbd2e, 0xb43bbd2e, 0xb43bbd2e, 0xb43bbd2e, 0xb43bbd2e, 0xb43bbd2e, 0xb43bbd2e, 0xb43bbd2e, 0xb43bbd2e, 0xb43bbd2e, 0xb43bbd2e, 0xb43bbd2e, 0xb43bbd2e, 0xb43bbd2e
+	.type .FLT_25, @object
+	.size .FLT_25, 64
 	.align 64
 .FLT_26:
-	.long 0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff
-	.type .FLT_26,@object
-	.size .FLT_26,64
+	.long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff
+	.type .FLT_26, @object
+	.size .FLT_26, 64
 	.align 64
 .FLT_27:
-	.long 0x35800000,0x35800000,0x35800000,0x35800000,0x35800000,0x35800000,0x35800000,0x35800000,0x35800000,0x35800000,0x35800000,0x35800000,0x35800000,0x35800000,0x35800000,0x35800000
-	.type .FLT_27,@object
-	.size .FLT_27,64
+	.long 0x35800000, 0x35800000, 0x35800000, 0x35800000, 0x35800000, 0x35800000, 0x35800000, 0x35800000, 0x35800000, 0x35800000, 0x35800000, 0x35800000, 0x35800000, 0x35800000, 0x35800000, 0x35800000
+	.type .FLT_27, @object
+	.size .FLT_27, 64
 	.align 64
 .FLT_28:
-	.long 0x00000002,0x00000002,0x00000002,0x00000002,0x00000002,0x00000002,0x00000002,0x00000002,0x00000002,0x00000002,0x00000002,0x00000002,0x00000002,0x00000002,0x00000002,0x00000002
-	.type .FLT_28,@object
-	.size .FLT_28,64
+	.long 0x00000002, 0x00000002, 0x00000002, 0x00000002, 0x00000002, 0x00000002, 0x00000002, 0x00000002, 0x00000002, 0x00000002, 0x00000002, 0x00000002, 0x00000002, 0x00000002, 0x00000002, 0x00000002
+	.type .FLT_28, @object
+	.size .FLT_28, 64
 	.align 64
 .FLT_29:
-	.long 0x3cc90fdb,0x3cc90fdb,0x3cc90fdb,0x3cc90fdb,0x3cc90fdb,0x3cc90fdb,0x3cc90fdb,0x3cc90fdb,0x3cc90fdb,0x3cc90fdb,0x3cc90fdb,0x3cc90fdb,0x3cc90fdb,0x3cc90fdb,0x3cc90fdb,0x3cc90fdb
-	.type .FLT_29,@object
-	.size .FLT_29,64
+	.long 0x3cc90fdb, 0x3cc90fdb, 0x3cc90fdb, 0x3cc90fdb, 0x3cc90fdb, 0x3cc90fdb, 0x3cc90fdb, 0x3cc90fdb, 0x3cc90fdb, 0x3cc90fdb, 0x3cc90fdb, 0x3cc90fdb, 0x3cc90fdb, 0x3cc90fdb, 0x3cc90fdb, 0x3cc90fdb
+	.type .FLT_29, @object
+	.size .FLT_29, 64
 	.align 64
 .FLT_30:
-	.long 0xb03bbd2e,0xb03bbd2e,0xb03bbd2e,0xb03bbd2e,0xb03bbd2e,0xb03bbd2e,0xb03bbd2e,0xb03bbd2e,0xb03bbd2e,0xb03bbd2e,0xb03bbd2e,0xb03bbd2e,0xb03bbd2e,0xb03bbd2e,0xb03bbd2e,0xb03bbd2e
-	.type .FLT_30,@object
-	.size .FLT_30,64
+	.long 0xb03bbd2e, 0xb03bbd2e, 0xb03bbd2e, 0xb03bbd2e, 0xb03bbd2e, 0xb03bbd2e, 0xb03bbd2e, 0xb03bbd2e, 0xb03bbd2e, 0xb03bbd2e, 0xb03bbd2e, 0xb03bbd2e, 0xb03bbd2e, 0xb03bbd2e, 0xb03bbd2e, 0xb03bbd2e
+	.type .FLT_30, @object
+	.size .FLT_30, 64
 	.align 64
 #ifdef __svml_stan_data_internal_typedef
 typedef unsigned int VUINT32;
-typedef struct
-{
+typedef struct {
 	__declspec(align(64)) VUINT32 _sInvPI_uisa[16][1];
 	__declspec(align(64)) VUINT32 _sPI1_uisa[16][1];
 	__declspec(align(64)) VUINT32 _sPI2_uisa[16][1];
@@ -609,7 +607,7 @@ typedef unsigned int VUINT32;
 	__declspec(align(64)) VUINT32 _sPI1[16][1];
 	__declspec(align(64)) VUINT32 _sPI2[16][1];
 	__declspec(align(64)) VUINT32 _sPI3[16][1];
 } __svml_stan_data_internal;
 #endif
 __svml_stan_data_internal:
 	/* UISA */
@@ -653,14 +651,13 @@ __svml_stan_data_internal:
 	.align 64
 	.long 0x33A22000, 0x33A22000, 0x33A22000, 0x33A22000, 0x33A22000, 0x33A22000, 0x33A22000, 0x33A22000, 0x33A22000, 0x33A22000, 0x33A22000, 0x33A22000, 0x33A22000, 0x33A22000, 0x33A22000, 0x33A22000 /* _sPI3 */
 	.align 64
-	.type __svml_stan_data_internal,@object
-	.size __svml_stan_data_internal,.-__svml_stan_data_internal
+	.type __svml_stan_data_internal, @object
+	.size __svml_stan_data_internal, .-__svml_stan_data_internal
 	.align 64
 #ifdef __svml_stan_reduction_data_internal_typedef
 typedef unsigned int VUINT32;
-typedef struct
-{
+typedef struct {
 	__declspec(align(64)) VUINT32 _sPtable[256][3][1];
 } __svml_stan_reduction_data_internal;
 #endif
@@ -923,5 +920,5 @@ __svml_stan_reduction_data_internal:
 	.long 0x4D377036, 0xD8A5664F, 0x10E4107F /* 254 */
 	.long 0x9A6EE06D, 0xB14ACC9E, 0x21C820FF /* 255 */
 	.align 64
-	.type __svml_stan_reduction_data_internal,@object
-	.size __svml_stan_reduction_data_internal,.-__svml_stan_reduction_data_internal
+	.type __svml_stan_reduction_data_internal, @object
+	.size __svml_stan_reduction_data_internal, .-__svml_stan_reduction_data_internal