x86_64: Fix svml_d_tan2_core_sse4.S code formatting

This commit contains the following formatting changes:

1. Instructions are preceded by a tab.
2. Instructions less than 8 characters in length have a tab
   between the mnemonic and the first operand.
3. Instructions 8 or more characters in length have a single
   space between the mnemonic and the first operand.
4. Tabs between `#define`d names and their values.
5. 8 spaces at the beginning of a line replaced by a tab.
6. Indent comments with code.
7. Remove redundant .text section.
8. 1 space between line content and line comment.
9. Space after all commas.
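
For illustration ("<tab>" stands for a literal tab character; both
lines are taken from the diff below), rules 1, 2 and 9 lay out a
mnemonic shorter than 8 characters as

    <tab>lea<tab>(%rdx, %rdx, 2), %ecx

while rule 3 puts a single space after a mnemonic of 8 characters
or more:

    <tab>cvtpd2ps %xmm1, %xmm5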

Reviewed-by: Noah Goldstein <goldstein.w.n@gmail.com>
Sunil K Pandey 2022-03-07 10:47:15 -08:00
parent 160e183a9a
commit 7f852d2592

@@ -66,24 +66,23 @@
#include <sysdep.h>
-.text
-.section .text.sse4,"ax",@progbits
+.section .text.sse4, "ax", @progbits
ENTRY(_ZGVbN2v_tan_sse4)
subq $72, %rsp
cfi_def_cfa_offset(80)
movaps %xmm0, %xmm1
movups _dAbsMask+__svml_dtan_data_internal(%rip), %xmm4
/* Legacy Code */
xorl %eax, %eax
/* b) Remove sign using AND 0x7fffffffffffffff operation */
movaps %xmm4, %xmm5
/* 1) Range reduction to [-Pi/4; +Pi/4] interval */
pxor %xmm11, %xmm11
/*
* c) Getting octant Y by 2/Pi multiplication
* d) Add "Right Shifter" (0x4330000000000000) value
*/
@@ -92,22 +91,22 @@ ENTRY(_ZGVbN2v_tan_sse4)
mulpd %xmm5, %xmm3
movups _dRShift+__svml_dtan_data_internal(%rip), %xmm6
/*
* Range reduction
* X = X - Y*PI1 - Y*PI2 - Y*PI3 - Y*PI4;
*/
movaps %xmm5, %xmm2
addpd %xmm6, %xmm3
/* g) Subtract "Right Shifter" (0x4330000000000000) value */
movaps %xmm3, %xmm9
/* a) Grab sign from source argument and save it. */
andnps %xmm1, %xmm4
subpd %xmm6, %xmm9
movups _dPI1+__svml_dtan_data_internal(%rip), %xmm7
/*
* e) Treat obtained value as integer for destination sign setting.
* Shift first bit of this value to the last (sign) position (S << 63)
* f) Change destination sign if source sign is negative
@@ -123,7 +122,7 @@ ENTRY(_ZGVbN2v_tan_sse4)
subpd %xmm8, %xmm2
movups _dPI3+__svml_dtan_data_internal(%rip), %xmm10
/*
* c) Swap P and Q if first bit of obtained value after
* Right Shifting is set to 1. Using And, Andnot & Or operations.
*/
@@ -131,12 +130,12 @@ ENTRY(_ZGVbN2v_tan_sse4)
mulpd %xmm9, %xmm10
subpd %xmm10, %xmm2
/* a) Calculate X^2 = X * X */
movaps %xmm2, %xmm15
movaps %xmm3, %xmm14
mulpd %xmm2, %xmm15
/*
* b) Calculate 2 polynomials:
* P = X * (P0 + X^2 * (P1 + x^2 * (P2 + x^2 * (P3))));
* Q = Q0 + X^2 * (Q1 + x^2 * (Q2 + x^2 * (Q3)));
@@ -166,13 +165,13 @@ ENTRY(_ZGVbN2v_tan_sse4)
orps %xmm14, %xmm0
orps %xmm3, %xmm2
/* d) Divide R = P / Q; */
divpd %xmm2, %xmm0
/* Large values check */
movaps %xmm5, %xmm4
/*
* 3) Destination sign setting
* a) Set shifted destination sign using XOR operation:
* R = XOR( R, S );
@@ -182,22 +181,22 @@ ENTRY(_ZGVbN2v_tan_sse4)
movmskpd %xmm4, %edx
testl %edx, %edx
/* Go to auxilary branch */
jne L(AUX_BRANCH)
# LOE rbx rbp r12 r13 r14 r15 eax xmm0 xmm1 xmm4 xmm5
/* Return from auxilary branch
* for out of main path inputs
*/
L(AUX_BRANCH_RETURN):
testl %eax, %eax
/* Go to special inputs processing branch */
jne L(SPECIAL_VALUES_BRANCH)
# LOE rbx rbp r12 r13 r14 r15 eax xmm0 xmm1
/* Restore registers
* and exit the function
*/
@@ -207,7 +206,7 @@ L(EXIT):
ret
cfi_def_cfa_offset(80)
/* Branch to process
* special inputs
*/
@@ -227,18 +226,18 @@ L(SPECIAL_VALUES_BRANCH):
cfi_offset(14, -80)
# LOE rbx rbp r15 r12d r13d
/* Range mask
* bits check
*/
L(RANGEMASK_CHECK):
btl %r12d, %r13d
/* Call scalar math function */
jc L(SCALAR_MATH_CALL)
# LOE rbx rbp r15 r12d r13d
/* Special inputs
* processing loop
*/
@@ -246,7 +245,7 @@ L(SPECIAL_VALUES_LOOP):
incl %r12d
cmpl $2, %r12d
/* Check bits in range mask */
jl L(RANGEMASK_CHECK)
# LOE rbx rbp r15 r12d r13d
@@ -258,40 +257,40 @@ L(SPECIAL_VALUES_LOOP):
cfi_restore(14)
movups 48(%rsp), %xmm0
/* Go to exit */
jmp L(EXIT)
cfi_offset(12, -64)
cfi_offset(13, -72)
cfi_offset(14, -80)
# LOE rbx rbp r12 r13 r14 r15 xmm0
/* Scalar math fucntion call
* to process special input
*/
L(SCALAR_MATH_CALL):
movl %r12d, %r14d
-movsd 32(%rsp,%r14,8), %xmm0
+movsd 32(%rsp, %r14, 8), %xmm0
call tan@PLT
# LOE rbx rbp r14 r15 r12d r13d xmm0
-movsd %xmm0, 48(%rsp,%r14,8)
+movsd %xmm0, 48(%rsp, %r14, 8)
/* Process special inputs in loop */
jmp L(SPECIAL_VALUES_LOOP)
cfi_restore(12)
cfi_restore(13)
cfi_restore(14)
# LOE rbx rbp r15 r12d r13d
/* Auxilary branch
* for out of main path inputs
*/
L(AUX_BRANCH):
movdqu .FLT_17(%rip), %xmm3
/*
* Get the (2^a / 2pi) mod 1 values from the table.
* Because doesn't have L-type gather, we need a trivial cast
*/
@@ -299,7 +298,7 @@ L(AUX_BRANCH):
pand %xmm1, %xmm3
psrlq $52, %xmm3
/*
* Also get the significand as an integer
* NB: adding in the integer bit is wrong for denorms!
* To make this work for denorms we should do something slightly different
@@ -310,30 +309,30 @@ L(AUX_BRANCH):
paddq .FLT_19(%rip), %xmm2
pextrw $4, %xmm3, %esi
movups _dRangeVal+__svml_dtan_data_internal(%rip), %xmm10
-lea (%rdx,%rdx,2), %ecx
+lea (%rdx, %rdx, 2), %ecx
shll $3, %ecx
-lea (%rsi,%rsi,2), %edi
+lea (%rsi, %rsi, 2), %edi
shll $3, %edi
movdqa %xmm2, %xmm6
-movq 16(%rcx,%r8), %xmm8
+movq 16(%rcx, %r8), %xmm8
andps %xmm10, %xmm5
-movhpd 16(%rdi,%r8), %xmm8
+movhpd 16(%rdi, %r8), %xmm8
psrlq $32, %xmm6
movups %xmm0, 16(%rsp)
movaps %xmm8, %xmm0
/*
* Break the P_xxx and m into 32-bit chunks ready for
* the long multiplication via 32x32->64 multiplications
*/
movdqu .FLT_20(%rip), %xmm15
psrlq $32, %xmm0
-movq 8(%rcx,%r8), %xmm13
+movq 8(%rcx, %r8), %xmm13
pand %xmm15, %xmm2
cmpeqpd %xmm10, %xmm5
movdqa %xmm6, %xmm10
movdqa %xmm2, %xmm11
-movhpd 8(%rdi,%r8), %xmm13
+movhpd 8(%rdi, %r8), %xmm13
pand %xmm15, %xmm8
pmuludq %xmm0, %xmm10
movaps %xmm13, %xmm14
@@ -352,9 +351,9 @@ L(AUX_BRANCH):
pand %xmm11, %xmm3
pmuludq %xmm6, %xmm14
paddq %xmm10, %xmm3
-movq (%rcx,%r8), %xmm7
+movq (%rcx, %r8), %xmm7
movdqa %xmm15, %xmm9
-movhpd (%rdi,%r8), %xmm7
+movhpd (%rdi, %r8), %xmm7
psrlq $32, %xmm8
psrlq $32, %xmm11
pand %xmm7, %xmm9
@@ -364,7 +363,7 @@ L(AUX_BRANCH):
paddq %xmm11, %xmm13
pmuludq %xmm9, %xmm5
/* Now do the big multiplication and carry propagation */
pmuludq %xmm9, %xmm6
pand %xmm12, %xmm10
movaps %xmm8, %xmm0
@@ -391,11 +390,11 @@ L(AUX_BRANCH):
paddq %xmm2, %xmm3
psllq $32, %xmm3
/* Assemble reduced argument from the pieces */
pand %xmm15, %xmm8
paddq %xmm13, %xmm3
/*
* We want to incorporate the original sign now too.
* Do it here for convenience in getting the right N value,
* though we could wait right to the end if we were prepared
@@ -405,14 +404,14 @@ L(AUX_BRANCH):
movdqu .FLT_21(%rip), %xmm9
movdqa %xmm3, %xmm5
/*
* Create floating-point high part, implicitly adding integer bit 1
* Incorporate overall sign at this stage too.
*/
movdqu .FLT_22(%rip), %xmm15
pand %xmm1, %xmm9
/*
* Now round at the 2^-9 bit position for reduction mod pi/2^8
* instead of the original 2pi (but still with the same 2pi scaling).
* Use a shifter of 2^43 + 2^42.
@@ -431,13 +430,13 @@ L(AUX_BRANCH):
paddq %xmm8, %xmm0
movaps %xmm10, %xmm14
/* Load constants (not all needed at once) */
lea _dCoeffs+96+__svml_dtan_data_internal(%rip), %rdx
movdqu .FLT_27(%rip), %xmm6
movdqu .FLT_25(%rip), %xmm7
pand %xmm3, %xmm6
/*
* Create floating-point low and medium parts, respectively
* lo_23, ... lo_0, 0, ..., 0
* hi_11, ... hi_0, lo_63, ..., lo_24
@@ -457,7 +456,7 @@ L(AUX_BRANCH):
subpd %xmm14, %xmm5
por %xmm9, %xmm6
/* Now add them up into 2 reasonably aligned pieces */
movaps %xmm5, %xmm11
subpd %xmm9, %xmm6
addpd %xmm6, %xmm11
@@ -466,7 +465,7 @@ L(AUX_BRANCH):
subpd %xmm8, %xmm7
addpd %xmm5, %xmm6
/* Split RHi into 26-bit leading part and 27-bit trailing part */
movups .FLT_31(%rip), %xmm2
movaps %xmm2, %xmm15
andps %xmm11, %xmm15
@@ -474,7 +473,7 @@ L(AUX_BRANCH):
subpd %xmm15, %xmm11
mulpd .FLT_28(%rip), %xmm7
/*
* Now multiply those numbers all by 2 pi, reasonably accurately.
* The top part uses 2pi = d2pi_lead + d2pi_trail, where
* d2pi_lead has 27 significant bits.
@@ -482,7 +481,7 @@ L(AUX_BRANCH):
movups .FLT_29(%rip), %xmm0
movups .FLT_30(%rip), %xmm3
/*
* Do the multiplication as exact top part and "naive" low part.
* This still maintains almost 30 bits of offset and doesn't drop
* the accuracy much below what we already have.
@@ -496,7 +495,7 @@ L(AUX_BRANCH):
addpd %xmm3, %xmm7
addpd %xmm7, %xmm0
/*
* Do another stage of compensated summation to get full offset
* between the pieces dRedHi + dRedLo.
* Depending on the later algorithm, we might avoid this stage.
@@ -505,7 +504,7 @@ L(AUX_BRANCH):
addpd %xmm12, %xmm13
subpd %xmm13, %xmm12
/*
* If the magnitude of the input is <= 2^-20, then
* just pass through the input, since no reduction will be needed and
* the main path will only work accurately if the reduced argument is
@@ -519,11 +518,11 @@ L(AUX_BRANCH):
cmplepd %xmm7, %xmm3
addpd %xmm12, %xmm0
/* Grab our final N value as an integer, appropriately masked mod 2^9 */
pand .FLT_34(%rip), %xmm10
andps %xmm1, %xmm3
/*
* Argument reduction is now finished: x = n * pi/256 + r
* where n = lIndex and r = dZ
* But we have n modulo 512, needed for sin/cos with period 2pi
@@ -538,16 +537,16 @@ L(AUX_BRANCH):
pextrw $4, %xmm10, %r11d
imull $104, %r9d, %r10d
/*
* The output is _VRES_Z (high) + _VRES_E (low), and the integer part is _VRES_IND
* Simply absorb E into Z instead of keeping a 2-part result.
*/
addpd %xmm14, %xmm3
imull $104, %r11d, %r11d
-movq -96(%r10,%rdx), %xmm1
-movhpd -96(%r11,%rdx), %xmm1
+movq -96(%r10, %rdx), %xmm1
+movhpd -96(%r11, %rdx), %xmm1
/*
* Compute reciprocal component
* Construct a separate reduced argument modulo pi near pi/2 multiples.
* i.e. (pi/2 - x) mod pi, simply by subtracting the reduced argument
@@ -555,11 +554,11 @@ L(AUX_BRANCH):
*/
subpd %xmm3, %xmm1
/* Now compute an approximate reciprocal to mix into the computation. */
cvtpd2ps %xmm1, %xmm5
movlhps %xmm5, %xmm5
/*
* Now compute the error dEr where dRecip_hi = (1/R_full) * (1 - dEr)
* so that we can compensate for it.
*/
@@ -573,14 +572,14 @@ L(AUX_BRANCH):
subpd %xmm2, %xmm5
subpd %xmm1, %xmm5
/*
* Get a working-precision reciprocal 1/dR_full
* using a fourth-order polynomial approximation
* R + (E*R) * (1 + E) * (1 + E^2)
*/
movaps %xmm5, %xmm1
/*
* Higher polynomial terms
* Stage 1 (with unlimited parallelism)
* Z2 = Z^2
@@ -591,86 +590,86 @@ L(AUX_BRANCH):
addpd %xmm1, %xmm5
mulpd %xmm6, %xmm1
addpd %xmm6, %xmm1
-movq -24(%r10,%rdx), %xmm13
-movq -8(%r10,%rdx), %xmm12
-movhpd -24(%r11,%rdx), %xmm13
-movhpd -8(%r11,%rdx), %xmm12
+movq -24(%r10, %rdx), %xmm13
+movq -8(%r10, %rdx), %xmm12
+movhpd -24(%r11, %rdx), %xmm13
+movhpd -8(%r11, %rdx), %xmm12
/* P5 = C3 + C4 * Z */
mulpd %xmm3, %xmm13
/* P6 = C5 + C6 * Z */
mulpd %xmm3, %xmm12
mulpd %xmm1, %xmm5
-movq (%r10,%rdx), %xmm15
-movhpd (%r11,%rdx), %xmm15
+movq (%r10, %rdx), %xmm15
+movhpd (%r11, %rdx), %xmm15
/* P9 = C5 + C6 * Z + C7 * Z^2 */
mulpd %xmm2, %xmm15
addpd %xmm5, %xmm6
-movq -40(%r10,%rdx), %xmm14
+movq -40(%r10, %rdx), %xmm14
/*
* Stage 2 (with unlimited parallelism)
* Z4 = Z^4
*/
movaps %xmm2, %xmm5
-movq -32(%r10,%rdx), %xmm10
-movq -16(%r10,%rdx), %xmm11
-movhpd -40(%r11,%rdx), %xmm14
-movhpd -32(%r11,%rdx), %xmm10
-movhpd -16(%r11,%rdx), %xmm11
+movq -32(%r10, %rdx), %xmm10
+movq -16(%r10, %rdx), %xmm11
+movhpd -40(%r11, %rdx), %xmm14
+movhpd -32(%r11, %rdx), %xmm10
+movhpd -16(%r11, %rdx), %xmm11
/* P4 = C1_lo + C2 * Z */
mulpd %xmm3, %xmm14
addpd %xmm13, %xmm10
addpd %xmm12, %xmm11
mulpd %xmm2, %xmm5
/* P10 = C1_lo + C2 * Z + C3 * Z^2 + C4 * Z^3 */
mulpd %xmm10, %xmm2
addpd %xmm15, %xmm11
-movq -48(%r10,%rdx), %xmm0
-movhpd -48(%r11,%rdx), %xmm0
+movq -48(%r10, %rdx), %xmm0
+movhpd -48(%r11, %rdx), %xmm0
addpd %xmm14, %xmm0
/*
* Stage 3 (with unlimited parallelism)
* P12 = C1_lo + C2 * Z + ... + C7 * Z^6
*/
mulpd %xmm11, %xmm5
addpd %xmm2, %xmm0
-movq -56(%r10,%rdx), %xmm8
-movhpd -56(%r11,%rdx), %xmm8
+movq -56(%r10, %rdx), %xmm8
+movhpd -56(%r11, %rdx), %xmm8
/*
* Sum of dominant component(s)
* Compute C0_hi + C1_hi * Z + Recip_hi = H4
* H2 = C0_hi + C1_hi * Z (exact since C1_hi is 1 bit)
*/
mulpd %xmm3, %xmm8
addpd %xmm5, %xmm0
-movq -80(%r10,%rdx), %xmm9
-movhpd -80(%r11,%rdx), %xmm9
+movq -80(%r10, %rdx), %xmm9
+movhpd -80(%r11, %rdx), %xmm9
/*
* dRecip_hi is only used when dTau is one (cotangent path)
* H4 = C0_hi + C1_hi * Z + Recip_hi
*/
mulpd %xmm6, %xmm9
/* And now the very final summation */
mulpd %xmm0, %xmm3
-movq -72(%r10,%rdx), %xmm7
+movq -72(%r10, %rdx), %xmm7
/*
*
* End of large arguments path
*
* Merge results from main and large paths:
*/
movaps %xmm4, %xmm0
-movhpd -72(%r11,%rdx), %xmm7
+movhpd -72(%r11, %rdx), %xmm7
addpd %xmm8, %xmm7
addpd %xmm9, %xmm7
addpd %xmm3, %xmm7
@@ -679,7 +678,7 @@ L(AUX_BRANCH):
movups (%rsp), %xmm1
orps %xmm7, %xmm0
/* Return to main vector processing path */
jmp L(AUX_BRANCH_RETURN)
# LOE rbx rbp r12 r13 r14 r15 eax xmm0 xmm1
END(_ZGVbN2v_tan_sse4)
@@ -689,8 +688,7 @@ END(_ZGVbN2v_tan_sse4)
#ifdef __svml_dtan_data_internal_typedef
typedef unsigned int VUINT32;
-typedef struct
-{
+typedef struct {
__declspec(align(16)) VUINT32 _dAbsMask[2][2];
__declspec(align(16)) VUINT32 _dRangeVal[2][2];
__declspec(align(16)) VUINT32 _dRShift[2][2];
@@ -707,7 +705,7 @@ typedef unsigned int VUINT32;
__declspec(align(16)) VUINT32 _dQ1[2][2];
__declspec(align(16)) VUINT32 _dQ2[2][2];
__declspec(align(16)) VUINT32 _dQ3[2][2];
} __svml_dtan_data_internal;
#endif
__svml_dtan_data_internal:
/* Shared value*/
@@ -4071,15 +4069,14 @@ __svml_dtan_data_internal:
.align 16
.quad 0xbf2b525b03bc92a6, 0xbf2b525b03bc92a6 /* _dQ3 */
.align 16
-.type __svml_dtan_data_internal,@object
-.size __svml_dtan_data_internal,.-__svml_dtan_data_internal
+.type __svml_dtan_data_internal, @object
+.size __svml_dtan_data_internal, .-__svml_dtan_data_internal
.space 16, 0x00
.align 16
#ifdef __svml_dtan_reduction_data_internal_typedef
typedef unsigned int VUINT32;
-typedef struct
-{
+typedef struct {
__declspec(align(16)) VUINT32 _dPtable[2048][3][2];
} __svml_dtan_reduction_data_internal;
#endif
@@ -6134,126 +6131,126 @@ __svml_dtan_reduction_data_internal:
.quad 0x4F758FD7CBE2F67A, 0x0E73EF14A525D4D7, 0xF6BF623F1ABA10AC /* 2046 */
.quad 0x9EEB1FAF97C5ECF4, 0x1CE7DE294A4BA9AF, 0xED7EC47E35742158 /* 2047 */
.align 16
-.type __svml_dtan_reduction_data_internal,@object
-.size __svml_dtan_reduction_data_internal,.-__svml_dtan_reduction_data_internal
+.type __svml_dtan_reduction_data_internal, @object
+.size __svml_dtan_reduction_data_internal, .-__svml_dtan_reduction_data_internal
.space 512, 0x00
.align 16
.FLT_17:
-.long 0x00000000,0x7ff00000,0x00000000,0x7ff00000
-.type .FLT_17,@object
-.size .FLT_17,16
+.long 0x00000000, 0x7ff00000, 0x00000000, 0x7ff00000
+.type .FLT_17, @object
+.size .FLT_17, 16
.align 16
.FLT_18:
-.long 0xffffffff,0x000fffff,0xffffffff,0x000fffff
-.type .FLT_18,@object
-.size .FLT_18,16
+.long 0xffffffff, 0x000fffff, 0xffffffff, 0x000fffff
+.type .FLT_18, @object
+.size .FLT_18, 16
.align 16
.FLT_19:
-.long 0x00000000,0x00100000,0x00000000,0x00100000
-.type .FLT_19,@object
-.size .FLT_19,16
+.long 0x00000000, 0x00100000, 0x00000000, 0x00100000
+.type .FLT_19, @object
+.size .FLT_19, 16
.align 16
.FLT_20:
-.long 0xffffffff,0x00000000,0xffffffff,0x00000000
-.type .FLT_20,@object
-.size .FLT_20,16
+.long 0xffffffff, 0x00000000, 0xffffffff, 0x00000000
+.type .FLT_20, @object
+.size .FLT_20, 16
.align 16
.FLT_21:
-.long 0x00000000,0x80000000,0x00000000,0x80000000
-.type .FLT_21,@object
-.size .FLT_21,16
+.long 0x00000000, 0x80000000, 0x00000000, 0x80000000
+.type .FLT_21, @object
+.size .FLT_21, 16
.align 16
.FLT_22:
-.long 0x00000000,0x3ff00000,0x00000000,0x3ff00000
-.type .FLT_22,@object
-.size .FLT_22,16
+.long 0x00000000, 0x3ff00000, 0x00000000, 0x3ff00000
+.type .FLT_22, @object
+.size .FLT_22, 16
.align 16
.FLT_23:
-.long 0x00000000,0x42a80000,0x00000000,0x42a80000
-.type .FLT_23,@object
-.size .FLT_23,16
+.long 0x00000000, 0x42a80000, 0x00000000, 0x42a80000
+.type .FLT_23, @object
+.size .FLT_23, 16
.align 16
.FLT_24:
-.long 0x00000000,0x39700000,0x00000000,0x39700000
-.type .FLT_24,@object
-.size .FLT_24,16
+.long 0x00000000, 0x39700000, 0x00000000, 0x39700000
+.type .FLT_24, @object
+.size .FLT_24, 16
.align 16
.FLT_25:
-.long 0x00ffffff,0x00000000,0x00ffffff,0x00000000
-.type .FLT_25,@object
-.size .FLT_25,16
+.long 0x00ffffff, 0x00000000, 0x00ffffff, 0x00000000
+.type .FLT_25, @object
+.size .FLT_25, 16
.align 16
.FLT_26:
-.long 0x00000000,0x3cb00000,0x00000000,0x3cb00000
-.type .FLT_26,@object
-.size .FLT_26,16
+.long 0x00000000, 0x3cb00000, 0x00000000, 0x3cb00000
+.type .FLT_26, @object
+.size .FLT_26, 16
.align 16
.FLT_27:
-.long 0x00000fff,0x00000000,0x00000fff,0x00000000
-.type .FLT_27,@object
-.size .FLT_27,16
+.long 0x00000fff, 0x00000000, 0x00000fff, 0x00000000
+.type .FLT_27, @object
+.size .FLT_27, 16
.align 16
.FLT_28:
-.long 0x54442d18,0x401921fb,0x54442d18,0x401921fb
-.type .FLT_28,@object
-.size .FLT_28,16
+.long 0x54442d18, 0x401921fb, 0x54442d18, 0x401921fb
+.type .FLT_28, @object
+.size .FLT_28, 16
.align 16
.FLT_29:
-.long 0x54000000,0x401921fb,0x54000000,0x401921fb
-.type .FLT_29,@object
-.size .FLT_29,16
+.long 0x54000000, 0x401921fb, 0x54000000, 0x401921fb
+.type .FLT_29, @object
+.size .FLT_29, 16
.align 16
.FLT_30:
-.long 0x11a62633,0x3e310b46,0x11a62633,0x3e310b46
-.type .FLT_30,@object
-.size .FLT_30,16
+.long 0x11a62633, 0x3e310b46, 0x11a62633, 0x3e310b46
+.type .FLT_30, @object
+.size .FLT_30, 16
.align 16
.FLT_31:
-.long 0xf8000000,0xffffffff,0xf8000000,0xffffffff
-.type .FLT_31,@object
-.size .FLT_31,16
+.long 0xf8000000, 0xffffffff, 0xf8000000, 0xffffffff
+.type .FLT_31, @object
+.size .FLT_31, 16
.align 16
.FLT_32:
-.long 0xffffffff,0x7fffffff,0xffffffff,0x7fffffff
-.type .FLT_32,@object
-.size .FLT_32,16
+.long 0xffffffff, 0x7fffffff, 0xffffffff, 0x7fffffff
+.type .FLT_32, @object
+.size .FLT_32, 16
.align 16
.FLT_33:
-.long 0x00000000,0x3eb00000,0x00000000,0x3eb00000
-.type .FLT_33,@object
-.size .FLT_33,16
+.long 0x00000000, 0x3eb00000, 0x00000000, 0x3eb00000
+.type .FLT_33, @object
+.size .FLT_33, 16
.align 16
.FLT_34:
-.long 0x000001ff,0x00000000,0x000001ff,0x00000000
-.type .FLT_34,@object
-.size .FLT_34,16
+.long 0x000001ff, 0x00000000, 0x000001ff, 0x00000000
+.type .FLT_34, @object
+.size .FLT_34, 16
.align 16
.FLT_35:
-.long 0x000000ff,0x00000000,0x000000ff,0x00000000
-.type .FLT_35,@object
-.size .FLT_35,16
+.long 0x000000ff, 0x00000000, 0x000000ff, 0x00000000
+.type .FLT_35, @object
+.size .FLT_35, 16
.align 16
.FLT_36:
-.long 0x00000000,0x3ff00000,0x00000000,0x3ff00000
-.type .FLT_36,@object
-.size .FLT_36,16
+.long 0x00000000, 0x3ff00000, 0x00000000, 0x3ff00000
+.type .FLT_36, @object
+.size .FLT_36, 16