x86_64: Fix svml_s_exp10f8_core_avx2.S code formatting

This commit contains the following formatting changes:

1. Instructions preceded by a tab.
2. Instructions less than 8 characters in length have a tab
   between the mnemonic and the first operand.
3. Instructions greater than 7 characters in length have a
   space between the mnemonic and the first operand.
4. Tabs after `#define`d names and their values.
5. 8 spaces at the beginning of a line replaced by a tab.
6. Indent comments with code.
7. Remove redundant .text section.
8. 1 space between line content and line comment.
9. Space after all commas.

Reviewed-by: Noah Goldstein <goldstein.w.n@gmail.com>

commit 008fb2c8b9
parent dcc7a3e02d
Author: Sunil K Pandey
Date:   2022-03-07 10:47:12 -08:00

--- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S
@@ -22,7 +22,7 @@
  * exp10(x) = 2^x/log10(2) = 2^n * (1 + T[j]) * (1 + P(y))
  * where
  * x = m*log10(2)/K + y, y in [-log10(2)/K..log10(2)/K]
- * m = n*K + j, m,n,j - signed integer, j in [-K/2..K/2]
+ * m = n*K + j, m, n, j - signed integer, j in [-K/2..K/2]
  *
  * values of 2^j/K are tabulated
  *
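
As an aside for readers of this hunk: below is a rough scalar rendering of
the scheme the comment describes, assuming K = 32 (matching _iIndexMask =
2^5 - 1 in the data section). The names, the floor-based split of m, and
the two-term polynomial are an illustrative sketch, not glibc internals:

    #include <math.h>

    #define K 32	/* table size; _sT below tabulates 2^(j/K) */

    static float
    exp10_sketch (float x, const float T[K])
    {
      /* m = round(x * K * log2(10)), so that x = m*log10(2)/K + r.  */
      int im = (int) nearbyintf (x * (float) (K * M_LN10 / M_LN2));
      int n = im >> 5;			/* im = n*K + j, floor split */
      int j = im & (K - 1);
      /* Reduced argument r, |r| <= log10(2)/(2K) or so.  */
      float r = x - im * (float) (M_LN2 / M_LN10 / K);
      /* exp10(r) - 1 ~= r*ln(10) + r^2*ln(10)^2/2, cf. _sPC1/_sPC2.  */
      float p = fmaf ((float) (M_LN10 * M_LN10 / 2) * r, r,
                      (float) M_LN10 * r);
      /* exp10(x) = 2^n * T[j] * (1 + p).  */
      return ldexpf (fmaf (T[j], p, T[j]), n);
    }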
@@ -57,8 +57,7 @@
 #include <sysdep.h>
-	.text
-	.section .text.avx2,"ax",@progbits
+	.section .text.avx2, "ax", @progbits
 ENTRY(_ZGVdN8v_exp10f_avx2)
 	pushq	%rbp
 	cfi_def_cfa_offset(16)
@@ -70,20 +69,20 @@ ENTRY(_ZGVdN8v_exp10f_avx2)
 	lea	__svml_sexp10_data_internal(%rip), %rax
 	vmovups	_sShifter+__svml_sexp10_data_internal(%rip), %ymm4
 	/* Load arument */
 	vmovups	_sLg2_10+__svml_sexp10_data_internal(%rip), %ymm1
 	vmovups	_iIndexMask+__svml_sexp10_data_internal(%rip), %ymm2
 	vmovaps	%ymm0, %ymm3
 	vfmadd213ps %ymm4, %ymm3, %ymm1
 	/* Index and lookup */
 	vandps	%ymm2, %ymm1, %ymm7
 	/* iIndex *= sizeof(S); */
 	vpslld	$2, %ymm7, %ymm10
 	vsubps	%ymm4, %ymm1, %ymm0
 	/* Check for overflow\underflow */
 	vandps	_iAbsMask+__svml_sexp10_data_internal(%rip), %ymm3, %ymm5
 	vpcmpgtd _iDomainRange+__svml_sexp10_data_internal(%rip), %ymm5, %ymm6
 	vmovmskps %ymm6, %edx
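
The three instructions at the end of this hunk make the branch decision:
|x| is compared bitwise against _iDomainRange, and vmovmskps condenses the
per-lane results into %edx. A scalar sketch of that test (constants taken
from the data section below; the function name is made up):

    #include <stdint.h>
    #include <string.h>

    /* Return a bit mask of lanes needing the slow path, mirroring the
       vandps/vpcmpgtd/vmovmskps sequence above.  */
    static int
    needs_special_path (const float *x, int nlanes)
    {
      int mask = 0;
      for (int i = 0; i < nlanes; i++)
        {
          uint32_t bits;
          memcpy (&bits, &x[i], sizeof bits);
          bits &= 0x7fffffff;		/* _iAbsMask: drop the sign bit */
          /* Signed compare, as vpcmpgtd does; NaN/Inf bit patterns also
             exceed the bound and get flagged.  */
          if ((int32_t) bits > 0x4217b818)	/* _iDomainRange */
            mask |= 1 << i;
        }
      return mask;
    }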
@@ -96,15 +95,15 @@ ENTRY(_ZGVdN8v_exp10f_avx2)
 	movslq	%esi, %rsi
 	movslq	%edi, %rdi
 	movslq	%r8d, %r8
-	vmovd	(%rax,%rcx), %xmm8
-	vmovd	(%rax,%rsi), %xmm9
-	vmovd	(%rax,%rdi), %xmm11
-	vmovd	(%rax,%r8), %xmm12
+	vmovd	(%rax, %rcx), %xmm8
+	vmovd	(%rax, %rsi), %xmm9
+	vmovd	(%rax, %rdi), %xmm11
+	vmovd	(%rax, %r8), %xmm12
 	vpunpckldq %xmm9, %xmm8, %xmm13
 	vpunpckldq %xmm12, %xmm11, %xmm14
 	vpunpcklqdq %xmm14, %xmm13, %xmm15
 	/* R */
 	vmovups	_sInvLg2_10hi+__svml_sexp10_data_internal(%rip), %ymm13
 	vmovd	%xmm6, %r9d
 	vfnmadd213ps %ymm3, %ymm0, %ymm13
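
The vmovd/vpunpckldq/vpunpcklqdq pattern in this hunk is a manual gather,
presumably chosen because hardware gathers (vpgatherdd) are slow on many
cores: the 4-byte table entries (indices pre-scaled by vpslld $2 earlier)
are loaded one lane at a time and packed. In C it amounts to no more than
this illustrative loop:

    /* Illustrative equivalent of the scalar-load gather above.  */
    static void
    gather_table (float dst[8], const float *table, const int idx[8])
    {
      for (int i = 0; i < 8; i++)
        dst[i] = table[idx[i]];	/* one vmovd per lane, then packed */
    }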
@@ -112,11 +111,11 @@ ENTRY(_ZGVdN8v_exp10f_avx2)
 	movslq	%r9d, %r9
 	movslq	%r10d, %r10
 	vfnmadd132ps _sInvLg2_10lo+__svml_sexp10_data_internal(%rip), %ymm13, %ymm0
-	vmovd	(%rax,%r9), %xmm4
-	vmovd	(%rax,%r10), %xmm5
+	vmovd	(%rax, %r9), %xmm4
+	vmovd	(%rax, %r10), %xmm5
 	vpunpckldq %xmm5, %xmm4, %xmm9
 	/*
 	 * Polynomial
 	 * exp10 = 2^N*(Tj+Tj*poly)
 	 * poly(sN) = {1+later} a0+a1*sR
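
The "{1+later}" remark means the polynomial's constant 1 is not evaluated
with the other coefficients; it is folded in at the end as Tj + Tj*poly.
With hypothetical scalar coefficients c0/c1/c2 standing in for the
_sPC0/_sPC1/_sPC2 table entries, the FMA chain looks like:

    #include <math.h>

    /* poly = c0 + r*(c1 + r*c2); result = Tj + Tj*poly = Tj*(1+poly).  */
    static float
    tj_times_exp10r (float tj, float r, float c0, float c1, float c2)
    {
      float p = fmaf (fmaf (c2, r, c1), r, c0);
      return fmaf (tj, p, tj);
    }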
@@ -128,32 +127,32 @@ ENTRY(_ZGVdN8v_exp10f_avx2)
 	movslq	%r11d, %r11
 	movslq	%ecx, %rcx
 	vfmadd213ps _sPC0+__svml_sexp10_data_internal(%rip), %ymm0, %ymm4
-	vmovd	(%rax,%r11), %xmm7
-	vmovd	(%rax,%rcx), %xmm8
+	vmovd	(%rax, %r11), %xmm7
+	vmovd	(%rax, %rcx), %xmm8
 	vpunpckldq %xmm8, %xmm7, %xmm11
 	/* remove index bits */
 	vpandn	%ymm1, %ymm2, %ymm0
 	vpunpcklqdq %xmm11, %xmm9, %xmm12
 	/* 2^N */
 	vpslld	$18, %ymm0, %ymm1
 	vinsertf128 $1, %xmm12, %ymm15, %ymm14
 	/* Tj_l+Tj_h*poly */
 	vfmadd213ps %ymm14, %ymm14, %ymm4
 	/* quick mul 2^N */
 	vpaddd	%ymm1, %ymm4, %ymm0
 	/* Finish */
 	testl	%edx, %edx
 	/* Go to special inputs processing branch */
 	jne	L(SPECIAL_VALUES_BRANCH)
 	# LOE rbx r12 r13 r14 r15 edx ymm0 ymm3
 	/* Restore registers
 	 * and exit the function
 	 */
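
The "quick mul 2^N" here is the usual exponent-field trick: vpandn clears
the five index bits, the shift left by 18 parks n at bit 23 (the start of
the float exponent field, since n*32 << 18 == n << 23) while shifting away
the Shifter's high bits, and vpaddd adds n into the result's raw exponent.
A sketch, valid only while the scaled result stays normal (lanes where it
would not were already flagged for the special path):

    #include <stdint.h>
    #include <string.h>

    /* Multiply v by 2^n by integer-adding n into the exponent field.
       m_no_index = n*32, i.e. m with its 5 index bits masked off.  */
    static float
    quick_scale (float v, int32_t m_no_index)
    {
      uint32_t bits;
      memcpy (&bits, &v, sizeof bits);
      bits += (uint32_t) m_no_index << 18;	/* n*32 << 18 == n << 23 */
      memcpy (&v, &bits, sizeof v);
      return v;
    }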
@@ -166,7 +165,7 @@ L(EXIT):
 	cfi_def_cfa(6, 16)
 	cfi_offset(6, -16)
 	/* Branch to process
 	 * special inputs
 	 */
@@ -192,18 +191,18 @@ L(SPECIAL_VALUES_BRANCH):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r15 r12d r13d
 	/* Range mask
 	 * bits check
 	 */
 L(RANGEMASK_CHECK):
 	btl	%r12d, %r13d
 	/* Call scalar math function */
 	jc	L(SCALAR_MATH_CALL)
 	# LOE rbx r15 r12d r13d
 	/* Special inputs
 	 * processing loop
 	 */
@@ -211,7 +210,7 @@ L(SPECIAL_VALUES_LOOP):
 	incl	%r12d
 	cmpl	$8, %r12d
 	/* Check bits in range mask */
 	jl	L(RANGEMASK_CHECK)
 	# LOE rbx r15 r12d r13d
@@ -223,7 +222,7 @@ L(SPECIAL_VALUES_LOOP):
 	cfi_restore(14)
 	vmovups	64(%rsp), %ymm0
 	/* Go to exit */
 	jmp	L(EXIT)
 	/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus) */
 	.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
@@ -233,19 +232,19 @@ L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 	/* Scalar math fucntion call
 	 * to process special input
 	 */
 L(SCALAR_MATH_CALL):
 	movl	%r12d, %r14d
-	movss	32(%rsp,%r14,4), %xmm0
+	movss	32(%rsp, %r14, 4), %xmm0
 	call	exp10f@PLT
 	# LOE rbx r14 r15 r12d r13d xmm0
-	movss	%xmm0, 64(%rsp,%r14,4)
+	movss	%xmm0, 64(%rsp, %r14, 4)
 	/* Process special inputs in loop */
 	jmp	L(SPECIAL_VALUES_LOOP)
 	# LOE rbx r15 r12d r13d
 END(_ZGVdN8v_exp10f_avx2)
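
The special-value path above spills the input vector and the vector result
to the stack, then walks the range mask bit by bit, recomputing only the
flagged lanes with scalar exp10f. Its shape in C (exp10f is a GNU
extension, hence _GNU_SOURCE):

    #define _GNU_SOURCE
    #include <math.h>

    /* Recompute lanes flagged in MASK with scalar exp10f, as the
       btl/call/jmp loop above does.  */
    static void
    fixup_lanes (const float in[8], float out[8], int mask)
    {
      for (int i = 0; i < 8; i++)
        if (mask & (1 << i))		/* btl %r12d, %r13d */
          out[i] = exp10f (in[i]);	/* call exp10f@PLT  */
    }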
@@ -255,8 +254,7 @@ END(_ZGVdN8v_exp10f_avx2)
 #ifdef __svml_sexp10_data_internal_typedef
 typedef unsigned int VUINT32;
-typedef struct
-{
+typedef struct {
 	__declspec(align(32)) VUINT32 _sT[(1<<5)][1];
 	__declspec(align(32)) VUINT32 _sLg2_10[8][1];
 	__declspec(align(32)) VUINT32 _sShifter[8][1];
@@ -271,7 +269,7 @@ typedef struct
 } __svml_sexp10_data_internal;
 #endif
 __svml_sexp10_data_internal:
-	/*== _sT ==*/
+	/* _sT */
 	.long	0x3f800000 // 2^( 0 /32 )
 	.long	0x3f82cd87 // 2^( 1 /32 )
 	.long	0x3f85aac3 // 2^( 2 /32 )
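
The 32 _sT entries are simply the single-precision values 2^(j/32); they
can be regenerated, up to the final-bit rounding glibc ships, with a few
lines of C:

    #include <inttypes.h>
    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int
    main (void)
    {
      for (int j = 0; j < 32; j++)
        {
          float t = (float) exp2 (j / 32.0);	/* 2^(j/32) */
          uint32_t bits;
          memcpy (&bits, &t, sizeof bits);
          printf ("\t.long\t0x%08" PRIx32 " // 2^( %d /32 )\n", bits, j);
        }
      return 0;
    }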
@@ -309,7 +307,7 @@ __svml_sexp10_data_internal:
 	.align	32
 	.long	0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000 /* _sShifter) */
 	.align	32
-	.long	0x3c1a2000, 0x3c1a2000, 0x3c1a2000, 0x3c1a2000, 0x3c1a2000, 0x3c1a2000, 0x3c1a2000, 0x3c1a2000 /* _sInvLg2_10hi/2^K hi (24-K-7) bits*/
+	.long	0x3c1a2000, 0x3c1a2000, 0x3c1a2000, 0x3c1a2000, 0x3c1a2000, 0x3c1a2000, 0x3c1a2000, 0x3c1a2000 /* _sInvLg2_10hi/2^K hi (24-K-7) bits */
 	.align	32
 	.long	0x341a84fc, 0x341a84fc, 0x341a84fc, 0x341a84fc, 0x341a84fc, 0x341a84fc, 0x341a84fc, 0x341a84fc /* _sInvLg2_10lo/2^K lo bits */
 	// otherwise exp10(0) won't produce exact 1.0
@@ -320,12 +318,12 @@ __svml_sexp10_data_internal:
 	.align	32
 	.long	0x4029a8d2, 0x4029a8d2, 0x4029a8d2, 0x4029a8d2, 0x4029a8d2, 0x4029a8d2, 0x4029a8d2, 0x4029a8d2 /* _sPC2 */
 	.align	32
-	.long	0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f /* _iIndexMask =(2^K-1)*/
+	.long	0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f, 0x0000001f /* _iIndexMask =(2^K-1) */
 	//common
 	.align	32
 	.long	0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff /* _iAbsMask */
 	.align	32
 	.long	0x4217b818, 0x4217b818, 0x4217b818, 0x4217b818, 0x4217b818, 0x4217b818, 0x4217b818, 0x4217b818 /* _iDomainRange=-log10(max_denormal=0x007fffff) RZ */
 	.align	32
-	.type	__svml_sexp10_data_internal,@object
-	.size	__svml_sexp10_data_internal,.-__svml_sexp10_data_internal
+	.type	__svml_sexp10_data_internal, @object
+	.size	__svml_sexp10_data_internal, .-__svml_sexp10_data_internal