x86-64: Add strcpy family functions with 256-bit EVEX
Update ifunc-strcpy.h to select the functions optimized with 256-bit EVEX instructions when AVX512VL and AVX512BW are usable. These implementations use only the YMM16-YMM31 registers, so no VZEROUPPER is needed at function exit, which avoids RTM transaction aborts.
parent 1fd8c163a8
commit 525bc2a32c
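The caller-side failure mode being avoided is worth spelling out. The sketch below is a hypothetical illustration, not part of this commit (it assumes x86-64 and GCC with -mrtm; copy_in_transaction is an invented name): a string function that executes VZEROUPPER before returning can abort an RTM transaction it was called from, while the EVEX variants added here touch only YMM16-YMM31 and return without it.

/* Hypothetical illustration, not glibc code.  Compile with gcc -mrtm.  */
#include <immintrin.h>
#include <string.h>

int
copy_in_transaction (char *dst, const char *src)
{
  unsigned int status = _xbegin ();	/* start an RTM transaction */
  if (status == _XBEGIN_STARTED)
    {
      /* If strcpy resolves to an AVX2 implementation that executes
	 VZEROUPPER before returning, the transaction can abort even
	 without any memory conflict.  An EVEX implementation using
	 only ymm16-ymm31 ends without VZEROUPPER and cannot abort
	 the transaction this way.  */
      strcpy (dst, src);
      _xend ();				/* commit */
      return 0;
    }
  return -1;				/* aborted: caller falls back, e.g. to a lock */
}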
sysdeps/x86_64/multiarch/Makefile
@@ -43,11 +43,17 @@ sysdep_routines += strncat-c stpncpy-c strncpy-c \
 		   memchr-evex \
 		   memrchr-evex \
 		   rawmemchr-evex \
+		   stpcpy-evex \
+		   stpncpy-evex \
+		   strcat-evex \
 		   strchr-evex \
 		   strchrnul-evex \
 		   strcmp-evex \
+		   strcpy-evex \
 		   strlen-evex \
+		   strncat-evex \
 		   strncmp-evex \
+		   strncpy-evex \
 		   strnlen-evex \
 		   strrchr-evex
 CFLAGS-varshift.c += -msse4
sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -224,6 +224,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 			      __stpncpy_ssse3)
 	      IFUNC_IMPL_ADD (array, i, stpncpy, CPU_FEATURE_USABLE (AVX2),
 			      __stpncpy_avx2)
+	      IFUNC_IMPL_ADD (array, i, stpncpy,
+			      (CPU_FEATURE_USABLE (AVX512VL)
+			       && CPU_FEATURE_USABLE (AVX512BW)),
+			      __stpncpy_evex)
 	      IFUNC_IMPL_ADD (array, i, stpncpy, 1,
 			      __stpncpy_sse2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, stpncpy, 1, __stpncpy_sse2))
@@ -234,6 +238,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 			      __stpcpy_ssse3)
 	      IFUNC_IMPL_ADD (array, i, stpcpy, CPU_FEATURE_USABLE (AVX2),
 			      __stpcpy_avx2)
+	      IFUNC_IMPL_ADD (array, i, stpcpy,
+			      (CPU_FEATURE_USABLE (AVX512VL)
+			       && CPU_FEATURE_USABLE (AVX512BW)),
+			      __stpcpy_evex)
 	      IFUNC_IMPL_ADD (array, i, stpcpy, 1, __stpcpy_sse2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, stpcpy, 1, __stpcpy_sse2))
 
@@ -268,6 +276,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   IFUNC_IMPL (i, name, strcat,
 	      IFUNC_IMPL_ADD (array, i, strcat, CPU_FEATURE_USABLE (AVX2),
 			      __strcat_avx2)
+	      IFUNC_IMPL_ADD (array, i, strcat,
+			      (CPU_FEATURE_USABLE (AVX512VL)
+			       && CPU_FEATURE_USABLE (AVX512BW)),
+			      __strcat_evex)
 	      IFUNC_IMPL_ADD (array, i, strcat, CPU_FEATURE_USABLE (SSSE3),
 			      __strcat_ssse3)
 	      IFUNC_IMPL_ADD (array, i, strcat, 1, __strcat_sse2_unaligned)
@@ -330,6 +342,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   IFUNC_IMPL (i, name, strcpy,
 	      IFUNC_IMPL_ADD (array, i, strcpy, CPU_FEATURE_USABLE (AVX2),
 			      __strcpy_avx2)
+	      IFUNC_IMPL_ADD (array, i, strcpy,
+			      (CPU_FEATURE_USABLE (AVX512VL)
+			       && CPU_FEATURE_USABLE (AVX512BW)),
+			      __strcpy_evex)
 	      IFUNC_IMPL_ADD (array, i, strcpy, CPU_FEATURE_USABLE (SSSE3),
 			      __strcpy_ssse3)
 	      IFUNC_IMPL_ADD (array, i, strcpy, 1, __strcpy_sse2_unaligned)
@@ -373,6 +389,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   IFUNC_IMPL (i, name, strncat,
 	      IFUNC_IMPL_ADD (array, i, strncat, CPU_FEATURE_USABLE (AVX2),
 			      __strncat_avx2)
+	      IFUNC_IMPL_ADD (array, i, strncat,
+			      (CPU_FEATURE_USABLE (AVX512VL)
+			       && CPU_FEATURE_USABLE (AVX512BW)),
+			      __strncat_evex)
 	      IFUNC_IMPL_ADD (array, i, strncat, CPU_FEATURE_USABLE (SSSE3),
 			      __strncat_ssse3)
 	      IFUNC_IMPL_ADD (array, i, strncat, 1,
@@ -383,6 +403,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   IFUNC_IMPL (i, name, strncpy,
 	      IFUNC_IMPL_ADD (array, i, strncpy, CPU_FEATURE_USABLE (AVX2),
 			      __strncpy_avx2)
+	      IFUNC_IMPL_ADD (array, i, strncpy,
+			      (CPU_FEATURE_USABLE (AVX512VL)
+			       && CPU_FEATURE_USABLE (AVX512BW)),
+			      __strncpy_evex)
 	      IFUNC_IMPL_ADD (array, i, strncpy, CPU_FEATURE_USABLE (SSSE3),
 			      __strncpy_ssse3)
 	      IFUNC_IMPL_ADD (array, i, strncpy, 1,
sysdeps/x86_64/multiarch/ifunc-strcpy.h
@@ -25,16 +25,23 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned)
   attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2) attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex) attribute_hidden;
 
 static inline void *
 IFUNC_SELECTOR (void)
 {
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
-  if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)
-      && CPU_FEATURE_USABLE_P (cpu_features, AVX2)
+  if (CPU_FEATURE_USABLE_P (cpu_features, AVX2)
       && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
-    return OPTIMIZE (avx2);
+    {
+      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
+	  && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
+	return OPTIMIZE (evex);
+
+      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
+	return OPTIMIZE (avx2);
+    }
 
   if (CPU_FEATURES_ARCH_P (cpu_features, Fast_Unaligned_Load))
     return OPTIMIZE (sse2_unaligned);
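For readers unfamiliar with the mechanism this header drives: an IFUNC resolver like IFUNC_SELECTOR runs once, at symbol-relocation time, and the dynamic linker binds the exported symbol to whichever implementation the resolver returns. Below is a minimal self-contained sketch of the same idea (assuming GCC on an ELF target; the my_strcpy* names and the use of __builtin_cpu_supports are illustrative stand-ins for glibc's internal cpu_features machinery, not its actual code):

typedef char *(*strcpy_impl) (char *, const char *);

/* Portable fallback; stands in for __strcpy_sse2_unaligned.  */
static char *
my_strcpy_baseline (char *dst, const char *src)
{
  char *ret = dst;
  while ((*dst++ = *src++) != '\0')
    ;
  return ret;
}

/* Stand-ins for the vectorized variants; the real ones are assembly.  */
static char *
my_strcpy_avx2 (char *dst, const char *src)
{ return my_strcpy_baseline (dst, src); }

static char *
my_strcpy_evex (char *dst, const char *src)
{ return my_strcpy_baseline (dst, src); }

/* The resolver mirrors the selection order in the diff above: prefer
   the EVEX version when AVX512VL and AVX512BW are usable, then AVX2,
   then the baseline.  */
static strcpy_impl
resolve_my_strcpy (void)
{
  __builtin_cpu_init ();
  if (__builtin_cpu_supports ("avx512vl")
      && __builtin_cpu_supports ("avx512bw"))
    return my_strcpy_evex;
  if (__builtin_cpu_supports ("avx2"))
    return my_strcpy_avx2;
  return my_strcpy_baseline;
}

char *my_strcpy (char *dst, const char *src)
     __attribute__ ((ifunc ("resolve_my_strcpy")));

Note that the real selector is more careful than a plain CPUID check: it also requires AVX_Fast_Unaligned_Load and consults the Prefer_No_VZEROUPPER tunable before falling back to AVX2.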
sysdeps/x86_64/multiarch/stpcpy-evex.S (new file)
@@ -0,0 +1,3 @@
#define USE_AS_STPCPY
#define STRCPY __stpcpy_evex
#include "strcpy-evex.S"
sysdeps/x86_64/multiarch/stpncpy-evex.S (new file)
@@ -0,0 +1,4 @@
#define USE_AS_STPCPY
#define USE_AS_STRNCPY
#define STRCPY __stpncpy_evex
#include "strcpy-evex.S"
sysdeps/x86_64/multiarch/strcat-evex.S (new file)
@@ -0,0 +1,283 @@
/* strcat with 256-bit EVEX instructions.
   Copyright (C) 2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#if IS_IN (libc)

# include <sysdep.h>

# ifndef STRCAT
#  define STRCAT  __strcat_evex
# endif

# define VMOVU vmovdqu64
# define VMOVA vmovdqa64

/* zero register */
# define XMMZERO xmm16
# define YMMZERO ymm16
# define YMM0 ymm17
# define YMM1 ymm18

# define USE_AS_STRCAT

/* Number of bytes in a vector register */
# define VEC_SIZE 32

	.section .text.evex,"ax",@progbits
ENTRY (STRCAT)
	mov	%rdi, %r9
# ifdef USE_AS_STRNCAT
	mov	%rdx, %r8
# endif

	xor	%eax, %eax
	mov	%edi, %ecx
	and	$((VEC_SIZE * 4) - 1), %ecx
	vpxorq	%XMMZERO, %XMMZERO, %XMMZERO
	cmp	$(VEC_SIZE * 3), %ecx
	ja	L(fourth_vector_boundary)
	vpcmpb	$0, (%rdi), %YMMZERO, %k0
	kmovd	%k0, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_first_vector)
	mov	%rdi, %rax
	and	$-VEC_SIZE, %rax
	jmp	L(align_vec_size_start)
L(fourth_vector_boundary):
	mov	%rdi, %rax
	and	$-VEC_SIZE, %rax
	vpcmpb	$0, (%rax), %YMMZERO, %k0
	mov	$-1, %r10d
	sub	%rax, %rcx
	shl	%cl, %r10d
	kmovd	%k0, %edx
	and	%r10d, %edx
	jnz	L(exit)

L(align_vec_size_start):
	vpcmpb	$0, VEC_SIZE(%rax), %YMMZERO, %k0
	kmovd	%k0, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_second_vector)

	vpcmpb	$0, (VEC_SIZE * 2)(%rax), %YMMZERO, %k1
	kmovd	%k1, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_third_vector)

	vpcmpb	$0, (VEC_SIZE * 3)(%rax), %YMMZERO, %k2
	kmovd	%k2, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fourth_vector)

	vpcmpb	$0, (VEC_SIZE * 4)(%rax), %YMMZERO, %k3
	kmovd	%k3, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fifth_vector)

	vpcmpb	$0, (VEC_SIZE * 5)(%rax), %YMMZERO, %k4
	add	$(VEC_SIZE * 4), %rax
	kmovd	%k4, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_second_vector)

	vpcmpb	$0, (VEC_SIZE * 2)(%rax), %YMMZERO, %k1
	kmovd	%k1, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_third_vector)

	vpcmpb	$0, (VEC_SIZE * 3)(%rax), %YMMZERO, %k2
	kmovd	%k2, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fourth_vector)

	vpcmpb	$0, (VEC_SIZE * 4)(%rax), %YMMZERO, %k3
	kmovd	%k3, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fifth_vector)

	vpcmpb	$0, (VEC_SIZE * 5)(%rax), %YMMZERO, %k4
	kmovd	%k4, %edx
	add	$(VEC_SIZE * 4), %rax
	test	%edx, %edx
	jnz	L(exit_null_on_second_vector)

	vpcmpb	$0, (VEC_SIZE * 2)(%rax), %YMMZERO, %k1
	kmovd	%k1, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_third_vector)

	vpcmpb	$0, (VEC_SIZE * 3)(%rax), %YMMZERO, %k2
	kmovd	%k2, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fourth_vector)

	vpcmpb	$0, (VEC_SIZE * 4)(%rax), %YMMZERO, %k3
	kmovd	%k3, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fifth_vector)

	vpcmpb	$0, (VEC_SIZE * 5)(%rax), %YMMZERO, %k4
	add	$(VEC_SIZE * 4), %rax
	kmovd	%k4, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_second_vector)

	vpcmpb	$0, (VEC_SIZE * 2)(%rax), %YMMZERO, %k1
	kmovd	%k1, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_third_vector)

	vpcmpb	$0, (VEC_SIZE * 3)(%rax), %YMMZERO, %k2
	kmovd	%k2, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fourth_vector)

	vpcmpb	$0, (VEC_SIZE * 4)(%rax), %YMMZERO, %k3
	kmovd	%k3, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fifth_vector)

	test	$((VEC_SIZE * 4) - 1), %rax
	jz	L(align_four_vec_loop)

	vpcmpb	$0, (VEC_SIZE * 5)(%rax), %YMMZERO, %k4
	add	$(VEC_SIZE * 5), %rax
	kmovd	%k4, %edx
	test	%edx, %edx
	jnz	L(exit)

	test	$((VEC_SIZE * 4) - 1), %rax
	jz	L(align_four_vec_loop)

	vpcmpb	$0, VEC_SIZE(%rax), %YMMZERO, %k0
	add	$VEC_SIZE, %rax
	kmovd	%k0, %edx
	test	%edx, %edx
	jnz	L(exit)

	test	$((VEC_SIZE * 4) - 1), %rax
	jz	L(align_four_vec_loop)

	vpcmpb	$0, VEC_SIZE(%rax), %YMMZERO, %k0
	add	$VEC_SIZE, %rax
	kmovd	%k0, %edx
	test	%edx, %edx
	jnz	L(exit)

	test	$((VEC_SIZE * 4) - 1), %rax
	jz	L(align_four_vec_loop)

	vpcmpb	$0, VEC_SIZE(%rax), %YMMZERO, %k1
	add	$VEC_SIZE, %rax
	kmovd	%k1, %edx
	test	%edx, %edx
	jnz	L(exit)

	add	$VEC_SIZE, %rax

	.p2align 4
L(align_four_vec_loop):
	VMOVA	(%rax), %YMM0
	VMOVA	(VEC_SIZE * 2)(%rax), %YMM1
	vpminub	VEC_SIZE(%rax), %YMM0, %YMM0
	vpminub	(VEC_SIZE * 3)(%rax), %YMM1, %YMM1
	vpminub	%YMM0, %YMM1, %YMM0
	/* If K0 != 0, there is a null byte.  */
	vpcmpb	$0, %YMM0, %YMMZERO, %k0
	add	$(VEC_SIZE * 4), %rax
	ktestd	%k0, %k0
	jz	L(align_four_vec_loop)

	vpcmpb	$0, -(VEC_SIZE * 4)(%rax), %YMMZERO, %k0
	sub	$(VEC_SIZE * 5), %rax
	kmovd	%k0, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_second_vector)

	vpcmpb	$0, (VEC_SIZE * 2)(%rax), %YMMZERO, %k1
	kmovd	%k1, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_third_vector)

	vpcmpb	$0, (VEC_SIZE * 3)(%rax), %YMMZERO, %k2
	kmovd	%k2, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fourth_vector)

	vpcmpb	$0, (VEC_SIZE * 4)(%rax), %YMMZERO, %k3
	kmovd	%k3, %edx
	sub	%rdi, %rax
	bsf	%rdx, %rdx
	add	%rdx, %rax
	add	$(VEC_SIZE * 4), %rax
	jmp	L(StartStrcpyPart)

	.p2align 4
L(exit):
	sub	%rdi, %rax
L(exit_null_on_first_vector):
	bsf	%rdx, %rdx
	add	%rdx, %rax
	jmp	L(StartStrcpyPart)

	.p2align 4
L(exit_null_on_second_vector):
	sub	%rdi, %rax
	bsf	%rdx, %rdx
	add	%rdx, %rax
	add	$VEC_SIZE, %rax
	jmp	L(StartStrcpyPart)

	.p2align 4
L(exit_null_on_third_vector):
	sub	%rdi, %rax
	bsf	%rdx, %rdx
	add	%rdx, %rax
	add	$(VEC_SIZE * 2), %rax
	jmp	L(StartStrcpyPart)

	.p2align 4
L(exit_null_on_fourth_vector):
	sub	%rdi, %rax
	bsf	%rdx, %rdx
	add	%rdx, %rax
	add	$(VEC_SIZE * 3), %rax
	jmp	L(StartStrcpyPart)

	.p2align 4
L(exit_null_on_fifth_vector):
	sub	%rdi, %rax
	bsf	%rdx, %rdx
	add	%rdx, %rax
	add	$(VEC_SIZE * 4), %rax

	.p2align 4
L(StartStrcpyPart):
	lea	(%r9, %rax), %rdi
	mov	%rsi, %rcx
	mov	%r9, %rax	/* save result */

# ifdef USE_AS_STRNCAT
	test	%r8, %r8
	jz	L(ExitZero)
#  define USE_AS_STRNCPY
# endif

# include "strcpy-evex.S"
#endif
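The main loop's null-byte scan (L(align_four_vec_loop) above, flagged by the "If K0 != 0, there is a null byte" comment) relies on the fact that the unsigned byte-wise minimum of four vectors has a zero lane iff one of the 128 input bytes is zero, so four loads need only a single compare-to-mask. A rough C intrinsics equivalent, as a sketch only (assumes a -mavx512vl -mavx512bw build; has_nul_4x32 is a hypothetical helper, not glibc code):

#include <immintrin.h>

/* Return nonzero if any of the 128 bytes at P (32-byte aligned) is
   NUL.  Mirrors the vpminub + vpcmpb trick in the loop above.  */
static int
has_nul_4x32 (const unsigned char *p)
{
  __m256i v0 = _mm256_load_si256 ((const __m256i *) p);
  __m256i v1 = _mm256_load_si256 ((const __m256i *) (p + 32));
  __m256i v2 = _mm256_load_si256 ((const __m256i *) (p + 64));
  __m256i v3 = _mm256_load_si256 ((const __m256i *) (p + 96));
  /* min == 0 in a lane iff some input byte in that lane was 0.  */
  __m256i m = _mm256_min_epu8 (_mm256_min_epu8 (v0, v1),
			       _mm256_min_epu8 (v2, v3));
  /* EVEX compare straight to a mask register, like vpcmpb $0.  */
  __mmask32 k = _mm256_cmpeq_epi8_mask (m, _mm256_setzero_si256 ());
  return k != 0;
}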
sysdeps/x86_64/multiarch/strcpy-evex.S (new file, 1003 lines)
File diff suppressed because it is too large.
sysdeps/x86_64/multiarch/strncat-evex.S (new file)
@@ -0,0 +1,3 @@
#define USE_AS_STRNCAT
#define STRCAT __strncat_evex
#include "strcat-evex.S"
sysdeps/x86_64/multiarch/strncpy-evex.S (new file)
@@ -0,0 +1,3 @@
#define USE_AS_STRNCPY
#define STRCPY __strncpy_evex
#include "strcpy-evex.S"