Mirror of https://sourceware.org/git/glibc.git, synced 2024-12-25 20:21:07 +00:00
Commit e41b395523

On AMD processors, memcpy optimized with unaligned SSE load is slower
than memcpy optimized with aligned SSSE3, while other string functions
are faster with unaligned SSE load.  A feature bit, Fast_Unaligned_Copy,
is added to select memcpy optimized with unaligned SSE load.

        [BZ #19583]
        * sysdeps/x86/cpu-features.c (init_cpu_features): Set
        Fast_Unaligned_Copy with Fast_Unaligned_Load for Intel
        processors.  Set Fast_Copy_Backward for AMD Excavator
        processors.
        * sysdeps/x86/cpu-features.h (bit_arch_Fast_Unaligned_Copy):
        New.
        (index_arch_Fast_Unaligned_Copy): Likewise.
        * sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Check
        Fast_Unaligned_Copy instead of Fast_Unaligned_Load.
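To make the policy above concrete, here is a minimal C sketch of what the
commit message describes.  It is not the literal cpu-features.c hunk: the
toy_cpu_features struct, the is_intel/is_amd_excavator flags and
init_arch_features() are illustrative stand-ins for glibc's internal
cpu_features machinery.

/* Sketch only: Intel gets the new Fast_Unaligned_Copy bit alongside
   Fast_Unaligned_Load; AMD Excavator keeps unaligned SSE loads for the
   other string functions but steers memcpy to the aligned SSSE3
   backward copy via Fast_Copy_Backward.  */
#include <stdbool.h>

enum arch_feature
{
  Fast_Unaligned_Load,
  Fast_Unaligned_Copy,
  Fast_Copy_Backward,
  NUM_ARCH_FEATURES
};

struct toy_cpu_features
{
  bool arch[NUM_ARCH_FEATURES];
};

static void
init_arch_features (struct toy_cpu_features *cf,
                    bool is_intel, bool is_amd_excavator)
{
  if (is_intel)
    {
      /* Intel: unaligned SSE loads are fast for memcpy as well, so
         Fast_Unaligned_Copy is set together with Fast_Unaligned_Load
         and memcpy keeps the unaligned SSE2 variant.  */
      cf->arch[Fast_Unaligned_Load] = true;
      cf->arch[Fast_Unaligned_Copy] = true;
    }
  else if (is_amd_excavator)
    {
      /* AMD Excavator: other string functions still benefit from
         unaligned SSE loads, but memcpy is faster with aligned SSSE3,
         so Fast_Unaligned_Copy stays clear and Fast_Copy_Backward is
         set instead.  */
      cf->arch[Fast_Unaligned_Load] = true;
      cf->arch[Fast_Copy_Backward] = true;
    }
}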
sysdeps/x86_64/multiarch/memcpy.S (91 lines, 2.7 KiB, x86-64 assembly)
/* Multiple versions of memcpy
   All versions must be listed in ifunc-impl-list.c.
   Copyright (C) 2010-2016 Free Software Foundation, Inc.
   Contributed by Intel Corporation.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <shlib-compat.h>
#include <init-arch.h>

/* Define multiple versions only for the definition in lib and for
   DSO.  In static binaries we need memcpy before the initialization
   happened.  */
#if defined SHARED && IS_IN (libc)
        .text
ENTRY(__new_memcpy)
        .type   __new_memcpy, @gnu_indirect_function
        LOAD_RTLD_GLOBAL_RO_RDX
#ifdef HAVE_AVX512_ASM_SUPPORT
        HAS_ARCH_FEATURE (AVX512F_Usable)
        jz      1f
        HAS_ARCH_FEATURE (Prefer_No_VZEROUPPER)
        jz      1f
        lea     __memcpy_avx512_no_vzeroupper(%rip), %RAX_LP
        ret
#endif
1:      lea     __memcpy_avx_unaligned(%rip), %RAX_LP
        HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
        jnz     2f
        lea     __memcpy_sse2_unaligned(%rip), %RAX_LP
        HAS_ARCH_FEATURE (Fast_Unaligned_Copy)
        jnz     2f
        lea     __memcpy_sse2(%rip), %RAX_LP
        HAS_CPU_FEATURE (SSSE3)
        jz      2f
        lea     __memcpy_ssse3_back(%rip), %RAX_LP
        HAS_ARCH_FEATURE (Fast_Copy_Backward)
        jnz     2f
        lea     __memcpy_ssse3(%rip), %RAX_LP
2:      ret
END(__new_memcpy)

# undef ENTRY
# define ENTRY(name) \
        .type __memcpy_sse2, @function; \
        .globl __memcpy_sse2; \
        .hidden __memcpy_sse2; \
        .p2align 4; \
        __memcpy_sse2: cfi_startproc; \
        CALL_MCOUNT
# undef END
# define END(name) \
        cfi_endproc; .size __memcpy_sse2, .-__memcpy_sse2

# undef ENTRY_CHK
# define ENTRY_CHK(name) \
        .type __memcpy_chk_sse2, @function; \
        .globl __memcpy_chk_sse2; \
        .p2align 4; \
        __memcpy_chk_sse2: cfi_startproc; \
        CALL_MCOUNT
# undef END_CHK
# define END_CHK(name) \
        cfi_endproc; .size __memcpy_chk_sse2, .-__memcpy_chk_sse2

# undef libc_hidden_builtin_def
/* It doesn't make sense to send libc-internal memcpy calls through a PLT.
   The speedup we get from using SSSE3 instruction is likely eaten away
   by the indirect call in the PLT.  */
# define libc_hidden_builtin_def(name) \
        .globl __GI_memcpy; __GI_memcpy = __memcpy_sse2

versioned_symbol (libc, __new_memcpy, memcpy, GLIBC_2_14);
#endif

#include "../memcpy.S"
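For readers who prefer C to assembly, the selection order implemented by
the __new_memcpy IFUNC resolver above can be sketched roughly as follows.
select_memcpy() and has_feature() are hypothetical stand-ins for the
resolver and for the HAS_CPU_FEATURE/HAS_ARCH_FEATURE macros; the extern
declarations stand for the real memcpy variants.

/* Rough C sketch of the dispatch order in __new_memcpy; illustrative
   only, not a glibc API.  */
#include <stddef.h>

typedef void *(*memcpy_fn) (void *, const void *, size_t);

extern void *__memcpy_avx512_no_vzeroupper (void *, const void *, size_t);
extern void *__memcpy_avx_unaligned (void *, const void *, size_t);
extern void *__memcpy_sse2_unaligned (void *, const void *, size_t);
extern void *__memcpy_sse2 (void *, const void *, size_t);
extern void *__memcpy_ssse3_back (void *, const void *, size_t);
extern void *__memcpy_ssse3 (void *, const void *, size_t);

extern int has_feature (const char *name);   /* hypothetical predicate */

static memcpy_fn
select_memcpy (void)
{
#ifdef HAVE_AVX512_ASM_SUPPORT
  /* AVX-512 variant only when it is usable and VZEROUPPER is to be
     avoided.  */
  if (has_feature ("AVX512F_Usable")
      && has_feature ("Prefer_No_VZEROUPPER"))
    return __memcpy_avx512_no_vzeroupper;
#endif
  if (has_feature ("AVX_Fast_Unaligned_Load"))
    return __memcpy_avx_unaligned;
  /* The new Fast_Unaligned_Copy bit, rather than Fast_Unaligned_Load,
     now gates the unaligned SSE2 copy.  */
  if (has_feature ("Fast_Unaligned_Copy"))
    return __memcpy_sse2_unaligned;
  if (!has_feature ("SSSE3"))
    return __memcpy_sse2;
  if (has_feature ("Fast_Copy_Backward"))
    return __memcpy_ssse3_back;
  return __memcpy_ssse3;
}

On an AMD processor that lacks Fast_Unaligned_Copy but has SSSE3, the
chain falls through to __memcpy_ssse3_back or __memcpy_ssse3, which is
exactly the behaviour the commit message asks for.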