Mirror of https://sourceware.org/git/glibc.git (synced 2024-11-22 13:00:06 +00:00)
Commit c867597bff:
Since the new SSE2/AVX2 memcpy/memmove are faster than the previous ones, we can remove the previous SSE2/AVX2 memcpy/memmove and replace them with the new ones.

No change in IFUNC selection if SSE2 and AVX2 memcpy/memmove weren't used before. If SSE2 or AVX2 memcpy/memmove were used, the new SSE2 or AVX2 memcpy/memmove optimized with Enhanced REP MOVSB will be used for processors with ERMS. The new AVX512 memcpy/memmove will be used for processors with AVX512 which prefer vzeroupper. (A rough C sketch of this selection order follows the log below.)

Since the new SSE2 memcpy/memmove are faster than the previous default memcpy/memmove used in libc.a and ld.so, we also remove the previous default memcpy/memmove and make the new SSE2 memcpy/memmove the default, except that the non-temporal store isn't used in ld.so.

Together, this reduces the size of libc.so by about 6 KB and the size of ld.so by about 2 KB.

[BZ #19776]
* sysdeps/x86_64/memcpy.S: Make it dummy.
* sysdeps/x86_64/mempcpy.S: Likewise.
* sysdeps/x86_64/memmove.S: New file.
* sysdeps/x86_64/memmove_chk.S: Likewise.
* sysdeps/x86_64/multiarch/memmove.S: Likewise.
* sysdeps/x86_64/multiarch/memmove_chk.S: Likewise.
* sysdeps/x86_64/memmove.c: Removed.
* sysdeps/x86_64/multiarch/memcpy-avx-unaligned.S: Likewise.
* sysdeps/x86_64/multiarch/memcpy-sse2-unaligned.S: Likewise.
* sysdeps/x86_64/multiarch/memmove-avx-unaligned.S: Likewise.
* sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S: Likewise.
* sysdeps/x86_64/multiarch/memmove.c: Likewise.
* sysdeps/x86_64/multiarch/memmove_chk.c: Likewise.
* sysdeps/x86_64/multiarch/Makefile (sysdep_routines): Remove memcpy-sse2-unaligned, memmove-avx-unaligned, memcpy-avx-unaligned and memmove-sse2-unaligned-erms.
* sysdeps/x86_64/multiarch/ifunc-impl-list.c (__libc_ifunc_impl_list): Replace __memmove_chk_avx512_unaligned_2 with __memmove_chk_avx512_unaligned. Remove __memmove_chk_avx_unaligned_2. Replace __memmove_chk_sse2_unaligned_2 with __memmove_chk_sse2_unaligned. Remove __memmove_chk_sse2 and __memmove_avx_unaligned_2. Replace __memmove_avx512_unaligned_2 with __memmove_avx512_unaligned. Replace __memmove_sse2_unaligned_2 with __memmove_sse2_unaligned. Remove __memmove_sse2. Replace __memcpy_chk_avx512_unaligned_2 with __memcpy_chk_avx512_unaligned. Remove __memcpy_chk_avx_unaligned_2. Replace __memcpy_chk_sse2_unaligned_2 with __memcpy_chk_sse2_unaligned. Remove __memcpy_chk_sse2. Remove __memcpy_avx_unaligned_2. Replace __memcpy_avx512_unaligned_2 with __memcpy_avx512_unaligned. Remove __memcpy_sse2_unaligned_2 and __memcpy_sse2. Replace __mempcpy_chk_avx512_unaligned_2 with __mempcpy_chk_avx512_unaligned. Remove __mempcpy_chk_avx_unaligned_2. Replace __mempcpy_chk_sse2_unaligned_2 with __mempcpy_chk_sse2_unaligned. Remove __mempcpy_chk_sse2. Replace __mempcpy_avx512_unaligned_2 with __mempcpy_avx512_unaligned. Remove __mempcpy_avx_unaligned_2. Replace __mempcpy_sse2_unaligned_2 with __mempcpy_sse2_unaligned. Remove __mempcpy_sse2.
* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Support __memcpy_avx512_unaligned_erms and __memcpy_avx512_unaligned. Use __memcpy_avx_unaligned_erms and __memcpy_sse2_unaligned_erms if processor has ERMS. Default to __memcpy_sse2_unaligned. (ENTRY): Removed. (END): Likewise. (ENTRY_CHK): Likewise. (libc_hidden_builtin_def): Likewise. Don't include ../memcpy.S.
* sysdeps/x86_64/multiarch/memcpy_chk.S (__memcpy_chk): Support __memcpy_chk_avx512_unaligned_erms and __memcpy_chk_avx512_unaligned. Use __memcpy_chk_avx_unaligned_erms and __memcpy_chk_sse2_unaligned_erms if processor has ERMS. Default to __memcpy_chk_sse2_unaligned.
* sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S: Change function suffix from unaligned_2 to unaligned.
* sysdeps/x86_64/multiarch/mempcpy.S (__mempcpy): Support __mempcpy_avx512_unaligned_erms and __mempcpy_avx512_unaligned. Use __mempcpy_avx_unaligned_erms and __mempcpy_sse2_unaligned_erms if processor has ERMS. Default to __mempcpy_sse2_unaligned. (ENTRY): Removed. (END): Likewise. (ENTRY_CHK): Likewise. (libc_hidden_builtin_def): Likewise. Don't include ../mempcpy.S. (mempcpy): New. Add a weak alias.
* sysdeps/x86_64/multiarch/mempcpy_chk.S (__mempcpy_chk): Support __mempcpy_chk_avx512_unaligned_erms and __mempcpy_chk_avx512_unaligned. Use __mempcpy_chk_avx_unaligned_erms and __mempcpy_chk_sse2_unaligned_erms if processor has ERMS. Default to __mempcpy_chk_sse2_unaligned.
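For illustration only, here is a rough C sketch of the memcpy selection order the message describes. This is not glibc source: the real resolver lives in sysdeps/x86_64/multiarch/memcpy.S and is written in assembly using glibc's HAS_CPU_FEATURE / HAS_ARCH_FEATURE macros; the has_* / prefer_* predicates below are hypothetical stand-ins for those checks, and the SSSE3 fallback paths are omitted.

/* Hypothetical sketch of the IFUNC selection order described above.
   Not glibc code; the predicate functions are stand-ins for glibc's
   cpu_features checks.  */
#include <stddef.h>

extern void *__memcpy_avx512_no_vzeroupper (void *, const void *, size_t);
extern void *__memcpy_avx512_unaligned_erms (void *, const void *, size_t);
extern void *__memcpy_avx512_unaligned (void *, const void *, size_t);
extern void *__memcpy_avx_unaligned_erms (void *, const void *, size_t);
extern void *__memcpy_avx_unaligned (void *, const void *, size_t);
extern void *__memcpy_sse2_unaligned_erms (void *, const void *, size_t);
extern void *__memcpy_sse2_unaligned (void *, const void *, size_t);

/* Hypothetical CPU-feature predicates.  */
extern int has_avx512f (void);
extern int prefer_no_vzeroupper (void);
extern int has_avx_fast_unaligned_load (void);
extern int has_erms (void);

typedef void *(*memcpy_fn) (void *, const void *, size_t);

static memcpy_fn
select_memcpy (void)
{
  if (has_avx512f ())
    {
      if (prefer_no_vzeroupper ())
        return __memcpy_avx512_no_vzeroupper;
      return has_erms () ? __memcpy_avx512_unaligned_erms
                         : __memcpy_avx512_unaligned;
    }
  if (has_avx_fast_unaligned_load ())
    return has_erms () ? __memcpy_avx_unaligned_erms
                       : __memcpy_avx_unaligned;
  /* New default: SSE2 unaligned, with an ERMS variant when available.  */
  return has_erms () ? __memcpy_sse2_unaligned_erms
                     : __memcpy_sse2_unaligned;
}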
sysdeps/x86_64/multiarch/mempcpy_chk.S (73 lines, 2.2 KiB)
/* Multiple versions of __mempcpy_chk
   All versions must be listed in ifunc-impl-list.c.
   Copyright (C) 2010-2016 Free Software Foundation, Inc.
   Contributed by Intel Corporation.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <init-arch.h>

/* Define multiple versions only for the definition in lib and for
   DSO.  There are no multiarch mempcpy functions for static binaries.
 */
#if IS_IN (libc)
# ifdef SHARED
	.text
ENTRY(__mempcpy_chk)
	.type	__mempcpy_chk, @gnu_indirect_function
	LOAD_RTLD_GLOBAL_RO_RDX
# ifdef HAVE_AVX512_ASM_SUPPORT
	HAS_ARCH_FEATURE (AVX512F_Usable)
	jz	1f
	lea	__mempcpy_chk_avx512_no_vzeroupper(%rip), %RAX_LP
	HAS_ARCH_FEATURE (Prefer_No_VZEROUPPER)
	jnz	2f
	lea	__mempcpy_chk_avx512_unaligned_erms(%rip), %RAX_LP
	HAS_CPU_FEATURE (ERMS)
	jnz	2f
	lea	__mempcpy_chk_avx512_unaligned(%rip), %RAX_LP
	ret
# endif
1:	lea	__mempcpy_chk_avx_unaligned(%rip), %RAX_LP
	HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
	jz	L(Fast_Unaligned_Load)
	HAS_CPU_FEATURE (ERMS)
	jz	2f
	lea	__mempcpy_chk_avx_unaligned_erms(%rip), %RAX_LP
	ret
L(Fast_Unaligned_Load):
	lea	__mempcpy_chk_sse2_unaligned(%rip), %RAX_LP
	HAS_ARCH_FEATURE (Fast_Unaligned_Copy)
	jz	L(SSSE3)
	HAS_CPU_FEATURE (ERMS)
	jz	2f
	lea	__mempcpy_chk_sse2_unaligned_erms(%rip), %RAX_LP
	ret
L(SSSE3):
	HAS_CPU_FEATURE (SSSE3)
	jz	2f
	lea	__mempcpy_chk_ssse3_back(%rip), %RAX_LP
	HAS_ARCH_FEATURE (Fast_Copy_Backward)
	jnz	2f
	lea	__mempcpy_chk_ssse3(%rip), %RAX_LP
2:	ret
END(__mempcpy_chk)
# else
#  include "../mempcpy_chk.S"
# endif
#endif
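The ".type __mempcpy_chk, @gnu_indirect_function" directive marks __mempcpy_chk as a GNU IFUNC: the body is a resolver that the dynamic linker runs at relocation time, and the address it leaves in %rax (RAX_LP) becomes the address bound to __mempcpy_chk. LOAD_RTLD_GLOBAL_RO_RDX loads the address of ld.so's _rtld_global_ro data into %rdx so the HAS_CPU_FEATURE / HAS_ARCH_FEATURE checks can read the CPU features the dynamic linker detected at startup; glibc keeps these resolvers in assembly partly because they run before libc itself is fully initialized. For comparison, a minimal C-level use of the same mechanism with GCC's ifunc attribute might look like the sketch below; my_copy and its variants are made up for illustration and are not part of glibc.

/* Minimal illustration of the GNU IFUNC mechanism that the assembly
   above implements by hand.  Not glibc code: my_copy and its variants
   are hypothetical.  Requires GCC on a GNU/Linux x86-64 target.  */
#include <stddef.h>
#include <string.h>

static void *
my_copy_generic (void *dst, const void *src, size_t n)
{
  return memcpy (dst, src, n);
}

static void *
my_copy_avx2 (void *dst, const void *src, size_t n)
{
  /* Stand-in for an optimized variant.  */
  return memcpy (dst, src, n);
}

/* The resolver runs once, when the symbol is relocated, and returns
   the address of the variant to bind to my_copy.  */
static void *(*resolve_my_copy (void)) (void *, const void *, size_t)
{
  __builtin_cpu_init ();
  /* ERMS isn't exposed by __builtin_cpu_supports, so test AVX2 here
     purely as an example of a CPU-feature check.  */
  if (__builtin_cpu_supports ("avx2"))
    return my_copy_avx2;
  return my_copy_generic;
}

void *my_copy (void *dst, const void *src, size_t n)
  __attribute__ ((ifunc ("resolve_my_copy")));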