mirror of
https://sourceware.org/git/glibc.git
synced 2024-11-22 13:00:06 +00:00
ef9c4cb6c7
The difference between memset and wmemset is byte vs int. Add stubs to SSE2/AVX2/AVX512 memset for wmemset with updated constant and size: SSE2 wmemset: shl $0x2,%rdx movd %esi,%xmm0 mov %rdi,%rax pshufd $0x0,%xmm0,%xmm0 jmp entry_from_wmemset SSE2 memset: movd %esi,%xmm0 mov %rdi,%rax punpcklbw %xmm0,%xmm0 punpcklwd %xmm0,%xmm0 pshufd $0x0,%xmm0,%xmm0 entry_from_wmemset: Since the ERMS versions of wmemset require "rep stosl" instead of "rep stosb", only the vector store stubs of SSE2/AVX2/AVX512 wmemset are added. The SSE2 wmemset is about 3X faster and the AVX2 wmemset is about 6X faster on Haswell. * include/wchar.h (__wmemset_chk): New. * sysdeps/x86_64/memset.S (VDUP_TO_VEC0_AND_SET_RETURN): Renamed to MEMSET_VDUP_TO_VEC0_AND_SET_RETURN. (WMEMSET_VDUP_TO_VEC0_AND_SET_RETURN): New. (WMEMSET_CHK_SYMBOL): Likewise. (WMEMSET_SYMBOL): Likewise. (__wmemset): Add hidden definition. (wmemset): Add weak hidden definition. * sysdeps/x86_64/multiarch/Makefile (sysdep_routines): Add wmemset_chk-nonshared. * sysdeps/x86_64/multiarch/ifunc-impl-list.c (__libc_ifunc_impl_list): Add __wmemset_sse2_unaligned, __wmemset_avx2_unaligned, __wmemset_avx512_unaligned, __wmemset_chk_sse2_unaligned, __wmemset_chk_avx2_unaligned and __wmemset_chk_avx512_unaligned. * sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S (VDUP_TO_VEC0_AND_SET_RETURN): Renamed to ... (MEMSET_VDUP_TO_VEC0_AND_SET_RETURN): This. (WMEMSET_VDUP_TO_VEC0_AND_SET_RETURN): New. (WMEMSET_SYMBOL): Likewise. * sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S (VDUP_TO_VEC0_AND_SET_RETURN): Renamed to ... (MEMSET_VDUP_TO_VEC0_AND_SET_RETURN): This. (WMEMSET_VDUP_TO_VEC0_AND_SET_RETURN): New. (WMEMSET_SYMBOL): Likewise. * sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S: Updated. (WMEMSET_CHK_SYMBOL): New. (WMEMSET_CHK_SYMBOL (__wmemset_chk, unaligned)): Likewise. (WMEMSET_SYMBOL (__wmemset, unaligned)): Likewise. * sysdeps/x86_64/multiarch/memset.S (WMEMSET_SYMBOL): New. 
(libc_hidden_builtin_def): Also define __GI_wmemset and __GI___wmemset. (weak_alias): New. * sysdeps/x86_64/multiarch/wmemset.c: New file. * sysdeps/x86_64/multiarch/wmemset.h: Likewise. * sysdeps/x86_64/multiarch/wmemset_chk-nonshared.S: Likewise. * sysdeps/x86_64/multiarch/wmemset_chk.c: Likewise. * sysdeps/x86_64/wmemset.c: Likewise. * sysdeps/x86_64/wmemset_chk.c: Likewise.
83 lines
2.5 KiB
ArmAsm
83 lines
2.5 KiB
ArmAsm
/* Multiple versions of memset
   All versions must be listed in ifunc-impl-list.c.
   Copyright (C) 2014-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <shlib-compat.h>
#include <init-arch.h>

/* Define multiple versions only for the definition in lib.  */
#if IS_IN (libc)
/* IFUNC resolver for memset: an @gnu_indirect_function whose body runs
   at relocation time and returns (in RAX) the address of the memset
   implementation best suited to the running CPU.

   Selection logic, top choice first:
     1. Prefer_ERMS                  -> __memset_erms
     2. no AVX2                      -> __memset_sse2_unaligned[_erms]
     3. AVX2, but Prefer_No_AVX512
        or no AVX512F               -> __memset_avx2_unaligned[_erms]
     4. AVX512F + Prefer_No_VZEROUPPER -> __memset_avx512_no_vzeroupper
     5. AVX512F                     -> __memset_avx512_unaligned[_erms]
   Within 2, 3 and 5 the "_erms" variant is kept when the CPU reports
   the ERMS feature; otherwise the plain unaligned variant replaces it.
   The code achieves this by speculatively loading a candidate into RAX
   and conditionally overwriting it, so every path falls through to the
   single "2: ret".  */
ENTRY(memset)
	.type	memset, @gnu_indirect_function
	/* Load the cpu-features block pointer (into RDX, per the macro
	   name) for the HAS_*_FEATURE checks below.  */
	LOAD_RTLD_GLOBAL_RO_RDX
	/* Candidate: plain ERMS (rep stosb) version.  */
	lea	__memset_erms(%rip), %RAX_LP
	HAS_ARCH_FEATURE (Prefer_ERMS)
	jnz	2f
	/* Candidate: SSE2 unaligned + ERMS; downgrade to plain SSE2
	   unaligned when the CPU lacks ERMS.  */
	lea	__memset_sse2_unaligned_erms(%rip), %RAX_LP
	HAS_CPU_FEATURE (ERMS)
	jnz	1f
	lea	__memset_sse2_unaligned(%rip), %RAX_LP
1:
	/* Without usable AVX2 the SSE2 choice above is final.  */
	HAS_ARCH_FEATURE (AVX2_Usable)
	jz	2f
	/* Candidate: AVX2 unaligned + ERMS, downgraded without ERMS.  */
	lea	__memset_avx2_unaligned_erms(%rip), %RAX_LP
	HAS_CPU_FEATURE (ERMS)
	jnz	L(AVX512F)
	lea	__memset_avx2_unaligned(%rip), %RAX_LP
L(AVX512F):
	/* Keep the AVX2 choice when AVX512 is discouraged or absent.  */
	HAS_ARCH_FEATURE (Prefer_No_AVX512)
	jnz	2f
	HAS_ARCH_FEATURE (AVX512F_Usable)
	jz	2f
	/* Candidate: AVX512 variant that avoids VZEROUPPER entirely.  */
	lea	__memset_avx512_no_vzeroupper(%rip), %RAX_LP
	HAS_ARCH_FEATURE (Prefer_No_VZEROUPPER)
	jnz	2f
	/* Candidate: AVX512 unaligned + ERMS, downgraded without ERMS.  */
	lea	__memset_avx512_unaligned_erms(%rip), %RAX_LP
	HAS_CPU_FEATURE (ERMS)
	jnz	2f
	lea	__memset_avx512_unaligned(%rip), %RAX_LP
2:	ret
END(memset)
#endif
|
|
|
|
#if IS_IN (libc)
/* Make the generic ../memset.S below emit its code under the
   "_sse2_" multiarch names; the IFUNC above (and wmemset.c) pick
   among them at run time.  */
# define MEMSET_SYMBOL(p,s)	p##_sse2_##s
# define WMEMSET_SYMBOL(p,s)	p##_sse2_##s

# ifdef SHARED
# undef libc_hidden_builtin_def
/* It doesn't make sense to send libc-internal memset calls through a PLT.
   The speedup we get from using SSE2 instructions is likely eaten away
   by the indirect call in the PLT.  */
/* Bind the internal __GI_* aliases (memset, wmemset and __wmemset)
   directly to the SSE2 unaligned implementations.  */
# define libc_hidden_builtin_def(name) \
	.globl __GI_memset; __GI_memset = __memset_sse2_unaligned; \
	.globl __GI_wmemset; __GI_wmemset = __wmemset_sse2_unaligned; \
	.globl __GI___wmemset; __GI___wmemset = __wmemset_sse2_unaligned
# endif

/* Redirect the weak_alias use in ../memset.S so that only the bzero
   weak symbol (aliased to __bzero) is emitted here.  */
# undef weak_alias
# define weak_alias(original, alias) \
	.weak bzero; bzero = __bzero

/* Expand strong_alias to nothing: the public strong symbols are
   provided by the IFUNC machinery, not by ../memset.S.  */
# undef strong_alias
# define strong_alias(original, alias)
#endif

#include "../memset.S"
|