Mirror of https://sourceware.org/git/glibc.git
commit 935971ba6b
Optimize x86-64 memcmp/wmemcmp with AVX2.

It uses vector compares as much as possible.  It is as fast as the SSE4
memcmp for size <= 16 bytes and up to 2X faster for size > 16 bytes on
Haswell and Skylake.  Select AVX2 memcmp/wmemcmp on AVX2 machines where
vzeroupper is preferred and AVX unaligned load is fast.

NB: It uses TZCNT instead of BSF since TZCNT produces the same result
as BSF for non-zero input.  TZCNT is faster than BSF and executes as
BSF if the machine doesn't support TZCNT.

Key features:

1. For sizes from 2 to 7 bytes, load as big endian with movbe and bswap
   to avoid branches.
2. Use overlapping compares to avoid branches.
3. Use vector compares when size >= 4 bytes for memcmp or size >= 8
   bytes for wmemcmp.
4. If size is 8 * VEC_SIZE or less, unroll the loop.
5. Compare 4 * VEC_SIZE at a time with the aligned first memory area.
6. Use 2 vector compares when size is 2 * VEC_SIZE or less.
7. Use 4 vector compares when size is 4 * VEC_SIZE or less.
8. Use 8 vector compares when size is 8 * VEC_SIZE or less.

	* sysdeps/x86/cpu-features.h (index_cpu_MOVBE): New.
	* sysdeps/x86_64/multiarch/Makefile (sysdep_routines): Add
	memcmp-avx2 and wmemcmp-avx2.
	* sysdeps/x86_64/multiarch/ifunc-impl-list.c
	(__libc_ifunc_impl_list): Test __memcmp_avx2 and __wmemcmp_avx2.
	* sysdeps/x86_64/multiarch/memcmp-avx2.S: New file.
	* sysdeps/x86_64/multiarch/wmemcmp-avx2.S: Likewise.
	* sysdeps/x86_64/multiarch/memcmp.S: Use __memcmp_avx2 on AVX2
	machines if AVX unaligned load is fast and vzeroupper is
	preferred.
	* sysdeps/x86_64/multiarch/wmemcmp.S: Use __wmemcmp_avx2 on AVX2
	machines if AVX unaligned load is fast and vzeroupper is
	preferred.
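To illustrate the vector-compare-plus-TZCNT pattern described above,
here is a minimal C sketch of one 32-byte step using AVX2 intrinsics
(compile with, e.g., gcc -mavx2 -mbmi).  The helper name compare_vec32
and its offset parameter are inventions for this sketch, not names from
the commit; the real code is hand-written assembly in memcmp-avx2.S and
wmemcmp-avx2.S.

    #include <immintrin.h>
    #include <stddef.h>

    /* One 32-byte compare step; assumes both buffers have at least
       32 readable bytes at OFFSET.  VPCMPEQB yields a byte-equality
       mask, VPMOVMSKB packs it into 32 bits, and TZCNT of the
       inverted mask indexes the first mismatching byte.  */
    static int
    compare_vec32 (const unsigned char *s1, const unsigned char *s2,
                   size_t offset)
    {
      __m256i a = _mm256_loadu_si256 ((const __m256i *) (s1 + offset));
      __m256i b = _mm256_loadu_si256 ((const __m256i *) (s2 + offset));
      unsigned int eq = _mm256_movemask_epi8 (_mm256_cmpeq_epi8 (a, b));
      if (eq != 0xffffffffU)
        {
          /* ~eq is guaranteed non-zero here, exactly the case where
             TZCNT and BSF agree, as the commit message notes.  */
          unsigned int i = _tzcnt_u32 (~eq);
          return s1[offset + i] - s2[offset + i];
        }
      return 0;  /* All 32 bytes are equal.  */
    }

Key features 4 to 8 then follow from stacking this step: sizes up to
8 * VEC_SIZE are handled with 2, 4, or 8 unrolled compares, and larger
sizes fall into a loop that compares 4 * VEC_SIZE per iteration with
the first operand aligned.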
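Key features 1 and 2 cover the small sizes.  The sketch below shows the
idea for the 4-to-8-byte case (the helper names load32_be and
memcmp_4to8 are hypothetical, and the real 2-to-7-byte code also has
2- and 3-byte paths): loading big endian, as movbe or mov+bswap does,
makes a plain integer comparison rank the buffers by their first
differing byte, and an overlapping second load covers the tail without
branching on the exact size.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* C equivalent of a movbe load: 4 bytes taken as a big-endian
       value.  Assumes a little-endian host and the GCC/Clang bswap
       builtin.  */
    static uint32_t
    load32_be (const void *p)
    {
      uint32_t v;
      memcpy (&v, p, 4);
      return __builtin_bswap32 (v);
    }

    /* memcmp for 4 <= n <= 8: two overlapping big-endian loads cover
       every byte, so there is no byte loop and no size-dependent
       branch beyond picking this path.  */
    static int
    memcmp_4to8 (const unsigned char *s1, const unsigned char *s2,
                 size_t n)
    {
      uint32_t a = load32_be (s1);
      uint32_t b = load32_be (s2);
      if (a != b)
        return a > b ? 1 : -1;
      a = load32_be (s1 + n - 4);   /* May overlap the first load.  */
      b = load32_be (s2 + n - 4);
      if (a != b)
        return a > b ? 1 : -1;
      return 0;
    }

Because the overlapped bytes sit in the most significant positions of
the second comparison and are already known to be equal, that
comparison is decided by the remaining tail bytes in memory order,
preserving memcmp semantics.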
sysdeps/x86_64/multiarch/wmemcmp.S
/* Multiple versions of wmemcmp
   All versions must be listed in ifunc-impl-list.c.
   Copyright (C) 2011-2017 Free Software Foundation, Inc.
   Contributed by Intel Corporation.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <init-arch.h>

/* Define multiple versions only for the definition in libc.  */
#if IS_IN (libc)
	.text
ENTRY(wmemcmp)
	.type	wmemcmp, @gnu_indirect_function
	LOAD_RTLD_GLOBAL_RO_RDX
	/* Use the AVX2 version only if vzeroupper is cheap (no
	   Prefer_No_VZEROUPPER) and AVX2, MOVBE and fast unaligned
	   AVX loads are all available.  */
	HAS_ARCH_FEATURE (Prefer_No_VZEROUPPER)
	jnz	1f
	HAS_ARCH_FEATURE (AVX2_Usable)
	jz	1f
	HAS_CPU_FEATURE (MOVBE)
	jz	1f
	HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
	jz	1f
	leaq	__wmemcmp_avx2_movbe(%rip), %rax
	ret

1:	HAS_CPU_FEATURE (SSSE3)
	jnz	2f
	leaq	__wmemcmp_sse2(%rip), %rax
	ret

2:	HAS_CPU_FEATURE (SSE4_1)
	jz	3f
	leaq	__wmemcmp_sse4_1(%rip), %rax
	ret

3:	leaq	__wmemcmp_ssse3(%rip), %rax
	ret

END(wmemcmp)
#endif
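For readers who have not met glibc's IFUNC selectors, the resolver
above amounts to roughly the following C.  This is a sketch only:
HAS_ARCH_FEATURE and HAS_CPU_FEATURE stand in for the macros of those
names from <init-arch.h>, and the extern declarations are added here so
the fragment is self-contained.  The real resolver stays in assembly;
its LOAD_RTLD_GLOBAL_RO_RDX prologue fetches the feature bits through a
register because an IFUNC resolver runs very early, during relocation
processing.

    #include <stddef.h>
    #include <wchar.h>

    extern int __wmemcmp_avx2_movbe (const wchar_t *, const wchar_t *,
                                     size_t);
    extern int __wmemcmp_sse4_1 (const wchar_t *, const wchar_t *,
                                 size_t);
    extern int __wmemcmp_ssse3 (const wchar_t *, const wchar_t *,
                                size_t);
    extern int __wmemcmp_sse2 (const wchar_t *, const wchar_t *,
                               size_t);

    typedef int (*wmemcmp_fn) (const wchar_t *, const wchar_t *,
                               size_t);

    /* Same selection order as the assembly: AVX2 only when all four
       conditions hold, then SSE2 if SSSE3 is missing, then SSE4.1,
       then SSSE3.  */
    static wmemcmp_fn
    select_wmemcmp (void)
    {
      if (!HAS_ARCH_FEATURE (Prefer_No_VZEROUPPER)
          && HAS_ARCH_FEATURE (AVX2_Usable)
          && HAS_CPU_FEATURE (MOVBE)
          && HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load))
        return __wmemcmp_avx2_movbe;
      if (!HAS_CPU_FEATURE (SSSE3))
        return __wmemcmp_sse2;
      if (HAS_CPU_FEATURE (SSE4_1))
        return __wmemcmp_sse4_1;
      return __wmemcmp_ssse3;
    }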