/* strcat with AVX2
   Copyright (C) 2011-2018 Free Software Foundation, Inc.
   Contributed by Intel Corporation.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#if IS_IN (libc)

# include <sysdep.h>

# ifndef STRCAT
#  define STRCAT  __strcat_avx2
# endif

# define USE_AS_STRCAT

/* Number of bytes in a vector register */
# define VEC_SIZE	32

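/* strcat works in two stages: first find the terminating null byte of
   the destination string (a vectorized strlen over %rdi), then append
   the source string by falling into the strcpy/strncpy code included
   at the end of this file.  */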
	.section .text.avx,"ax",@progbits
ENTRY (STRCAT)
	mov	%rdi, %r9
# ifdef USE_AS_STRNCAT
	mov	%rdx, %r8
# endif

	xor	%eax, %eax
	mov	%edi, %ecx
	and	$((VEC_SIZE * 4) - 1), %ecx
	vpxor	%xmm6, %xmm6, %xmm6
	cmp	$(VEC_SIZE * 3), %ecx
	ja	L(fourth_vector_boundary)
	vpcmpeqb (%rdi), %ymm6, %ymm0
	vpmovmskb %ymm0, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_first_vector)
	mov	%rdi, %rax
	and	$-VEC_SIZE, %rax
	jmp	L(align_vec_size_start)
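/* dst starts in the last vector of a (VEC_SIZE * 4) block, so an
   unaligned VEC_SIZE load would cross into the next block (and
   possibly the next page).  Compare a vector aligned below dst
   instead and mask off the match bits for bytes before the string.  */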
L(fourth_vector_boundary):
	mov	%rdi, %rax
	and	$-VEC_SIZE, %rax
	vpcmpeqb	(%rax), %ymm6, %ymm0
	mov	$-1, %r10d
	sub	%rax, %rcx
	shl	%cl, %r10d
	vpmovmskb %ymm0, %edx
	and	%r10d, %edx
	jnz	L(exit)

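/* Check the string one vector at a time (unrolled in groups of four)
   until %rax reaches (VEC_SIZE * 4) alignment, at which point the
   main loop below takes over.  */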
L(align_vec_size_start):
	vpcmpeqb VEC_SIZE(%rax), %ymm6, %ymm0
	vpmovmskb %ymm0, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_second_vector)

	vpcmpeqb (VEC_SIZE * 2)(%rax), %ymm6, %ymm1
	vpmovmskb %ymm1, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_third_vector)

	vpcmpeqb (VEC_SIZE * 3)(%rax), %ymm6, %ymm2
	vpmovmskb %ymm2, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fourth_vector)

	vpcmpeqb (VEC_SIZE * 4)(%rax), %ymm6, %ymm3
	vpmovmskb %ymm3, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fifth_vector)

	vpcmpeqb (VEC_SIZE * 5)(%rax), %ymm6, %ymm0
	add	$(VEC_SIZE * 4), %rax
	vpmovmskb %ymm0, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_second_vector)

	vpcmpeqb (VEC_SIZE * 2)(%rax), %ymm6, %ymm1
	vpmovmskb %ymm1, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_third_vector)

	vpcmpeqb (VEC_SIZE * 3)(%rax), %ymm6, %ymm2
	vpmovmskb %ymm2, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fourth_vector)

	vpcmpeqb (VEC_SIZE * 4)(%rax), %ymm6, %ymm3
	vpmovmskb %ymm3, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fifth_vector)

	vpcmpeqb (VEC_SIZE * 5)(%rax), %ymm6, %ymm0
	add	$(VEC_SIZE * 4), %rax
	vpmovmskb %ymm0, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_second_vector)

	vpcmpeqb (VEC_SIZE * 2)(%rax), %ymm6, %ymm1
	vpmovmskb %ymm1, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_third_vector)

	vpcmpeqb (VEC_SIZE * 3)(%rax), %ymm6, %ymm2
	vpmovmskb %ymm2, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fourth_vector)

	vpcmpeqb (VEC_SIZE * 4)(%rax), %ymm6, %ymm3
	vpmovmskb %ymm3, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fifth_vector)

	vpcmpeqb (VEC_SIZE * 5)(%rax), %ymm6, %ymm0
	add	$(VEC_SIZE * 4), %rax
	vpmovmskb %ymm0, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_second_vector)

	vpcmpeqb (VEC_SIZE * 2)(%rax), %ymm6, %ymm1
	vpmovmskb %ymm1, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_third_vector)

	vpcmpeqb (VEC_SIZE * 3)(%rax), %ymm6, %ymm2
	vpmovmskb %ymm2, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fourth_vector)

	vpcmpeqb (VEC_SIZE * 4)(%rax), %ymm6, %ymm3
	vpmovmskb %ymm3, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fifth_vector)

	test	$((VEC_SIZE * 4) - 1), %rax
	jz	L(align_four_vec_loop)

	vpcmpeqb (VEC_SIZE * 5)(%rax), %ymm6, %ymm0
	add	$(VEC_SIZE * 5), %rax
	vpmovmskb %ymm0, %edx
	test	%edx, %edx
	jnz	L(exit)

	test	$((VEC_SIZE * 4) - 1), %rax
	jz	L(align_four_vec_loop)

	vpcmpeqb VEC_SIZE(%rax), %ymm6, %ymm1
	add	$VEC_SIZE, %rax
	vpmovmskb %ymm1, %edx
	test	%edx, %edx
	jnz	L(exit)

	test	$((VEC_SIZE * 4) - 1), %rax
	jz	L(align_four_vec_loop)

	vpcmpeqb VEC_SIZE(%rax), %ymm6, %ymm2
	add	$VEC_SIZE, %rax
	vpmovmskb %ymm2, %edx
	test	%edx, %edx
	jnz	L(exit)

	test	$((VEC_SIZE * 4) - 1), %rax
	jz	L(align_four_vec_loop)

	vpcmpeqb VEC_SIZE(%rax), %ymm6, %ymm3
	add	$VEC_SIZE, %rax
	vpmovmskb %ymm3, %edx
	test	%edx, %edx
	jnz	L(exit)

	add	$VEC_SIZE, %rax

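/* Main loop: process (VEC_SIZE * 4) bytes per iteration.  vpminub
   folds the four vectors together, so a single compare against the
   zero vector in %ymm6 detects a null byte anywhere in the block.  */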
	.p2align 4
L(align_four_vec_loop):
	vmovaps	(%rax),	%ymm4
	vpminub	VEC_SIZE(%rax),	%ymm4, %ymm4
	vmovaps	(VEC_SIZE * 2)(%rax),	%ymm5
	vpminub	(VEC_SIZE * 3)(%rax),	%ymm5, %ymm5
	add	$(VEC_SIZE * 4),	%rax
	vpminub	%ymm4,	%ymm5, %ymm5
	vpcmpeqb %ymm5,	%ymm6, %ymm5
	vpmovmskb %ymm5,	%edx
	test	%edx,	%edx
	jz	L(align_four_vec_loop)

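/* A null byte was found somewhere in the last (VEC_SIZE * 4) block;
   step back and recheck each vector to locate it exactly.  */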
	vpcmpeqb -(VEC_SIZE * 4)(%rax), %ymm6, %ymm0
	sub	$(VEC_SIZE * 5), %rax
	vpmovmskb %ymm0, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_second_vector)

	vpcmpeqb (VEC_SIZE * 2)(%rax), %ymm6, %ymm1
	vpmovmskb %ymm1, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_third_vector)

	vpcmpeqb (VEC_SIZE * 3)(%rax), %ymm6, %ymm2
	vpmovmskb %ymm2, %edx
	test	%edx, %edx
	jnz	L(exit_null_on_fourth_vector)

	vpcmpeqb (VEC_SIZE * 4)(%rax), %ymm6, %ymm3
	vpmovmskb %ymm3, %edx
	sub	%rdi, %rax
	bsf	%rdx, %rdx
	add	%rdx, %rax
	add	$(VEC_SIZE * 4), %rax
	jmp	L(StartStrcpyPart)

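/* Exit paths: compute in %rax the offset of the null terminator from
   the start of dst (bsf gives the byte index within the matching
   vector), then continue with the copy part.  */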
	.p2align 4
L(exit):
	sub	%rdi, %rax
L(exit_null_on_first_vector):
	bsf	%rdx, %rdx
	add	%rdx, %rax
	jmp	L(StartStrcpyPart)

	.p2align 4
L(exit_null_on_second_vector):
	sub	%rdi, %rax
	bsf	%rdx, %rdx
	add	%rdx, %rax
	add	$VEC_SIZE, %rax
	jmp	L(StartStrcpyPart)

	.p2align 4
L(exit_null_on_third_vector):
	sub	%rdi, %rax
	bsf	%rdx, %rdx
	add	%rdx, %rax
	add	$(VEC_SIZE * 2), %rax
	jmp	L(StartStrcpyPart)

	.p2align 4
L(exit_null_on_fourth_vector):
	sub	%rdi, %rax
	bsf	%rdx, %rdx
	add	%rdx, %rax
	add	$(VEC_SIZE * 3), %rax
	jmp	L(StartStrcpyPart)

	.p2align 4
L(exit_null_on_fifth_vector):
	sub	%rdi, %rax
	bsf	%rdx, %rdx
	add	%rdx, %rax
	add	$(VEC_SIZE * 4), %rax

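/* %rax now holds the length of dst.  Set up the registers used by
   the copy code: %rdi points at the null terminator of dst, %rcx at
   src, and %rax preserves the original dst for the return value.  */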
	.p2align 4
L(StartStrcpyPart):
	lea	(%r9, %rax), %rdi
	mov	%rsi, %rcx
	mov	%r9, %rax      /* save result */

# ifdef USE_AS_STRNCAT
	test	%r8, %r8
	jz	L(ExitZero)
#  define USE_AS_STRNCPY
# endif

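/* The actual append is performed by strcpy-avx2.S, built here with
   USE_AS_STRCAT (and USE_AS_STRNCPY for strncat) defined.  */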
# include "strcpy-avx2.S"
#endif