/* Copyright (C) 2011-2018 Free Software Foundation, Inc.
   Contributed by Intel Corporation.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
|
#include <sysdep.h>

/* This file builds both memchr and wmemchr from one body.  When
   USE_AS_WMEMCHR is defined the element comparison is done per
   32-bit word (pcmpeqd); otherwise per byte (pcmpeqb).  */
#ifdef USE_AS_WMEMCHR
# define MEMCHR wmemchr
# define PCMPEQ pcmpeqd
#else
# define MEMCHR memchr
# define PCMPEQ pcmpeqb
#endif

/* fast SSE2 version with using pmaxub and 64 byte loop */
	.text
/* void *MEMCHR (const void *s, int c, size_t n)
   ABI:  SysV AMD64.
   In:   rdi = s, esi = c, rdx = n (elements; scaled to bytes below
         for wmemchr).
   Out:  rax = pointer to first matching element, or NULL.
   Clobbers: rcx, rdx, xmm0-xmm4, flags.
   Strategy: broadcast c into xmm1, handle a possibly unaligned head,
   then scan 64 bytes per iteration from a 64-byte-aligned pointer,
   merging the four 16-byte compare results with pmaxub so a single
   pmovmskb/test decides whether any of the 64 bytes matched.  */
ENTRY(MEMCHR)
	movd	%esi, %xmm1		/* seed xmm1 with the search value */
	mov	%edi, %ecx		/* ecx = low bits of s, for alignment tests */

#ifdef USE_AS_WMEMCHR
	test	%rdx, %rdx		/* n == 0 -> no match */
	jz	L(return_null)
	shl	$2, %rdx		/* convert wchar count to a byte count */
#else
	punpcklbw %xmm1, %xmm1		/* duplicate byte c into a word ... */
	test	%rdx, %rdx		/* n == 0 -> no match */
	jz	L(return_null)
	punpcklbw %xmm1, %xmm1		/* ... and the word into a dword */
#endif

	and	$63, %ecx		/* ecx = s mod 64 */
	pshufd	$0, %xmm1, %xmm1	/* broadcast the dword: xmm1 = c in every lane */

	cmp	$48, %ecx		/* would an unaligned 16-byte load at s cross
					   a 64-byte (cache-line) boundary?  */
	ja	L(crosscache)

	/* Head: one unaligned 16-byte check directly at s.  */
	movdqu	(%rdi), %xmm0
	PCMPEQ	%xmm1, %xmm0		/* 0xff in each matching element */
	pmovmskb %xmm0, %eax		/* collapse to a 16-bit byte mask */
	test	%eax, %eax

	jnz	L(matches_1)		/* match found; still must bound-check n */
	sub	$16, %rdx		/* consumed 16 bytes */
	jbe	L(return_null)
	add	$16, %rdi
	and	$15, %ecx		/* ecx = s mod 16 */
	and	$-16, %rdi		/* round rdi down to 16-byte alignment */
	add	%rcx, %rdx		/* credit back the bytes re-covered by rounding */
	sub	$64, %rdx
	jbe	L(exit_loop)
	jmp	L(loop_prolog)

	.p2align 4
L(crosscache):
	/* Head load would cross a cache line: use an aligned load and
	   discard the leading bytes from the match mask instead.  */
	and	$15, %ecx		/* ecx = s mod 16 */
	and	$-16, %rdi		/* align rdi down */
	movdqa	(%rdi), %xmm0

	PCMPEQ	%xmm1, %xmm0
	/* Check if there is a match.  */
	pmovmskb %xmm0, %eax
	/* Remove the leading bytes.  */
	sar	%cl, %eax
	test	%eax, %eax
	je	L(unaligned_no_match)
	/* Check which byte is a match.  */
	bsf	%eax, %eax

	sub	%rax, %rdx		/* is the match within the first n bytes?  */
	jbe	L(return_null)
	add	%rdi, %rax		/* aligned base + offset ... */
	add	%rcx, %rax		/* ... + discarded lead = real match address */
	ret

	.p2align 4
L(unaligned_no_match):
	/* "rcx" is less than 16.  Calculate "rdx + rcx - 16" by using
	   "rdx - (16 - rcx)" instead of "(rdx + rcx) - 16" to avoid
	   possible addition overflow.  */
	neg	%rcx
	add	$16, %rcx		/* rcx = 16 - (s mod 16) = bytes consumed */
	sub	%rcx, %rdx
	jbe	L(return_null)
	add	$16, %rdi
	sub	$64, %rdx
	jbe	L(exit_loop)

	.p2align 4
L(loop_prolog):
	/* rdi is 16-byte aligned with >64 bytes left: check four
	   16-byte chunks while walking up to 64-byte alignment.  */
	movdqa	(%rdi), %xmm0
	PCMPEQ	%xmm1, %xmm0
	pmovmskb %xmm0, %eax
	test	%eax, %eax
	jnz	L(matches)

	movdqa	16(%rdi), %xmm2
	PCMPEQ	%xmm1, %xmm2
	pmovmskb %xmm2, %eax
	test	%eax, %eax
	jnz	L(matches16)

	movdqa	32(%rdi), %xmm3
	PCMPEQ	%xmm1, %xmm3
	pmovmskb %xmm3, %eax
	test	%eax, %eax
	jnz	L(matches32)

	movdqa	48(%rdi), %xmm4
	PCMPEQ	%xmm1, %xmm4
	add	$64, %rdi		/* advance before the final test */
	pmovmskb %xmm4, %eax
	test	%eax, %eax
	jnz	L(matches0)		/* match is at rdi-16 .. rdi-1 */

	test	$0x3f, %rdi		/* already 64-byte aligned?  */
	jz	L(align64_loop)

	sub	$64, %rdx
	jbe	L(exit_loop)

	/* Second prolog round: one more 64-byte sweep.  */
	movdqa	(%rdi), %xmm0
	PCMPEQ	%xmm1, %xmm0
	pmovmskb %xmm0, %eax
	test	%eax, %eax
	jnz	L(matches)

	movdqa	16(%rdi), %xmm2
	PCMPEQ	%xmm1, %xmm2
	pmovmskb %xmm2, %eax
	test	%eax, %eax
	jnz	L(matches16)

	movdqa	32(%rdi), %xmm3
	PCMPEQ	%xmm1, %xmm3
	pmovmskb %xmm3, %eax
	test	%eax, %eax
	jnz	L(matches32)

	movdqa	48(%rdi), %xmm3
	PCMPEQ	%xmm1, %xmm3
	pmovmskb %xmm3, %eax

	add	$64, %rdi
	test	%eax, %eax
	jnz	L(matches0)

	/* Round rdi down to 64 bytes and credit the re-covered bytes
	   back into rdx, as in the head handling above.  */
	mov	%rdi, %rcx
	and	$-64, %rdi
	and	$63, %ecx
	add	%rcx, %rdx

	.p2align 4
L(align64_loop):
	sub	$64, %rdx
	jbe	L(exit_loop)
	movdqa	(%rdi), %xmm0
	movdqa	16(%rdi), %xmm2
	movdqa	32(%rdi), %xmm3
	movdqa	48(%rdi), %xmm4

	PCMPEQ	%xmm1, %xmm0
	PCMPEQ	%xmm1, %xmm2
	PCMPEQ	%xmm1, %xmm3
	PCMPEQ	%xmm1, %xmm4

	/* Merge the four compare masks: the byte-wise max is nonzero
	   iff any chunk had a 0xff match byte, so one pmovmskb covers
	   all 64 bytes.  */
	pmaxub	%xmm0, %xmm3
	pmaxub	%xmm2, %xmm4
	pmaxub	%xmm3, %xmm4
	pmovmskb %xmm4, %eax

	add	$64, %rdi

	test	%eax, %eax
	jz	L(align64_loop)

	sub	$64, %rdi		/* rewind to the block that matched */

	/* Locate which of the four chunks matched.  xmm0/xmm2 still
	   hold their compare results; chunks 2 and 3 are redone.  */
	pmovmskb %xmm0, %eax
	test	%eax, %eax
	jnz	L(matches)

	pmovmskb %xmm2, %eax
	test	%eax, %eax
	jnz	L(matches16)

	movdqa	32(%rdi), %xmm3
	PCMPEQ	%xmm1, %xmm3

	PCMPEQ	48(%rdi), %xmm1		/* last use of the pattern; ok to clobber */
	pmovmskb %xmm3, %eax
	test	%eax, %eax
	jnz	L(matches32)

	pmovmskb %xmm1, %eax
	bsf	%eax, %eax
	lea	48(%rdi, %rax), %rax
	ret

	.p2align 4
L(exit_loop):
	/* Tail: rdx = remaining - 64 (negative or zero).  Re-add in two
	   32-byte steps to decide how many 16-byte chunks are left.  */
	add	$32, %edx
	jle	L(exit_loop_32)		/* at most 32 bytes remain */

	movdqa	(%rdi), %xmm0
	PCMPEQ	%xmm1, %xmm0
	pmovmskb %xmm0, %eax
	test	%eax, %eax
	jnz	L(matches)

	movdqa	16(%rdi), %xmm2
	PCMPEQ	%xmm1, %xmm2
	pmovmskb %xmm2, %eax
	test	%eax, %eax
	jnz	L(matches16)

	movdqa	32(%rdi), %xmm3
	PCMPEQ	%xmm1, %xmm3
	pmovmskb %xmm3, %eax
	test	%eax, %eax
	jnz	L(matches32_1)		/* bound-checked: may be past n */
	sub	$16, %edx
	jle	L(return_null)

	PCMPEQ	48(%rdi), %xmm1
	pmovmskb %xmm1, %eax
	test	%eax, %eax
	jnz	L(matches48_1)
	xor	%eax, %eax		/* no match: return NULL */
	ret

	.p2align 4
L(exit_loop_32):
	add	$32, %edx		/* restore true remaining byte count */
	movdqa	(%rdi), %xmm0
	PCMPEQ	%xmm1, %xmm0
	pmovmskb %xmm0, %eax
	test	%eax, %eax
	jnz	L(matches_1)
	sub	$16, %edx
	jbe	L(return_null)

	PCMPEQ	16(%rdi), %xmm1
	pmovmskb %xmm1, %eax
	test	%eax, %eax
	jnz	L(matches16_1)
	xor	%eax, %eax
	ret

	/* Match epilogues.  Plain L(matchesNN): rdi points at (or, for
	   matches0, 16 bytes past) the chunk; the whole chunk is known
	   to be inside the buffer.  */
	.p2align 4
L(matches0):
	bsf	%eax, %eax		/* index of first set mask bit */
	lea	-16(%rax, %rdi), %rax	/* rdi already advanced past the chunk */
	ret

	.p2align 4
L(matches):
	bsf	%eax, %eax
	add	%rdi, %rax
	ret

	.p2align 4
L(matches16):
	bsf	%eax, %eax
	lea	16(%rax, %rdi), %rax
	ret

	.p2align 4
L(matches32):
	bsf	%eax, %eax
	lea	32(%rax, %rdi), %rax
	ret

	/* L(matchesNN_1) variants: the chunk may extend past s+n, so
	   verify the match offset is still within the remaining count
	   (rdx) before returning it.  */
	.p2align 4
L(matches_1):
	bsf	%eax, %eax
	sub	%rax, %rdx
	jbe	L(return_null)
	add	%rdi, %rax
	ret

	.p2align 4
L(matches16_1):
	bsf	%eax, %eax
	sub	%rax, %rdx
	jbe	L(return_null)
	lea	16(%rdi, %rax), %rax
	ret

	.p2align 4
L(matches32_1):
	bsf	%eax, %eax
	sub	%rax, %rdx
	jbe	L(return_null)
	lea	32(%rdi, %rax), %rax
	ret

	.p2align 4
L(matches48_1):
	bsf	%eax, %eax
	sub	%rax, %rdx
	jbe	L(return_null)
	lea	48(%rdi, %rax), %rax
	ret

	.p2align 4
L(return_null):
	xor	%eax, %eax
	ret
END(MEMCHR)
|
2009-04-07 06:36:33 +00:00
|
|
|
|
2017-06-09 12:13:15 +00:00
|
|
|
#ifndef USE_AS_WMEMCHR
/* memchr build only: export the internal __memchr alias and the
   hidden builtin definition (glibc convention for intra-libc
   references).  The wmemchr build provides no extra aliases here.  */
strong_alias (memchr, __memchr)
libc_hidden_builtin_def(memchr)
#endif
|