glibc/sysdeps/x86_64/multiarch/rawmemchr-sse2.S
Noah Goldstein 3edda6a0f0 x86: Add support for compiling {raw|w}memchr with high ISA level
1. Refactor files so that all implementations are in the multiarch
   directory.
    - Essentially moved sse2 {raw|w}memchr.S implementation to
      multiarch/{raw|w}memchr-sse2.S

    - The non-multiarch {raw|w}memchr.S file now only includes one of
      the implementations in the multiarch directory based on the
      compiled ISA level (it is only used for non-multiarch builds;
      otherwise we go through the ifunc selector). See the sketch
      below.
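
   A minimal sketch of what the non-multiarch stub could look like
   (the DEFAULT_IMPL_V{1,3,4} macro names and the isa-default-impl.h
   helper are assumptions based on this description, not verbatim
   from the tree):

      /* sysdeps/x86_64/rawmemchr.S (non-multiarch stub, sketch).  */
      #define RAWMEMCHR	__rawmemchr

      /* Pick the best implementation the compiled ISA level allows.  */
      #define DEFAULT_IMPL_V1	"multiarch/rawmemchr-sse2.S"
      #define DEFAULT_IMPL_V3	"multiarch/rawmemchr-avx2.S"
      #define DEFAULT_IMPL_V4	"multiarch/rawmemchr-evex.S"

      #include "isa-default-impl.h"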

2. Add ISA level build guards to the different implementations.
    - E.g. memchr-avx2.S, which is ISA level 3, will only be built if
      the compiled ISA level is <= 3. Otherwise there is no reason to
      include it, as we will always use one of the ISA level 4
      implementations (memchr-evex{-rtm}.S). See the sketch below.
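
   The guard mirrors the ISA_SHOULD_BUILD check used in the sse2 file
   below (which uses level 2); for an ISA level 3 implementation it
   would read, as a sketch:

      #include <isa-level.h>

      #if ISA_SHOULD_BUILD (3)
      /* ... AVX2 implementation ... */
      #endif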

3. Add a new multiarch/rtld-{raw}memchr.S that just includes the
   non-multiarch {raw}memchr.S, which will in turn select the best
   implementation based on the compiled ISA level (sketch below).
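
   A sketch of such a wrapper (the exact include path is an
   assumption):

      /* sysdeps/x86_64/multiarch/rtld-rawmemchr.S (sketch).  */
      #include "../rawmemchr.S"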

4. Refactor the ifunc selector and ifunc implementation list to use
   the ISA level aware wrapper macros that allow functions below the
   compiled ISA level (with a guaranteed replacement) to be skipped.
    - Guaranteed replacement essentially means that for any ISA level
      build there must be a function that the baseline of the ISA
      supports. So for {raw|w}memchr.S, since there is no ISA level 2
      function, the ISA level 2 build still includes the ISA level
      1 (sse2) function. Once we reach the ISA level 3 build, however,
      {raw|w}memchr-avx2{-rtm}.S will always be sufficient, so the ISA
      level 1 implementation ({raw|w}memchr-sse2.S) will not be built.
      A sketch of the resulting ifunc-list entry follows.
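
   A hedged sketch of an ISA level aware entry in the ifunc
   implementation list (the X86_IFUNC_IMPL_ADD_V3 macro name and its
   argument order are assumptions based on this description, not
   verbatim from the tree):

      /* Compiles to nothing when the compiled ISA level is >= 4,
	 since an evex implementation is then guaranteed to be
	 selected instead.  */
      X86_IFUNC_IMPL_ADD_V3 (array, i, rawmemchr,
			     CPU_FEATURE_USABLE (AVX2),
			     __rawmemchr_avx2)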

Tested with and without multiarch on x86_64 for ISA levels:
{generic, x86-64-v2, x86-64-v3, x86-64-v4}

And m32 with and without multiarch.
2022-06-22 19:41:35 -07:00


/* rawmemchr optimized with SSE2.
   Copyright (C) 2017-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <isa-level.h>
#include <sysdep.h>

/* MINIMUM_X86_ISA_LEVEL <= 2 because there is no V2 implementation
   so we need this to build for ISA V2 builds.  */
#if ISA_SHOULD_BUILD (2)

# ifndef RAWMEMCHR
#  define RAWMEMCHR	__rawmemchr_sse2
# endif
	.text
ENTRY (RAWMEMCHR)
	/* Broadcast the search byte (low byte of %rsi) to all 16
	   bytes of %xmm1.  */
	movd	%rsi, %xmm1
	mov	%rdi, %rcx

	punpcklbw %xmm1, %xmm1
	punpcklbw %xmm1, %xmm1

	and	$63, %rcx
	pshufd	$0, %xmm1, %xmm1

	/* If the first unaligned 16-byte load would cross a 64-byte
	   boundary, take the aligned path below.  */
	cmp	$48, %rcx
	ja	L(crosscache)

	movdqu	(%rdi), %xmm0
	pcmpeqb	%xmm1, %xmm0
	/* Check if there is a match.  */
	pmovmskb %xmm0, %eax
	test	%eax, %eax
	jnz	L(matches)

	add	$16, %rdi
	and	$-16, %rdi
	jmp	L(loop_prolog)

	.p2align 4
L(crosscache):
	/* Load 16 bytes from the preceding aligned address, then
	   shift out the bytes before the start pointer.  */
	and	$15, %rcx
	and	$-16, %rdi
	movdqa	(%rdi), %xmm0
	pcmpeqb	%xmm1, %xmm0
	/* Check if there is a match.  */
	pmovmskb %xmm0, %eax
	/* Remove the leading bytes.  */
	sar	%cl, %eax
	test	%eax, %eax
	je	L(unaligned_no_match)
	/* Check which byte is a match.  */
	bsf	%eax, %eax
	add	%rdi, %rax
	add	%rcx, %rax
	ret
	.p2align 4
L(unaligned_no_match):
	add	$16, %rdi

	.p2align 4
L(loop_prolog):
	/* Scan up to 128 bytes in 16-byte chunks, then align %rdi to
	   64 bytes for the main loop.  */
	movdqa	(%rdi), %xmm0
	pcmpeqb	%xmm1, %xmm0
	pmovmskb %xmm0, %eax
	test	%eax, %eax
	jnz	L(matches)

	movdqa	16(%rdi), %xmm2
	pcmpeqb	%xmm1, %xmm2
	pmovmskb %xmm2, %eax
	test	%eax, %eax
	jnz	L(matches16)

	movdqa	32(%rdi), %xmm3
	pcmpeqb	%xmm1, %xmm3
	pmovmskb %xmm3, %eax
	test	%eax, %eax
	jnz	L(matches32)

	movdqa	48(%rdi), %xmm4
	pcmpeqb	%xmm1, %xmm4
	add	$64, %rdi
	pmovmskb %xmm4, %eax
	test	%eax, %eax
	jnz	L(matches0)

	test	$0x3f, %rdi
	jz	L(align64_loop)

	movdqa	(%rdi), %xmm0
	pcmpeqb	%xmm1, %xmm0
	pmovmskb %xmm0, %eax
	test	%eax, %eax
	jnz	L(matches)

	movdqa	16(%rdi), %xmm2
	pcmpeqb	%xmm1, %xmm2
	pmovmskb %xmm2, %eax
	test	%eax, %eax
	jnz	L(matches16)

	movdqa	32(%rdi), %xmm3
	pcmpeqb	%xmm1, %xmm3
	pmovmskb %xmm3, %eax
	test	%eax, %eax
	jnz	L(matches32)

	movdqa	48(%rdi), %xmm3
	pcmpeqb	%xmm1, %xmm3
	pmovmskb %xmm3, %eax

	add	$64, %rdi
	test	%eax, %eax
	jnz	L(matches0)

	and	$-64, %rdi
	.p2align 4
L(align64_loop):
	/* Main loop: scan 64 bytes per iteration.  pcmpeqb yields
	   0x00/0xff per byte, so pmaxub of the four results is
	   nonzero iff any chunk contains a match.  */
	movdqa	(%rdi), %xmm0
	movdqa	16(%rdi), %xmm2
	movdqa	32(%rdi), %xmm3
	movdqa	48(%rdi), %xmm4

	pcmpeqb	%xmm1, %xmm0
	pcmpeqb	%xmm1, %xmm2
	pcmpeqb	%xmm1, %xmm3
	pcmpeqb	%xmm1, %xmm4

	pmaxub	%xmm0, %xmm3
	pmaxub	%xmm2, %xmm4
	pmaxub	%xmm3, %xmm4
	pmovmskb %xmm4, %eax

	add	$64, %rdi

	test	%eax, %eax
	jz	L(align64_loop)

	/* Match found.  %xmm0 and %xmm2 still hold their compare
	   results; %xmm3 and %xmm4 were clobbered by pmaxub, so redo
	   the comparisons for the last two chunks.  */
	sub	$64, %rdi

	pmovmskb %xmm0, %eax
	test	%eax, %eax
	jnz	L(matches)

	pmovmskb %xmm2, %eax
	test	%eax, %eax
	jnz	L(matches16)

	movdqa	32(%rdi), %xmm3
	pcmpeqb	%xmm1, %xmm3
	pcmpeqb	48(%rdi), %xmm1

	pmovmskb %xmm3, %eax
	test	%eax, %eax
	jnz	L(matches32)

	pmovmskb %xmm1, %eax
	bsf	%eax, %eax
	lea	48(%rdi, %rax), %rax
	ret
	.p2align 4
L(matches0):
	/* %rdi has already been advanced by 64; the match is in the
	   chunk that started at offset 48 of the previous block,
	   i.e. at -16(%rdi).  */
	bsf	%eax, %eax
	lea	-16(%rax, %rdi), %rax
	ret

	.p2align 4
L(matches):
	bsf	%eax, %eax
	add	%rdi, %rax
	ret

	.p2align 4
L(matches16):
	bsf	%eax, %eax
	lea	16(%rax, %rdi), %rax
	ret

	.p2align 4
L(matches32):
	bsf	%eax, %eax
	lea	32(%rax, %rdi), %rax
	ret

END (RAWMEMCHR)
#endif