/* Optimized strspn implementation for Power8.

   Copyright (C) 2016-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

/* size_t [r3] strspn (const char *string [r3],
                       const char *needleAccept [r4])  */

/* This takes a novel approach by computing a 256 bit mask whereby
   each set bit implies the byte is "accepted".  P8 vector hardware
   can select bits from such a mask extremely efficiently (vbpermq).

   One might ask "why not use bpermd for short strings"?  It is so
   slow that its performance roughly matches the generic PPC64 variant
   without any fancy masking, with the added expense of building the
   mask.  That was the first variant of this code.  */
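
/* For reference only: a rough C sketch of the same idea, kept inside this
   comment so it is not assembled.  The helper name strspn_bitmap is purely
   illustrative, and unlike the masks built below (which put byte 0 at the
   most-significant bit of each word) this sketch numbers bits LSB-first.

   #include <stddef.h>

   static size_t
   strspn_bitmap (const char *s, const char *accept)
   {
     unsigned long long map[4] = { 0, 0, 0, 0 };   // 256-bit accept bitmap
     const unsigned char *a = (const unsigned char *) accept;
     for (; *a != 0; a++)
       map[*a >> 6] |= 1ULL << (*a & 63);          // set the bit for *a
     const unsigned char *p = (const unsigned char *) s;
     size_t n = 0;
     while (map[p[n] >> 6] & (1ULL << (p[n] & 63)))  // NUL's bit is never set
       n++;
     return n;
   }  */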

#include "sysdep.h"

#ifndef USE_AS_STRCSPN
# define USE_AS_STRCSPN 0
# ifndef STRSPN
#  define STRSPN strspn
# endif
# define INITIAL_MASK 0
# define UPDATE_MASK(RA, RS, RB) or RA, RS, RB
#else
# ifndef STRSPN
#  define STRSPN strcspn
# endif
# define INITIAL_MASK -1
# define UPDATE_MASK(RA, RS, RB) andc RA, RS, RB
#endif
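
/* With the definitions above, the same body implements both routines:
   strspn starts from an all-zero bitmap and ORs in the bit of every
   accepted needle byte, while strcspn starts from an all-ones bitmap and
   clears (andc) the bit of every needle byte.  Either way, the scan loop
   below runs for as long as the current string byte's bit is set.  */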

/* Simple macro to use VSX instructions in overlapping VR's.  */
#define XXVR(insn, vrt, vra, vrb) \
        insn 32+vrt, 32+vra, 32+vrb

/* ISA 2.07B instructions are not all defined for older binutils.
   Macros are defined below for these newer instructions in order
   to maintain compatibility.  */

/* Note, TX/SX is always set as VMX regs are the high 32 VSX regs.  */
#define MTVRD(v,r) .long (0x7c000167 | ((v)<<(32-11)) | ((r)<<(32-16)))
#define MFVRD(r,v) .long (0x7c000067 | ((v)<<(32-11)) | ((r)<<(32-16)))

#define VBPERMQ(t,a,b) .long (0x1000054c \
                              | ((t)<<(32-11)) \
                              | ((a)<<(32-16)) \
                              | ((b)<<(32-21)) )
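
/* For example, VBPERMQ (v8, v5, v0) should evaluate to .long 0x1105054c,
   the encoding of vbpermq v8,v5,v0 (primary opcode 4, XO 1356).  */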

        /* This can be updated to power8 once the minimum version of
           binutils supports power8 and the above instructions.  */
        .machine power7
ENTRY_TOCLESS (STRSPN, 4)
        CALL_MCOUNT 2

        /* Generate useful constants for later on.  */
        vspltisb v1, 7
        vspltisb v2, -1
        vslb v1, v1, v1         /* 0x80 to swap high bit for vbpermq.  */
        vspltisb v10, 0
        vsldoi v4, v10, v2, 2   /* 0xFFFF into vr4.  */
        XXVR(xxmrgld, v4, v4, v10) /* Mask for checking matches.  */
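
        /* v4 now has 0xffff in the same halfword where vbpermq deposits
           its 16 result bits, so the vcmpequh below can check whether
           all 16 bytes of a quadword were accepted.  */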

        /* Prepare to compute 256b mask.  */
        addi r4, r4, -1
        li r5, INITIAL_MASK
        li r6, INITIAL_MASK
        li r7, INITIAL_MASK
        li r8, INITIAL_MASK

#if USE_AS_STRCSPN
        /* Ensure the null character never matches by clearing ISA bit 0
           of r5, which is the bit vbpermq will later select for a 0x00
           string byte.  */
        srdi r5, r5, 1
#endif

        li r11, 1
        sldi r11, r11, 63
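        /* r11 = 0x8000000000000000; shifting it right by a byte's value
           yields that byte's single-bit mask within a 64-bit word.  */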

        /* Start interleaved Mask computation.
           This will eventually or 1's into ignored bits from vbpermq.  */
        lvsr v11, 0, r3
        vspltb v11, v11, 0      /* Splat shift constant.  */
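        /* v11 will become a mask of 1's for the bytes of the first aligned
           quadword that precede the start of the string; OR-ing it into the
           vbpermq result makes those bytes look accepted, and the final
           pointer subtraction cancels them out of the returned length.  */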

        /* Build a 256b mask in r5-r8.  */
        .align 4
L(next_needle):
        lbzu r9, 1(r4)

        cmpldi cr0, r9, 0
        cmpldi cr1, r9, 128

        /* This is a little tricky.  srd only uses the low 7 bits of the
           shift amount, and any amount with the 64's bit set gives 0, so
           the two shifts below together act like one shift across a
           128-bit range.  */
        xori r12, r9, 0x40      /* Invert bit 6.  */
        srd r10, r11, r9        /* Mask for bits 0-63.  */
        srd r12, r11, r12       /* Mask for bits 64-127.  */
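        /* Example: for needle byte 0x61 ('a') the plain shift amount has
           the 64's bit set, so r10 becomes 0, while the xor'ed amount is
           0x21 and r12 gets the bit for 'a' within the 64-127 word.  */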

        beq cr0, L(start_cmp)

        /* Now, or the value into the correct GPR.  */
        bge cr1, L(needle_gt128)
        UPDATE_MASK (r5, r5, r10)       /* 0 - 63.  */
        UPDATE_MASK (r6, r6, r12)       /* 64 - 127.  */
        b L(next_needle)

        .align 4
L(needle_gt128):
        UPDATE_MASK (r7, r7, r10)       /* 128 - 191.  */
        UPDATE_MASK (r8, r8, r12)       /* 192 - 255.  */
        b L(next_needle)


        .align 4
L(start_cmp):
        /* Move and merge bitmap into 2 VRs.  bpermd is slower on P8.  */
        mr r0, r3       /* Save r3 for final length computation.  */
        MTVRD (v5, r5)
        MTVRD (v6, r6)
        MTVRD (v7, r7)
        MTVRD (v8, r8)
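        /* The four 64-bit mask words now sit in doubleword 0 of v5-v8; the
           two xxmrghd merges below pack them into v5 (bytes 0-127) and v6
           (bytes 128-255).  */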

        /* Continue interleaved mask generation.  */
#ifdef __LITTLE_ENDIAN__
        vsrw v11, v2, v11       /* Note, shift ignores higher order bits.  */
        vsplth v11, v11, 0      /* Only care about the high 16 bits of v11.  */
#else
        vslw v11, v2, v11       /* Note, shift ignores higher order bits.  */
        vsplth v11, v11, 1      /* Only care about the low 16 bits of v11.  */
#endif
        lvx v0, 0, r3   /* Note, unaligned load ignores lower bits.  */

        /* Do the merging of the bitmask.  */
        XXVR(xxmrghd, v5, v5, v6)
        XXVR(xxmrghd, v6, v7, v8)

        /* Finish mask generation.  */
        vand v11, v11, v4       /* Throw away bits not in the mask.  */

        /* Compare the first 1-16B, while masking unwanted bytes.  */
        clrrdi r3, r3, 4        /* Note, counts from qw boundaries.  */
        vxor v9, v0, v1         /* Swap high bit.  */
        VBPERMQ (v8, v5, v0)
        VBPERMQ (v7, v6, v9)
        vor v7, v7, v8
        vor v7, v7, v11         /* Ignore non-participating bytes.  */
        vcmpequh. v8, v7, v4
        bnl cr6, L(done)

        addi r3, r3, 16
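
        /* Main loop: test one aligned quadword per iteration.  It always
           terminates because the string's NUL can never test as accepted:
           for strspn its bit is never set, and for strcspn it was cleared
           from r5 above.  */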

        .align 4
L(vec):
        lvx v0, 0, r3
        addi r3, r3, 16
        vxor v9, v0, v1         /* Swap high bit.  */
        VBPERMQ (v8, v5, v0)
        VBPERMQ (v7, v6, v9)
        vor v7, v7, v8
        vcmpequh. v8, v7, v4
        blt cr6, L(vec)

        addi r3, r3, -16
L(done):
        subf r3, r0, r3
        MFVRD (r10, v7)
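
        /* r10 now holds the 16 match bits (one per byte of the last
           quadword examined) in its low 16 bits, with the upper 48 bits
           clear.  Little-endian counts the leading accepted bytes as
           trailing 1's via r10 & ~(r10 + 1) and popcntd; big-endian
           inverts the low halfword and uses cntlzd, then subtracts the
           48 zero bits above the halfword.  */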

#ifdef __LITTLE_ENDIAN__
        addi r0, r10, 1         /* Count the trailing 1's.  */
        andc r10, r10, r0
        popcntd r10, r10
#else
        xori r10, r10, 0xffff   /* Count leading 1's by inverting.  */
        addi r3, r3, -48        /* Account for the extra leading zeros.  */
        cntlzd r10, r10
#endif

        add r3, r3, r10
        blr

END(STRSPN)
libc_hidden_builtin_def (STRSPN)