AArch64: Optimize memchr

Optimize the main loop - large strings are 40% faster on modern CPUs.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
commit ce758d4f06
parent 569cfcc6bf
Author: Wilco Dijkstra
Date:   2023-01-11 13:50:59 +00:00

diff --git a/sysdeps/aarch64/memchr.S b/sysdeps/aarch64/memchr.S
--- a/sysdeps/aarch64/memchr.S
+++ b/sysdeps/aarch64/memchr.S

@@ -30,7 +30,6 @@
 # define MEMCHR __memchr
 #endif
 
-/* Arguments and results.  */
 #define srcin		x0
 #define chrin		w1
 #define cntin		x2
@@ -73,42 +72,44 @@ ENTRY (MEMCHR)
 	rbit	synd, synd
 	clz	synd, synd
-	add	result, srcin, synd, lsr 2
 	cmp	cntin, synd, lsr 2
+	add	result, srcin, synd, lsr 2
 	csel	result, result, xzr, hi
 	ret
 
 	.p2align 3
 L(start_loop):
 	sub	tmp, src, srcin
-	add	tmp, tmp, 16
+	add	tmp, tmp, 17
 	subs	cntrem, cntin, tmp
-	b.ls	L(nomatch)
+	b.lo	L(nomatch)
 
 	/* Make sure that it won't overread by a 16-byte chunk */
-	add	tmp, cntrem, 15
-	tbnz	tmp, 4, L(loop32_2)
+	tbz	cntrem, 4, L(loop32_2)
+	sub	src, src, 16
 
 	.p2align 4
 L(loop32):
-	ldr	qdata, [src, 16]!
+	ldr	qdata, [src, 32]!
 	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
 	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b	/* 128->64 */
 	fmov	synd, dend
 	cbnz	synd, L(end)
 
 L(loop32_2):
-	ldr	qdata, [src, 16]!
-	subs	cntrem, cntrem, 32
+	ldr	qdata, [src, 16]
 	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
-	b.ls	L(end)
+	subs	cntrem, cntrem, 32
+	b.lo	L(end_2)
 	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b	/* 128->64 */
 	fmov	synd, dend
 	cbz	synd, L(loop32)
+L(end_2):
+	add	src, src, 16
 L(end):
 	shrn	vend.8b, vhas_chr.8h, 4		/* 128->64 */
-	sub	cntrem, src, srcin
 	fmov	synd, dend
-	sub	cntrem, cntin, cntrem
+	add	tmp, srcin, cntin
+	sub	cntrem, tmp, src
 #ifndef __AARCH64EB__
 	rbit	synd, synd
 #endif
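
A note on the match-detection trick this code relies on: cmeq sets each byte of vhas_chr to 0xff or 0x00, and shrn with a shift of 4 narrows every 16-bit lane to 8 bits, compressing the 16-byte comparison mask into a 64-bit syndrome with one nibble per input byte. On little-endian, rbit followed by clz acts as a count-trailing-zeros, so the first set bit locates the first match, and lsr 2 converts the bit index into a byte offset. Below is a minimal scalar C model of that reduction for the little-endian case, not the library code; block_syndrome and the test string are illustrative only.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Scalar model of cmeq + "shrn vend.8b, vhas_chr.8h, 4": one nibble
       of the 64-bit syndrome per input byte, 0xf on a match, 0x0
       otherwise.  */
    static uint64_t
    block_syndrome (const unsigned char *chunk, unsigned char c)
    {
      uint64_t synd = 0;
      for (int i = 0; i < 16; i++)
        if (chunk[i] == c)
          synd |= 0xfULL << (4 * i);
      return synd;
    }

    int
    main (void)
    {
      const char buf[] = "hello, memchr syndrome";
      uint64_t synd = block_syndrome ((const unsigned char *) buf, 'm');
      if (synd != 0)
        {
          /* rbit+clz in the assembly == count-trailing-zeros here;
             lsr 2 turns a bit index into a byte offset.  */
          size_t off = __builtin_ctzll (synd) >> 2;  /* GCC/Clang builtin */
          printf ("offset %zu, libc memchr agrees: %d\n", off,
                  memchr (buf, 'm', sizeof buf) == buf + off);
        }
      return 0;
    }

Read against this model, the restructured main loop consumes 32 bytes per iteration through the two 16-byte loads at L(loop32) and L(loop32_2), and the tbz cntrem, 4 test picks the entry point whose phase matches the remaining length, which is how the loop keeps the guarantee stated in the source comment that the tail never overreads by a 16-byte chunk.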