/* Optimized rawmemchr implementation for PowerPC64/POWER7 using cmpb insn.
Copyright (C) 2010-2014 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
/* void * [r3] rawmemchr (void *s [r3], int c [r4]) */
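/* Unlike memchr, rawmemchr takes no length argument: it assumes that a
byte equal to c occurs somewhere in the string, so no end-of-buffer
checks are needed below. */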
.machine power7
ENTRY (__rawmemchr)
CALL_MCOUNT 2
dcbt 0,r3 /* Prefetch the start of the string. */
clrrdi r8,r3,3 /* Align the address to doubleword boundary. */
/* Replicate byte to doubleword. */
insrdi r4,r4,8,48
insrdi r4,r4,16,32
insrdi r4,r4,32,0
/* Now r4 has a doubleword of c bytes. */
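/* Illustrative example (values not from the original source): for
c = 0x2f the three insrdi steps set the low 16 bits to 0x2f2f, the
low 32 bits to 0x2f2f2f2f, and finally all 64 bits to
0x2f2f2f2f2f2f2f2f. */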
rlwinm r6,r3,3,26,28 /* Calculate padding. */
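/* r6 = 8 * (s & 7), i.e. the number of bits occupied by the bytes that
precede s within the aligned doubleword loaded next; the shifts after
the cmpb use it to clear any spurious match in those bytes. */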
ld r12,0(r8) /* Load doubleword from memory. */
cmpb r5,r12,r4 /* Compare each byte against c byte. */
#ifdef __LITTLE_ENDIAN__
srd r5,r5,r6 /* Move right to discard ignored bits. */
sld r5,r5,r6 /* Bring the bits back as zeros. */
#else
sld r5,r5,r6 /* Move left to discard ignored bits. */
srd r5,r5,r6 /* Bring the bits back as zeros. */
#endif
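/* On big-endian the bytes before s sit at the high-order end of the
doubleword, so the shifts above clear them by shifting left first; on
little-endian they sit at the low-order end, hence the reversed shift
order. */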
cmpdi cr7,r5,0 /* If r5 == 0, no c bytes have been found. */
bne cr7,L(done)
mtcrf 0x01,r8
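/* The mtcrf above copies the low four bits of the address into cr7 so
that the bt below can test CR bit 28, i.e. whether (r8 & 8) is set. */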
/* Are we now aligned to a quadword boundary? If so, skip to
the main loop. Otherwise, go through the alignment code. */
bt 28,L(loop)
/* Handle DWORD2 of pair. */
ldu r12,8(r8)
cmpb r5,r12,r4
cmpdi cr7,r5,0
bne cr7,L(done)
b L(loop) /* We branch here (rather than falling through)
to skip the nops due to heavy alignment
of the loop below. */
/* Main loop to look for the end of the string. Since it's a
small loop (< 8 instructions), align it to 32-bytes. */
.p2align 5
L(loop):
/* Load two doublewords, compare each against the c doubleword, and
OR the results into a single register so that one branch checks
16 bytes per iteration. */
ld r12,8(r8)
ldu r11,16(r8)
cmpb r5,r12,r4
cmpb r6,r11,r4
or r7,r5,r6
cmpdi cr7,r7,0
beq cr7,L(loop)
/* OK, one (or both) of the doublewords contains a 'c' byte. Check
the first doubleword and decrement the address in case the first
doubleword really contains a c byte. */
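/* Note: the ldu in the loop advanced r8 to the second doubleword of
the pair, so the first doubleword is at r8 - 8. */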
cmpdi cr6,r5,0
addi r8,r8,-8
bne cr6,L(done)
/* The 'c' byte must be in the second doubleword. Adjust the address
again and move the result of cmpb to r5 so we can calculate the
pointer. */
mr r5,r6
addi r8,r8,8
/* r5 has the output of the cmpb instruction, that is, it contains
0xff in the same position as the 'c' byte in the original
doubleword from the string. Use that fact to find the position of
the byte inside the string. */
L(done):
#ifdef __LITTLE_ENDIAN__
addi r0,r5,-1
andc r0,r0,r5
popcntd r0,r0 /* Count trailing zeros. */
#else
cntlzd r0,r5 /* Count leading zeros before the match. */
#endif
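/* On big-endian, cntlzd counts the zero bits before the leftmost 0xff
byte, and the leftmost byte is the one at the lowest address. On
little-endian, (r5 - 1) & ~r5 sets exactly the bits below the lowest
set bit, so popcntd gives the number of trailing zeros, and the
low-order byte is the one at the lowest address. Either way, dividing
by 8 below yields the byte offset of the match within the doubleword
at r8. */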
srdi r0,r0,3 /* Convert the zero-bit count into a byte offset. */
add r3,r8,r0 /* Return address of the matching char. */
blr
END (__rawmemchr)
weak_alias (__rawmemchr,rawmemchr)
libc_hidden_builtin_def (__rawmemchr)