Fix whitespace issues.

Ulrich Drepper 2010-03-10 07:28:04 -08:00
parent fb084e5e80
commit b8907dfd8b
2 changed files with 609 additions and 609 deletions


@@ -58,412 +58,412 @@ EALIGN (BP_SYM (memcpy), 5, 0)
        /* Get the SRC aligned to 8 bytes. */
1:      bf      31,2f
        lbz     6,0(12)
        addi    12,12,1
        stb     6,0(3)
        addi    3,3,1
2:      bf      30,4f
        lhz     6,0(12)
        addi    12,12,2
        sth     6,0(3)
        addi    3,3,2
4:      bf      29,0f
        lwz     6,0(12)
        addi    12,12,4
        stw     6,0(3)
        addi    3,3,4
0:
        clrlwi  10,12,29        /* Check alignment of SRC again. */
        srwi    9,31,3          /* Number of full doublewords remaining. */
L(copy_GE_32_aligned_cont):
        clrlwi  11,31,29
        mtcrf   0x01,9
        srwi    8,31,5
        cmplwi  cr1,9,4
        cmplwi  cr6,11,0
        mr      11,12
        /* Copy 1~3 doublewords so the main loop starts
           at a multiple of 32 bytes. */
        bf      30,1f
        lfd     6,0(12)
        lfd     7,8(12)
        addi    11,12,16
        mtctr   8
        stfd    6,0(3)
        stfd    7,8(3)
        addi    10,3,16
        bf      31,4f
        lfd     0,16(12)
        stfd    0,16(3)
        blt     cr1,3f
        addi    11,12,24
        addi    10,3,24
        b       4f
        .align  4
1:      /* Copy 1 doubleword and set the counter. */
        mr      10,3
        mtctr   8
        bf      31,4f
        lfd     6,0(12)
        addi    11,12,8
        stfd    6,0(3)
        addi    10,3,8
        .align  4
4:      /* Main aligned copy loop. Copies 32-bytes at a time. */
        lfd     6,0(11)
        lfd     7,8(11)
        lfd     8,16(11)
        lfd     0,24(11)
        addi    11,11,32
        stfd    6,0(10)
        stfd    7,8(10)
        stfd    8,16(10)
        stfd    0,24(10)
        addi    10,10,32
        bdnz    4b
3:
        /* Check for tail bytes. */
        clrrwi  0,31,3
        mtcrf   0x01,31
        beq     cr6,0f
.L9:
        add     3,3,0
        add     12,12,0
        /* At this point we have a tail of 0-7 bytes and we know that the
           destination is doubleword-aligned. */
4:      /* Copy 4 bytes. */
        bf      29,2f
        lwz     6,0(12)
        addi    12,12,4
        stw     6,0(3)
        addi    3,3,4
2:      /* Copy 2 bytes. */
        bf      30,1f
        lhz     6,0(12)
        addi    12,12,2
        sth     6,0(3)
        addi    3,3,2
1:      /* Copy 1 byte. */
        bf      31,0f
        lbz     6,0(12)
        stb     6,0(3)
0:      /* Return original DST pointer. */
        mr      3,30
        lwz     30,20(1)
        lwz     31,24(1)
        addi    1,1,32
        blr
        /* Handle copies of 0~31 bytes. */
        .align  4
L(copy_LT_32):
        cmplwi  cr6,5,8
        mr      12,4
        mtcrf   0x01,5
        ble     cr6,L(copy_LE_8)
        /* At least 9 bytes to go. */
        neg     8,4
        clrrwi  11,4,2
        andi.   0,8,3
        cmplwi  cr1,5,16
        mr      10,5
        beq     L(copy_LT_32_aligned)
        /* Force 4-bytes alignment for SRC. */
        mtocrf  0x01,0
        subf    10,0,5
2:      bf      30,1f
        lhz     6,0(12)
        addi    12,12,2
        sth     6,0(3)
        addi    3,3,2
1:      bf      31,L(end_4bytes_alignment)
        lbz     6,0(12)
        addi    12,12,1
        stb     6,0(3)
        addi    3,3,1
        .align  4
L(end_4bytes_alignment):
        cmplwi  cr1,10,16
        mtcrf   0x01,10
L(copy_LT_32_aligned):
        /* At least 6 bytes to go, and SRC is word-aligned. */
        blt     cr1,8f
        /* Copy 16 bytes. */
        lwz     6,0(12)
        lwz     7,4(12)
        stw     6,0(3)
        lwz     8,8(12)
        stw     7,4(3)
        lwz     6,12(12)
        addi    12,12,16
        stw     8,8(3)
        stw     6,12(3)
        addi    3,3,16
8:      /* Copy 8 bytes. */
        bf      28,4f
        lwz     6,0(12)
        lwz     7,4(12)
        addi    12,12,8
        stw     6,0(3)
        stw     7,4(3)
        addi    3,3,8
4:      /* Copy 4 bytes. */
        bf      29,2f
        lwz     6,0(12)
        addi    12,12,4
        stw     6,0(3)
        addi    3,3,4
2:      /* Copy 2-3 bytes. */
        bf      30,1f
        lhz     6,0(12)
        sth     6,0(3)
        bf      31,0f
        lbz     7,2(12)
        stb     7,2(3)
        /* Return original DST pointer. */
        mr      3,30
        lwz     30,20(1)
        addi    1,1,32
        blr
        .align  4
1:      /* Copy 1 byte. */
        bf      31,0f
        lbz     6,0(12)
        stb     6,0(3)
0:      /* Return original DST pointer. */
        mr      3,30
        lwz     30,20(1)
        addi    1,1,32
        blr
        /* Handles copies of 0~8 bytes. */
        .align  4
L(copy_LE_8):
        bne     cr6,4f
        /* Though we could've used lfd/stfd here, they are still
           slow for unaligned cases. */
        lwz     6,0(4)
        lwz     7,4(4)
        stw     6,0(3)
        stw     7,4(3)
        /* Return original DST pointer. */
        mr      3,30
        lwz     30,20(1)
        addi    1,1,32
        blr
        .align  4
4:      /* Copies 4~7 bytes. */
        bf      29,2b
        lwz     6,0(4)
        stw     6,0(3)
        bf      30,5f
        lhz     7,4(4)
        sth     7,4(3)
        bf      31,0f
        lbz     8,6(4)
        stb     8,6(3)
        /* Return original DST pointer. */
        mr      3,30
        lwz     30,20(1)
        addi    1,1,32
        blr
        .align  4
5:      /* Copy 1 byte. */
        bf      31,0f
        lbz     6,4(4)
        stb     6,4(3)
0:      /* Return original DST pointer. */
        mr      3,30
        lwz     30,20(1)
        addi    1,1,32
        blr
        /* Handle copies of 32+ bytes where DST is aligned (to quadword) but
           SRC is not. Use aligned quadword loads from SRC, shifted to realign
           the data, allowing for aligned DST stores. */
        .align  4
L(copy_GE_32_unaligned):
        andi.   11,3,15         /* Check alignment of DST. */
        clrlwi  0,0,28          /* Number of bytes until the 1st
                                   quadword of DST. */
        srwi    9,5,4           /* Number of full quadwords remaining. */
        beq     L(copy_GE_32_unaligned_cont)
        /* SRC is not quadword aligned, get it aligned. */
        mtcrf   0x01,0
        subf    31,0,5
        /* Vector instructions work best when proper alignment (16-bytes)
           is present. Move 0~15 bytes as needed to get DST quadword-aligned. */
1:      /* Copy 1 byte. */
        bf      31,2f
        lbz     6,0(12)
        addi    12,12,1
        stb     6,0(3)
        addi    3,3,1
2:      /* Copy 2 bytes. */
        bf      30,4f
        lhz     6,0(12)
        addi    12,12,2
        sth     6,0(3)
        addi    3,3,2
4:      /* Copy 4 bytes. */
        bf      29,8f
        lwz     6,0(12)
        addi    12,12,4
        stw     6,0(3)
        addi    3,3,4
8:      /* Copy 8 bytes. */
        bf      28,0f
        lfd     6,0(12)
        addi    12,12,8
        stfd    6,0(3)
        addi    3,3,8
0:
        clrlwi  10,12,28        /* Check alignment of SRC. */
        srdi    9,31,4          /* Number of full quadwords remaining. */
        /* The proper alignment is present, it is OK to copy the bytes now. */
L(copy_GE_32_unaligned_cont):
        /* Setup two indexes to speed up the indexed vector operations. */
        clrlwi  11,31,28
        li      6,16            /* Index for 16-bytes offsets. */
        li      7,32            /* Index for 32-bytes offsets. */
        cmplwi  cr1,11,0
        srdi    8,31,5          /* Setup the loop counter. */
        mr      10,3
        mr      11,12
        mtcrf   0x01,9
        cmplwi  cr6,9,1
        lvsl    5,0,12
        lvx     3,0,12
        bf      31,L(setup_unaligned_loop)
        /* Copy another 16 bytes to align to 32-bytes due to the loop. */
        lvx     4,12,6
        vperm   6,3,4,5
        addi    11,12,16
        addi    10,3,16
        stvx    6,0,3
        vor     3,4,4
L(setup_unaligned_loop):
        mtctr   8
        ble     cr6,L(end_unaligned_loop)
        /* Copy 32 bytes at a time using vector instructions. */
        .align  4
L(unaligned_loop):
        /* Note: vr6/vr10 may contain data that was already copied,
           but in order to get proper alignment, we may have to copy
           some portions again. This is faster than having unaligned
           vector instructions though. */
        lvx     4,11,6          /* vr4 = r11+16. */
        vperm   6,3,4,5         /* Merge the correctly-aligned portions
                                   of vr3/vr4 into vr6. */
        lvx     3,11,7          /* vr3 = r11+32. */
        vperm   10,4,3,5        /* Merge the correctly-aligned portions
                                   of vr3/vr4 into vr10. */
        addi    11,11,32
        stvx    6,0,10
        stvx    10,10,6
        addi    10,10,32
        bdnz    L(unaligned_loop)
        .align  4
L(end_unaligned_loop):
        /* Check for tail bytes. */
        clrrwi  0,31,4
        mtcrf   0x01,31
        beq     cr1,0f
        add     3,3,0
        add     12,12,0
        /* We have 1~15 tail bytes to copy, and DST is quadword aligned. */
8:      /* Copy 8 bytes. */
        bf      28,4f
        lwz     6,0(12)
        lwz     7,4(12)
        addi    12,12,8
        stw     6,0(3)
        stw     7,4(3)
        addi    3,3,8
4:      /* Copy 4 bytes. */
        bf      29,2f
        lwz     6,0(12)
        addi    12,12,4
        stw     6,0(3)
        addi    3,3,4
2:      /* Copy 2~3 bytes. */
        bf      30,1f
        lhz     6,0(12)
        addi    12,12,2
        sth     6,0(3)
        addi    3,3,2
1:      /* Copy 1 byte. */
        bf      31,0f
        lbz     6,0(12)
        stb     6,0(3)
0:      /* Return original DST pointer. */
        mr      3,30
        lwz     30,20(1)
        lwz     31,24(1)
        addi    1,1,32
        blr
END (BP_SYM (memcpy))
libc_hidden_builtin_def (memcpy)
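
Every path above finishes its tail with the same trick: mtcrf 0x01,31 copies the low bits of the remaining length into a condition-register field, and each bf 28/29/30/31 branch then skips the 8-, 4-, 2- or 1-byte move when the corresponding length bit is clear, so the last few bytes are handled without a counted loop. A minimal C sketch of that selection logic, assuming nothing beyond what the listing shows (the helper name copy_tail is mine, not glibc's):

#include <stddef.h>
#include <string.h>

/* Hypothetical model of the branch-per-bit tail copy: one test per
   power-of-two size instead of a byte loop.  */
void
copy_tail (unsigned char *dst, const unsigned char *src, size_t len)
{
  if (len & 8) { memcpy (dst, src, 8); dst += 8; src += 8; }   /* the bf 28 test */
  if (len & 4) { memcpy (dst, src, 4); dst += 4; src += 4; }   /* the bf 29 test */
  if (len & 2) { memcpy (dst, src, 2); dst += 2; src += 2; }   /* the bf 30 test */
  if (len & 1) *dst = *src;                                    /* the bf 31 test */
}

For a bounded tail, four predictable tests are cheaper than a counted byte loop, which is why the assembly stays branchy here.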


@@ -58,23 +58,23 @@ EALIGN (BP_SYM (memcpy), 5, 0)
        /* Get the SRC aligned to 8 bytes. */
1:      bf      31,2f
        lbz     6,0(12)
        addi    12,12,1
        stb     6,0(3)
        addi    3,3,1
2:      bf      30,4f
        lhz     6,0(12)
        addi    12,12,2
        sth     6,0(3)
        addi    3,3,2
4:      bf      29,0f
        lwz     6,0(12)
        addi    12,12,4
        stw     6,0(3)
        addi    3,3,4
0:
        clrldi  10,12,61        /* Check alignment of SRC again. */
        srdi    9,31,3          /* Number of full doublewords remaining. */
L(copy_GE_32_aligned_cont):
@@ -107,343 +107,343 @@ L(copy_GE_32_aligned_cont):
        .align  4
1:      /* Copy 1 doubleword and set the counter. */
        mr      10,3
        mtctr   8
        bf      31,4f
        ld      6,0(12)
        addi    11,12,8
        std     6,0(3)
        addi    10,3,8
        /* Main aligned copy loop. Copies 32-bytes at a time. */
        .align  4
4:
        ld      6,0(11)
        ld      7,8(11)
        ld      8,16(11)
        ld      0,24(11)
        addi    11,11,32
        std     6,0(10)
        std     7,8(10)
        std     8,16(10)
        std     0,24(10)
        addi    10,10,32
        bdnz    4b
3:
        /* Check for tail bytes. */
        rldicr  0,31,0,60
        mtcrf   0x01,31
        beq     cr6,0f
.L9:
        add     3,3,0
        add     12,12,0
        /* At this point we have a tail of 0-7 bytes and we know that the
           destination is doubleword-aligned. */
4:      /* Copy 4 bytes. */
        bf      29,2f
        lwz     6,0(12)
        addi    12,12,4
        stw     6,0(3)
        addi    3,3,4
2:      /* Copy 2 bytes. */
        bf      30,1f
        lhz     6,0(12)
        addi    12,12,2
        sth     6,0(3)
        addi    3,3,2
1:      /* Copy 1 byte. */
        bf      31,0f
        lbz     6,0(12)
        stb     6,0(3)
0:      /* Return original DST pointer. */
        ld      31,-8(1)
        ld      3,-16(1)
        blr
        /* Handle copies of 0~31 bytes. */
        .align  4
L(copy_LT_32):
        cmpldi  cr6,5,8
        mr      12,4
        mtcrf   0x01,5
        ble     cr6,L(copy_LE_8)
        /* At least 9 bytes to go. */
        neg     8,4
        clrrdi  11,4,2
        andi.   0,8,3
        cmpldi  cr1,5,16
        mr      10,5
        beq     L(copy_LT_32_aligned)
        /* Force 4-bytes alignment for SRC. */
        mtocrf  0x01,0
        subf    10,0,5
2:      bf      30,1f
        lhz     6,0(12)
        addi    12,12,2
        sth     6,0(3)
        addi    3,3,2
1:      bf      31,L(end_4bytes_alignment)
        lbz     6,0(12)
        addi    12,12,1
        stb     6,0(3)
        addi    3,3,1
        .align  4
L(end_4bytes_alignment):
        cmpldi  cr1,10,16
        mtcrf   0x01,10
L(copy_LT_32_aligned):
        /* At least 6 bytes to go, and SRC is word-aligned. */
        blt     cr1,8f
        /* Copy 16 bytes. */
        lwz     6,0(12)
        lwz     7,4(12)
        stw     6,0(3)
        lwz     8,8(12)
        stw     7,4(3)
        lwz     6,12(12)
        addi    12,12,16
        stw     8,8(3)
        stw     6,12(3)
        addi    3,3,16
8:      /* Copy 8 bytes. */
        bf      28,4f
        lwz     6,0(12)
        lwz     7,4(12)
        addi    12,12,8
        stw     6,0(3)
        stw     7,4(3)
        addi    3,3,8
4:      /* Copy 4 bytes. */
        bf      29,2f
        lwz     6,0(12)
        addi    12,12,4
        stw     6,0(3)
        addi    3,3,4
2:      /* Copy 2-3 bytes. */
        bf      30,1f
        lhz     6,0(12)
        sth     6,0(3)
        bf      31,0f
        lbz     7,2(12)
        stb     7,2(3)
        ld      3,-16(1)
        blr
        .align  4
1:      /* Copy 1 byte. */
        bf      31,0f
        lbz     6,0(12)
        stb     6,0(3)
0:      /* Return original DST pointer. */
        ld      3,-16(1)
        blr
        /* Handles copies of 0~8 bytes. */
        .align  4
L(copy_LE_8):
        bne     cr6,4f
        /* Though we could've used ld/std here, they are still
           slow for unaligned cases. */
        lwz     6,0(4)
        lwz     7,4(4)
        stw     6,0(3)
        stw     7,4(3)
        ld      3,-16(1)        /* Return original DST pointers. */
        blr
        .align  4
4:      /* Copies 4~7 bytes. */
        bf      29,2b
        lwz     6,0(4)
        stw     6,0(3)
        bf      30,5f
        lhz     7,4(4)
        sth     7,4(3)
        bf      31,0f
        lbz     8,6(4)
        stb     8,6(3)
        ld      3,-16(1)
        blr
        .align  4
5:      /* Copy 1 byte. */
        bf      31,0f
        lbz     6,4(4)
        stb     6,4(3)
0:      /* Return original DST pointer. */
        ld      3,-16(1)
        blr
        /* Handle copies of 32+ bytes where DST is aligned (to quadword) but
           SRC is not. Use aligned quadword loads from SRC, shifted to realign
           the data, allowing for aligned DST stores. */
        .align  4
L(copy_GE_32_unaligned):
        clrldi  0,0,60          /* Number of bytes until the 1st
                                   quadword. */
        andi.   11,3,15         /* Check alignment of DST (against
                                   quadwords). */
        srdi    9,5,4           /* Number of full quadwords remaining. */
        beq     L(copy_GE_32_unaligned_cont)
        /* SRC is not quadword aligned, get it aligned. */
        mtcrf   0x01,0
        subf    31,0,5
        /* Vector instructions work best when proper alignment (16-bytes)
           is present. Move 0~15 bytes as needed to get DST quadword-aligned. */
1:      /* Copy 1 byte. */
        bf      31,2f
        lbz     6,0(12)
        addi    12,12,1
        stb     6,0(3)
        addi    3,3,1
2:      /* Copy 2 bytes. */
        bf      30,4f
        lhz     6,0(12)
        addi    12,12,2
        sth     6,0(3)
        addi    3,3,2
4:      /* Copy 4 bytes. */
        bf      29,8f
        lwz     6,0(12)
        addi    12,12,4
        stw     6,0(3)
        addi    3,3,4
8:      /* Copy 8 bytes. */
        bf      28,0f
        ld      6,0(12)
        addi    12,12,8
        std     6,0(3)
        addi    3,3,8
0:
        clrldi  10,12,60        /* Check alignment of SRC. */
        srdi    9,31,4          /* Number of full quadwords remaining. */
        /* The proper alignment is present, it is OK to copy the bytes now. */
L(copy_GE_32_unaligned_cont):
        /* Setup two indexes to speed up the indexed vector operations. */
        clrldi  11,31,60
        li      6,16            /* Index for 16-bytes offsets. */
        li      7,32            /* Index for 32-bytes offsets. */
        cmpldi  cr1,11,0
        srdi    8,31,5          /* Setup the loop counter. */
        mr      10,3
        mr      11,12
        mtcrf   0x01,9
        cmpldi  cr6,9,1
        lvsl    5,0,12
        lvx     3,0,12
        bf      31,L(setup_unaligned_loop)
        /* Copy another 16 bytes to align to 32-bytes due to the loop. */
        lvx     4,12,6
        vperm   6,3,4,5
        addi    11,12,16
        addi    10,3,16
        stvx    6,0,3
        vor     3,4,4
L(setup_unaligned_loop):
        mtctr   8
        ble     cr6,L(end_unaligned_loop)
        /* Copy 32 bytes at a time using vector instructions. */
        .align  4
L(unaligned_loop):
        /* Note: vr6/vr10 may contain data that was already copied,
           but in order to get proper alignment, we may have to copy
           some portions again. This is faster than having unaligned
           vector instructions though. */
        lvx     4,11,6          /* vr4 = r11+16. */
        vperm   6,3,4,5         /* Merge the correctly-aligned portions
                                   of vr3/vr4 into vr6. */
        lvx     3,11,7          /* vr3 = r11+32. */
        vperm   10,4,3,5        /* Merge the correctly-aligned portions
                                   of vr3/vr4 into vr10. */
        addi    11,11,32
        stvx    6,0,10
        stvx    10,10,6
        addi    10,10,32
        bdnz    L(unaligned_loop)
        .align  4
L(end_unaligned_loop):
        /* Check for tail bytes. */
        rldicr  0,31,0,59
        mtcrf   0x01,31
        beq     cr1,0f
        add     3,3,0
        add     12,12,0
        /* We have 1~15 tail bytes to copy, and DST is quadword aligned. */
8:      /* Copy 8 bytes. */
        bf      28,4f
        lwz     6,0(12)
        lwz     7,4(12)
        addi    12,12,8
        stw     6,0(3)
        stw     7,4(3)
        addi    3,3,8
4:      /* Copy 4 bytes. */
        bf      29,2f
        lwz     6,0(12)
        addi    12,12,4
        stw     6,0(3)
        addi    3,3,4
2:      /* Copy 2~3 bytes. */
        bf      30,1f
        lhz     6,0(12)
        addi    12,12,2
        sth     6,0(3)
        addi    3,3,2
1:      /* Copy 1 byte. */
        bf      31,0f
        lbz     6,0(12)
        stb     6,0(3)
0:      /* Return original DST pointer. */
        ld      31,-8(1)
        ld      3,-16(1)
        blr
END_GEN_TB (BP_SYM (memcpy),TB_TOCLESS)
libc_hidden_builtin_def (memcpy)
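
The interesting part of both listings is how a misaligned source is serviced with only aligned memory traffic: lvsl derives a permute control from the source address, every lvx load is 16-byte aligned, and vperm splices each pair of neighbouring aligned quadwords into the misaligned 16 bytes that the quadword-aligned stvx store needs, with vor carrying the newer quadword into the next iteration. Below is a rough scalar C model of that splice, written only to illustrate the data movement; the function name and structure are mine, it assumes whole 16-byte quadwords and a nonzero misalignment, and it is not the glibc code:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Scalar model of the lvx/vperm realignment loop: read only aligned
   16-byte blocks from SRC and merge neighbouring blocks so that every
   16-byte store to the aligned DST is itself aligned.  */
void
copy_realigned (uint8_t *dst, const uint8_t *src, size_t nquads)
{
  size_t off = (uintptr_t) src & 15;   /* misalignment, what lvsl encodes  */
  const uint8_t *asrc = src - off;     /* aligned base, what lvx would use */
  uint8_t prev[16], next[16], out[16];

  if (off == 0)                        /* an aligned SRC takes another path */
    {
      memcpy (dst, src, nquads * 16);
      return;
    }

  memcpy (prev, asrc, 16);             /* first aligned quadword (lvx)     */
  for (size_t i = 0; i < nquads; i++)
    {
      memcpy (next, asrc + 16 * (i + 1), 16);  /* next aligned quadword    */
      /* vperm equivalent: bytes off..15 of prev, then 0..off-1 of next.   */
      memcpy (out, prev + off, 16 - off);
      memcpy (out + (16 - off), next, off);
      memcpy (dst + 16 * i, out, 16);          /* aligned store, as stvx   */
      memcpy (prev, next, 16);                 /* vor 3,4,4: carry it over */
    }
}

As the note inside L(unaligned_loop) says, some source bytes are read twice, which is still cheaper than issuing unaligned vector accesses. One caveat of the scalar model: like lvx, its final read stays inside the aligned quadword that holds the last needed byte, but in portable C that can still touch bytes past the end of a buffer, so treat it purely as an illustration of the byte selection, not as a drop-in routine.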