/* Copy SIZE bytes from SRC to DEST.  For UltraSPARC-III.
   Copyright (C) 2001 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by David S. Miller (davem@redhat.com)

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#include <sysdep.h>

#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#define SMALL_COPY_USES_FPU
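/* Editor's note: VISEntryHalf saves the current %fprs into %o5 and sets
 * FPRS_FEF so the FPU/VIS instructions below can execute; VISExitHalf
 * writes back only the saved FEF bit, clearing the dirty bits.  A
 * minimal C sketch of the idea (fprs_read/fprs_write are hypothetical
 * stand-ins for the rd/wr %fprs instructions):
 *
 *	unsigned long saved = fprs_read();	// VISEntryHalf
 *	fprs_write(FPRS_FEF);			// enable FP, start clean
 *	// ... use the low half of the FP register file ...
 *	fprs_write(saved & FPRS_FEF);		// VISExitHalf
 */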
#ifndef XCC
#define USE_BPR
#define XCC xcc
#endif

	.text
	.align	32
ENTRY(bcopy)
	sub		%o1, %o0, %o4		/* IEU0  Group	*/
	mov		%o0, %g3		/* IEU1		*/
	cmp		%o4, %o2		/* IEU1  Group	*/
	mov		%o1, %o0		/* IEU0		*/
	bgeu,pt		%XCC, 100f		/* CTI		*/
	mov		%g3, %o1		/* IEU0  Group	*/
#ifndef USE_BPR
	srl		%o2, 0, %o2		/* IEU1		*/
#endif
	brnz,pn		%o2, 220f		/* CTI   Group	*/
	add		%o0, %o2, %o0		/* IEU0		*/
	retl
	nop
END(bcopy)
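/* Editor's note: bcopy(src, dst, len) arrives with %o0=src, %o1=dst; the
 * code above swaps them into the memcpy convention and picks a direction
 * with an unsigned trick: if (dst - src) >= len as an unsigned value, a
 * forward copy cannot clobber unread source bytes, so the memcpy body at
 * 100: is safe; otherwise control falls into the backward path at 220:
 * inside memmove.  A rough C sketch (helper names are illustrative):
 *
 *	void bcopy_sketch(const char *src, char *dst, size_t len)
 *	{
 *		if ((size_t)(dst - src) >= len)
 *			copy_forward(dst, src, len);		  // 100:
 *		else if (len != 0)
 *			copy_backward(dst + len, src + len, len); // 220:
 *	}
 */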
/* Special/non-trivial issues of this code:
 *
 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
 * 2) Only low 32 FPU registers are used so that only the
 *    lower half of the FPU register set is dirtied by this
 *    code.  This is especially important in the kernel.
 * 3) This code never prefetches cachelines past the end
 *    of the source buffer.
 *
 * The cheetah's flexible spine, oversized liver, enlarged heart,
 * slender muscular body, and claws make it the swiftest hunter
 * in Africa and the fastest animal on land.  Can reach speeds
 * of up to 2.4GB per second.
 */

	.align	32
ENTRY(memcpy)

100: /* %o0=dst, %o1=src, %o2=len */
#ifndef __KERNEL__
	/* Save away original 'dst' for memcpy return value. */
	mov		%o0, %g3		! A0 Group
#endif
	/* Anything to copy at all? */
	cmp		%o2, 0			! A1
	ble,pn		%XCC, 102f		! BR

	/* Extremely small copy? */
218:	cmp		%o2, 31			! A0 Group
	ble,pn		%XCC, 101f		! BR

	/* Large enough to use unrolled prefetch loops? */
	cmp		%o2, 0x100		! A1
	bge,a,pt	%XCC, 103f		! BR Group
	andcc		%o0, 0x3f, %g2		! A0

	ba,pt		%XCC, 108f		! BR Group
	andcc		%o0, 0x7, %g2		! A0
	.align	32
101:
	/* Copy %o2 bytes from src to dst, one byte at a time. */
	ldub		[%o1 + 0x00], %o3	! MS Group
	add		%o1, 0x1, %o1		! A0
	add		%o0, 0x1, %o0		! A1
	subcc		%o2, 1, %o2		! A0 Group

	bg,pt		%XCC, 101b		! BR
	stb		%o3, [%o0 + -1]		! MS Group (1-cycle stall)
102:
#ifdef __KERNEL__
	retl					! BR Group (0-4 cycle stall)
	clr		%o0			! A0
#else
	retl					! BR Group (0-4 cycle stall)
	mov		%g3, %o0		! A0
#endif
/* Here len >= 0x100 and condition codes reflect execution
 * of "andcc %o0, 0x3f, %g2", done by caller.
 */
	.align	64
103:
	/* Is 'dst' already aligned on a 64-byte boundary? */
	be,pt		%XCC, 2f		! BR

	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
	sub		%g2, 0x40, %g2		! A0 Group
	sub		%g0, %g2, %g2		! A0 Group
	sub		%o2, %g2, %o2		! A0 Group
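	/* Editor's note: a C rendering of the three subs above, assuming
	 * %g2 arrived as (dst & 0x3f) from the caller's andcc:
	 *
	 *	g2 = (dst & 0x3f) - 0x40;	// negative
	 *	g2 = 0 - g2;			// == 0x40 - (dst & 0x3f)
	 *	len -= g2;			// pre-subtract head bytes
	 */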
	/* Copy %g2 bytes from src to dst, one byte at a time. */
1:	ldub		[%o1 + 0x00], %o3	! MS (Group)
	add		%o1, 0x1, %o1		! A1
	add		%o0, 0x1, %o0		! A0 Group
	subcc		%g2, 0x1, %g2		! A1

	bg,pt		%XCC, 1b		! BR Group
	stb		%o3, [%o0 + -1]		! MS Group
2:	VISEntryHalf				! MS+MS
	and		%o1, 0x7, %g1		! A1
	ba,pt		%XCC, 104f		! BR
	alignaddr	%o1, %g0, %o1		! MS (Break-after)
	.align	64
104:
	prefetch	[%o1 + 0x000], #one_read	! MS Group1
	prefetch	[%o1 + 0x040], #one_read	! MS Group2
	andn		%o2, (0x40 - 1), %o4		! A0
	prefetch	[%o1 + 0x080], #one_read	! MS Group3
	cmp		%o4, 0x140			! A0
	prefetch	[%o1 + 0x0c0], #one_read	! MS Group4
	ldd		[%o1 + 0x000], %f0		! MS Group5 (%f0 results at G8)
	bge,a,pt	%XCC, 1f			! BR

	prefetch	[%o1 + 0x100], #one_read	! MS Group6
1:	ldd		[%o1 + 0x008], %f2		! AX (%f2 results at G9)
	cmp		%o4, 0x180			! A1
	bge,a,pt	%XCC, 1f			! BR
	prefetch	[%o1 + 0x140], #one_read	! MS Group7
1:	ldd		[%o1 + 0x010], %f4		! AX (%f4 results at G10)
	cmp		%o4, 0x1c0			! A1
	bge,a,pt	%XCC, 1f			! BR

	prefetch	[%o1 + 0x180], #one_read	! MS Group8
1:	faligndata	%f0, %f2, %f16			! FGA Group9 (%f16 at G12)
	ldd		[%o1 + 0x018], %f6		! AX (%f6 results at G12)
	faligndata	%f2, %f4, %f18			! FGA Group10 (%f18 results at G13)
	ldd		[%o1 + 0x020], %f8		! MS (%f8 results at G13)
	faligndata	%f4, %f6, %f20			! FGA Group12 (1-cycle stall,%f20 at G15)
	ldd		[%o1 + 0x028], %f10		! MS (%f10 results at G15)
	faligndata	%f6, %f8, %f22			! FGA Group13 (%f22 results at G16)

	ldd		[%o1 + 0x030], %f12		! MS (%f12 results at G16)
	faligndata	%f8, %f10, %f24			! FGA Group15 (1-cycle stall,%f24 at G18)
	ldd		[%o1 + 0x038], %f14		! MS (%f14 results at G18)
	faligndata	%f10, %f12, %f26		! FGA Group16 (%f26 results at G19)
	ldd		[%o1 + 0x040], %f0		! MS (%f0 results at G19)
	/* We only use the first loop if len > (7 * 64). */
	subcc		%o4, 0x1c0, %o4		! A0 Group17
	bg,pt		%XCC, 105f		! BR
	add		%o1, 0x40, %o1		! A1

	add		%o4, 0x140, %o4		! A0 Group18
	ba,pt		%XCC, 106f		! BR
	srl		%o4, 6, %o3		! A0 Group19
	nop
	nop
	nop
	nop
	nop

	nop
	nop
	/* This loop performs the copy and queues new prefetches.
	 * We drop into the second loop when len <= (5 * 64).  Note
	 * that this (5 * 64) factor has been subtracted from len
	 * already.
	 */
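	/* Editor's note: a schematic C sketch of the software pipelining in
	 * the loop below -- each trip stores one 64-byte block assembled by
	 * faligndata from data loaded on the previous trip, loads the next
	 * block, and prefetches 0x180 bytes ahead (names are illustrative):
	 *
	 *	while (chunks-- > 0) {
	 *		block_store_64(dst, merged);	 // stda ASI_BLK_P
	 *		merged = align_merge(prev, cur); // eight faligndata ops
	 *		prev = cur;
	 *		cur  = load_64(src + 0x40);	 // eight ldd ops
	 *		prefetch_read_once(src + 0x180);
	 *		src += 0x40; dst += 0x40;
	 *	}
	 */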
105:
	ldd		[%o1 + 0x008], %f2	! MS Group2 (%f2 results at G5)
	faligndata	%f12, %f14, %f28	! FGA (%f28 results at G5)
	ldd		[%o1 + 0x010], %f4	! MS Group3 (%f4 results at G6)
	faligndata	%f14, %f0, %f30		! FGA Group4 (1-cycle stall, %f30 at G7)
	stda		%f16, [%o0] ASI_BLK_P	! MS
	ldd		[%o1 + 0x018], %f6	! AX (%f6 results at G7)

	faligndata	%f0, %f2, %f16		! FGA Group12 (7-cycle stall)
	ldd		[%o1 + 0x020], %f8	! MS (%f8 results at G15)
	faligndata	%f2, %f4, %f18		! FGA Group13 (%f18 results at G16)
	ldd		[%o1 + 0x028], %f10	! MS (%f10 results at G16)
	faligndata	%f4, %f6, %f20		! FGA Group14 (%f20 results at G17)
	ldd		[%o1 + 0x030], %f12	! MS (%f12 results at G17)
	faligndata	%f6, %f8, %f22		! FGA Group15 (%f22 results at G18)
	ldd		[%o1 + 0x038], %f14	! MS (%f14 results at G18)

	faligndata	%f8, %f10, %f24		! FGA Group16 (%f24 results at G19)
	ldd		[%o1 + 0x040], %f0	! AX (%f0 results at G19)
	prefetch	[%o1 + 0x180], #one_read ! MS
	faligndata	%f10, %f12, %f26	! FGA Group17 (%f26 results at G20)
	subcc		%o4, 0x40, %o4		! A0
	add		%o1, 0x40, %o1		! A1
	bg,pt		%XCC, 105b		! BR
	add		%o0, 0x40, %o0		! A0 Group18

	mov		5, %o3			! A1
	/* This loop performs only the copy; no new prefetches are
	 * queued.  We do things this way so that we do not perform
	 * any spurious prefetches past the end of the src buffer.
	 */
106:
	ldd		[%o1 + 0x008], %f2	! MS
	faligndata	%f12, %f14, %f28	! FGA Group2
	ldd		[%o1 + 0x010], %f4	! MS
	faligndata	%f14, %f0, %f30		! FGA Group4 (1-cycle stall)
	stda		%f16, [%o0] ASI_BLK_P	! MS
	ldd		[%o1 + 0x018], %f6	! AX
	faligndata	%f0, %f2, %f16		! FGA Group12 (7-cycle stall)

	ldd		[%o1 + 0x020], %f8	! MS
	faligndata	%f2, %f4, %f18		! FGA Group13
	ldd		[%o1 + 0x028], %f10	! MS
	faligndata	%f4, %f6, %f20		! FGA Group14
	ldd		[%o1 + 0x030], %f12	! MS
	faligndata	%f6, %f8, %f22		! FGA Group15
	ldd		[%o1 + 0x038], %f14	! MS
	faligndata	%f8, %f10, %f24		! FGA Group16

	ldd		[%o1 + 0x040], %f0	! AX
	faligndata	%f10, %f12, %f26	! FGA Group17
	subcc		%o3, 0x01, %o3		! A0
	add		%o1, 0x40, %o1		! A1
	bg,pt		%XCC, 106b		! BR
	add		%o0, 0x40, %o0		! A0 Group18
	/* Finally we copy the last full 64-byte block. */
	ldd		[%o1 + 0x008], %f2	! MS
	faligndata	%f12, %f14, %f28	! FGA
	ldd		[%o1 + 0x010], %f4	! MS Group19
	faligndata	%f14, %f0, %f30		! FGA
	stda		%f16, [%o0] ASI_BLK_P	! MS Group20
	ldd		[%o1 + 0x018], %f6	! AX
	faligndata	%f0, %f2, %f16		! FGA Group11 (7-cycle stall)
	ldd		[%o1 + 0x020], %f8	! MS
	faligndata	%f2, %f4, %f18		! FGA Group12
	ldd		[%o1 + 0x028], %f10	! MS
	faligndata	%f4, %f6, %f20		! FGA Group13
	ldd		[%o1 + 0x030], %f12	! MS
	faligndata	%f6, %f8, %f22		! FGA Group14
	ldd		[%o1 + 0x038], %f14	! MS
	faligndata	%f8, %f10, %f24		! FGA Group15
	cmp		%g1, 0			! A0
	be,pt		%XCC, 1f		! BR
	add		%o0, 0x40, %o0		! A1
	ldd		[%o1 + 0x040], %f0	! MS
1:	faligndata	%f10, %f12, %f26	! FGA Group16
	faligndata	%f12, %f14, %f28	! FGA Group17
	faligndata	%f14, %f0, %f30		! FGA Group18
	stda		%f16, [%o0] ASI_BLK_P	! MS
	add		%o0, 0x40, %o0		! A0
	add		%o1, 0x40, %o1		! A1
	membar		#Sync			! MS Group26 (7-cycle stall)
	/* Now we copy the (len modulo 64) bytes at the end.
	 * Note how we borrow the %f0 loaded above.
	 *
	 * Also notice how this code is careful not to perform a
	 * load past the end of the src buffer just like similar
	 * code found in 'toosmall' processing.
	 */
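	/* Editor's note: a rough map of the tail handling below.  At most
	 * 63 bytes remain; the multiple-of-8 part (%g2 = rem & 0x38) is
	 * moved with ldd/faligndata/std pairs that alternate %f0 and %f2,
	 * and whatever is left falls through to the byte loop at 101:.
	 *
	 *	rem = len & 0x3f;	// %o2
	 *	g2  = rem & 0x38;	// 8-byte portion
	 *	// while 8-byte chunks remain: store8(dst, merge(a, b)),
	 *	// rolling a/b so each ldd feeds two faligndata merges
	 */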
	and		%o2, 0x3f, %o2		! A0 Group
	andcc		%o2, 0x38, %g2		! A0 Group
	be,pn		%XCC, 107f		! BR
	subcc		%g2, 0x8, %g2		! A1
	be,pn		%XCC, 107f		! BR Group
	cmp		%g1, 0			! A0

	be,a,pt		%XCC, 1f		! BR Group
	ldd		[%o1 + 0x00], %f0	! MS

1:	ldd		[%o1 + 0x08], %f2	! MS Group
	add		%o1, 0x8, %o1		! A0
	sub		%o2, 0x8, %o2		! A1
	subcc		%g2, 0x8, %g2		! A0 Group
	faligndata	%f0, %f2, %f8		! FGA Group
	std		%f8, [%o0 + 0x00]	! MS (XXX does it stall here? XXX)
	be,pn		%XCC, 107f		! BR
	add		%o0, 0x8, %o0		! A0
	ldd		[%o1 + 0x08], %f0	! MS Group
	add		%o1, 0x8, %o1		! A0
	sub		%o2, 0x8, %o2		! A1
	subcc		%g2, 0x8, %g2		! A0 Group
	faligndata	%f2, %f0, %f8		! FGA
	std		%f8, [%o0 + 0x00]	! MS (XXX does it stall here? XXX)
	bne,pn		%XCC, 1b		! BR
	add		%o0, 0x8, %o0		! A0 Group
	/* If anything is left, we copy it one byte at a time.
	 * Note that %g1 is (src & 0x7) saved above before the
	 * alignaddr was performed.
	 */
107:
	cmp		%o2, 0
	add		%o1, %g1, %o1
	VISExitHalf
	be,pn		%XCC, 102b
	nop
	ba,a,pt		%XCC, 101b
	/* If we get here, then 32 <= len < 0x100. */
108:
#ifdef SMALL_COPY_USES_FPU

	/* Is 'dst' already aligned on an 8-byte boundary? */
	be,pt		%XCC, 2f		! BR Group
	/* Compute abs((dst & 7) - 8) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 8-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
	sub		%g2, 0x8, %g2		! A0
	sub		%g0, %g2, %g2		! A0 Group (reg-dep)
	sub		%o2, %g2, %o2		! A0 Group (reg-dep)
	/* Copy %g2 bytes from src to dst, one byte at a time. */
1:	ldub		[%o1 + 0x00], %o3	! MS (Group) (%o3 in 3 cycles)
	add		%o1, 0x1, %o1		! A1
	add		%o0, 0x1, %o0		! A0 Group
	subcc		%g2, 0x1, %g2		! A1

	bg,pt		%XCC, 1b		! BR Group
	stb		%o3, [%o0 + -1]		! MS Group
2:	VISEntryHalf				! MS+MS

	/* Compute (len - (len % 8)) into %g2.  This is guaranteed
	 * to be nonzero.
	 */
	andn		%o2, 0x7, %g2		! A0 Group
	/* You may read this and believe that it allows reading
	 * one 8-byte longword past the end of src.  It actually
	 * does not, as %g2 is subtracted as loads are done from
	 * src, so we always stop before running off the end.
	 * Also, we are guaranteed to have at least 0x10 bytes
	 * to move here.
	 */
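	/* Editor's note: a rough, illustrative C sketch of the
	 * alignaddr/faligndata idiom used below.  alignaddr rounds %o1
	 * down to an 8-byte boundary and latches the byte offset in the
	 * %gsr align field; each faligndata then merges two adjacent
	 * aligned doublewords into the eight misaligned bytes wanted:
	 *
	 *	// merge(a, b): the high (8 - off) bytes of a followed by
	 *	// the low off bytes of b, where off = src & 7
	 *	b = aligned_load8(p); p += 8;	// always an aligned ldd
	 *	store8(dst, merge(a, b));	// faligndata + std
	 *	a = b;				// roll the pair forward
	 */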
	sub		%g2, 0x8, %g2		! A0 Group (reg-dep)
	alignaddr	%o1, %g0, %g1		! MS (Break-after)
	ldd		[%g1 + 0x00], %f0	! MS Group (1-cycle stall)
	add		%g1, 0x8, %g1		! A0
1:	ldd		[%g1 + 0x00], %f2	! MS Group
	add		%g1, 0x8, %g1		! A0
	sub		%o2, 0x8, %o2		! A1
	subcc		%g2, 0x8, %g2		! A0 Group

	faligndata	%f0, %f2, %f8		! FGA Group (1-cycle stall)
	std		%f8, [%o0 + 0x00]	! MS Group (2-cycle stall)
	add		%o1, 0x8, %o1		! A0
	be,pn		%XCC, 2f		! BR

	add		%o0, 0x8, %o0		! A1
	ldd		[%g1 + 0x00], %f0	! MS Group
	add		%g1, 0x8, %g1		! A0
	sub		%o2, 0x8, %o2		! A1

	subcc		%g2, 0x8, %g2		! A0 Group
	faligndata	%f2, %f0, %f8		! FGA Group (1-cycle stall)
	std		%f8, [%o0 + 0x00]	! MS Group (2-cycle stall)
	add		%o1, 0x8, %o1		! A0

	bne,pn		%XCC, 1b		! BR
	add		%o0, 0x8, %o0		! A1
	/* Nothing left to copy? */
2:	cmp		%o2, 0			! A0 Group
	VISExitHalf				! A0+MS
	be,pn		%XCC, 102b		! BR Group
	nop					! A0
	ba,a,pt		%XCC, 101b		! BR Group
#else /* !(SMALL_COPY_USES_FPU) */

	xor		%o1, %o0, %g2
	andcc		%g2, 0x7, %g0
	bne,pn		%XCC, 101b
	andcc		%o1, 0x7, %g2

	be,pt		%XCC, 2f
	sub		%g2, 0x8, %g2
	sub		%g0, %g2, %g2
	sub		%o2, %g2, %o2

1:	ldub		[%o1 + 0x00], %o3
	add		%o1, 0x1, %o1
	add		%o0, 0x1, %o0
	subcc		%g2, 0x1, %g2
	bg,pt		%XCC, 1b
	stb		%o3, [%o0 + -1]

2:	andn		%o2, 0x7, %g2
	sub		%o2, %g2, %o2

3:	ldx		[%o1 + 0x00], %o3
	add		%o1, 0x8, %o1
	add		%o0, 0x8, %o0
	subcc		%g2, 0x8, %g2
	bg,pt		%XCC, 3b
	stx		%o3, [%o0 + -8]

	cmp		%o2, 0
	bne,pn		%XCC, 101b
	nop
	ba,a,pt		%XCC, 102b
#endif /* !(SMALL_COPY_USES_FPU) */
END(memcpy)
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3)	\
	ldx		[%src - offset - 0x20], %t0;		\
	ldx		[%src - offset - 0x18], %t1;		\
	ldx		[%src - offset - 0x10], %t2;		\
	ldx		[%src - offset - 0x08], %t3;		\
	stw		%t0, [%dst - offset - 0x1c];		\
	srlx		%t0, 32, %t0;				\
	stw		%t0, [%dst - offset - 0x20];		\
	stw		%t1, [%dst - offset - 0x14];		\
	srlx		%t1, 32, %t1;				\
	stw		%t1, [%dst - offset - 0x18];		\
	stw		%t2, [%dst - offset - 0x0c];		\
	srlx		%t2, 32, %t2;				\
	stw		%t2, [%dst - offset - 0x10];		\
	stw		%t3, [%dst - offset - 0x04];		\
	srlx		%t3, 32, %t3;				\
	stw		%t3, [%dst - offset - 0x08];
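/* Editor's note: the RMOVE_* helpers implement the descending
 * (backward) copies for memmove.  RMOVE_BIGCHUNK loads four doublewords
 * and stores them as eight 32-bit words, for the case where dst is only
 * 32-bit aligned relative to src; RMOVE_BIGALIGNCHUNK keeps full 64-bit
 * stx stores.  Schematic C for one BIGCHUNK doubleword (big-endian,
 * helper names illustrative):
 *
 *	uint64_t t0 = load8(src - off - 0x20);
 *	store4(dst - off - 0x1c, (uint32_t)t0);		// low word, high addr
 *	store4(dst - off - 0x20, (uint32_t)(t0 >> 32));	// high word, low addr
 */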
#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3)	\
	ldx		[%src - offset - 0x20], %t0;		\
	ldx		[%src - offset - 0x18], %t1;		\
	ldx		[%src - offset - 0x10], %t2;		\
	ldx		[%src - offset - 0x08], %t3;		\
	stx		%t0, [%dst - offset - 0x20];		\
	stx		%t1, [%dst - offset - 0x18];		\
	stx		%t2, [%dst - offset - 0x10];		\
	stx		%t3, [%dst - offset - 0x08];		\
	ldx		[%src - offset - 0x40], %t0;		\
	ldx		[%src - offset - 0x38], %t1;		\
	ldx		[%src - offset - 0x30], %t2;		\
	ldx		[%src - offset - 0x28], %t3;		\
	stx		%t0, [%dst - offset - 0x40];		\
	stx		%t1, [%dst - offset - 0x38];		\
	stx		%t2, [%dst - offset - 0x30];		\
	stx		%t3, [%dst - offset - 0x28];
#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3)	\
	ldx		[%src + offset + 0x00], %t0;		\
	ldx		[%src + offset + 0x08], %t1;		\
	stw		%t0, [%dst + offset + 0x04];		\
	srlx		%t0, 32, %t2;				\
	stw		%t2, [%dst + offset + 0x00];		\
	stw		%t1, [%dst + offset + 0x0c];		\
	srlx		%t1, 32, %t3;				\
	stw		%t3, [%dst + offset + 0x08];
#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1)	\
	ldx		[%src + offset + 0x00], %t0;	\
	ldx		[%src + offset + 0x08], %t1;	\
	stx		%t0, [%dst + offset + 0x00];	\
	stx		%t1, [%dst + offset + 0x08];
	.align	32
228:	andcc		%o2, 1, %g0		/* IEU1 Group	*/
	be,pt		%icc, 2f+4		/* CTI		*/
1:	ldub		[%o1 - 1], %o5		/* LOAD Group	*/
	sub		%o1, 1, %o1		/* IEU0		*/
	sub		%o0, 1, %o0		/* IEU1		*/
	subcc		%o2, 1, %o2		/* IEU1 Group	*/
	be,pn		%xcc, 229f		/* CTI		*/
	stb		%o5, [%o0]		/* Store	*/
2:	ldub		[%o1 - 1], %o5		/* LOAD Group	*/
	sub		%o0, 2, %o0		/* IEU0		*/
	ldub		[%o1 - 2], %g5		/* LOAD Group	*/
	sub		%o1, 2, %o1		/* IEU0		*/
	subcc		%o2, 2, %o2		/* IEU1 Group	*/
	stb		%o5, [%o0 + 1]		/* Store	*/
	bne,pt		%xcc, 2b		/* CTI		*/
	stb		%g5, [%o0]		/* Store	*/
229:	retl
	mov		%g4, %o0
	.align	32
ENTRY(memmove)
	mov		%o0, %g3
#ifndef USE_BPR
	srl		%o2, 0, %o2		/* IEU1 Group	*/
#endif
	brz,pn		%o2, 102b		/* CTI Group	*/
	sub		%o0, %o1, %o4		/* IEU0		*/
	cmp		%o4, %o2		/* IEU1 Group	*/
	bgeu,pt		%XCC, 218b		/* CTI		*/
	mov		%o0, %g4		/* IEU0		*/
	add		%o0, %o2, %o0		/* IEU0 Group	*/
220:	add		%o1, %o2, %o1		/* IEU1		*/
	cmp		%o2, 15			/* IEU1 Group	*/
	bleu,pn		%xcc, 228b		/* CTI		*/
	andcc		%o0, 7, %g2		/* IEU1 Group	*/
	sub		%o0, %o1, %g5		/* IEU0		*/
	andcc		%g5, 3, %o5		/* IEU1 Group	*/
	bne,pn		%xcc, 232f		/* CTI		*/
	andcc		%o1, 3, %g0		/* IEU1 Group	*/
	be,a,pt		%xcc, 236f		/* CTI		*/
	andcc		%o1, 4, %g0		/* IEU1 Group	*/
	andcc		%o1, 1, %g0		/* IEU1 Group	*/
	be,pn		%xcc, 4f		/* CTI		*/
	andcc		%o1, 2, %g0		/* IEU1 Group	*/
	ldub		[%o1 - 1], %g2		/* Load Group	*/
	sub		%o1, 1, %o1		/* IEU0		*/
	sub		%o0, 1, %o0		/* IEU1		*/
	sub		%o2, 1, %o2		/* IEU0 Group	*/
	be,pn		%xcc, 5f		/* CTI Group	*/
	stb		%g2, [%o0]		/* Store	*/
4:	lduh		[%o1 - 2], %g2		/* Load Group	*/
	sub		%o1, 2, %o1		/* IEU0		*/
	sub		%o0, 2, %o0		/* IEU1		*/
	sub		%o2, 2, %o2		/* IEU0		*/
	sth		%g2, [%o0]		/* Store Group + bubble */
5:	andcc		%o1, 4, %g0		/* IEU1		*/
236:	be,a,pn		%xcc, 2f		/* CTI		*/
	andcc		%o2, -128, %g7		/* IEU1 Group	*/
	lduw		[%o1 - 4], %g5		/* Load Group	*/
	sub		%o1, 4, %o1		/* IEU0		*/
	sub		%o0, 4, %o0		/* IEU1		*/
	sub		%o2, 4, %o2		/* IEU0 Group	*/
	stw		%g5, [%o0]		/* Store	*/
	andcc		%o2, -128, %g7		/* IEU1 Group	*/
2:	be,pn		%xcc, 235f		/* CTI		*/
	andcc		%o0, 4, %g0		/* IEU1 Group	*/
	be,pn		%xcc, 282f + 4		/* CTI Group	*/
5:	RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
	RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
	subcc		%g7, 128, %g7		/* IEU1 Group	*/
	sub		%o1, 128, %o1		/* IEU0		*/
	bne,pt		%xcc, 5b		/* CTI		*/
	sub		%o0, 128, %o0		/* IEU0 Group	*/
235:	andcc		%o2, 0x70, %g7		/* IEU1 Group	*/
41:	be,pn		%xcc, 280f		/* CTI		*/
	andcc		%o2, 8, %g0		/* IEU1 Group	*/
	/* Clk1 8-( */
	/* Clk2 8-( */
	/* Clk3 8-( */
	/* Clk4 8-( */
279:	rd		%pc, %o5		/* PDU Group	*/
	sll		%g7, 1, %g5		/* IEU0 Group	*/
	sub		%o1, %g7, %o1		/* IEU1		*/
	sub		%o5, %g5, %o5		/* IEU0 Group	*/
	jmpl		%o5 + %lo(280f - 279b), %g0	/* CTI Group brk forced */
	sub		%o0, %g7, %o0		/* IEU0 Group	*/
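	/* Editor's note: the computed branch above jumps into the unrolled
	 * RMOVE_LASTCHUNK list.  %g7 = (len & 0x70) bytes remain, and each
	 * RMOVE_LASTCHUNK moves 16 data bytes with 8 instructions (32 code
	 * bytes), hence "sll %g7, 1": back up two code bytes per data byte
	 * so exactly %g7/16 chunks execute.
	 *
	 *	target = %pc(279) + (280f - 279b) - 2 * g7;
	 *
	 * The aligned variant at 283: backs %o5 up by %g7 directly, since
	 * each RMOVE_LASTALIGNCHUNK is 16 code bytes per 16 data bytes.
	 */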
	RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
	RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
280:	be,pt		%xcc, 281f		/* CTI		*/
	andcc		%o2, 4, %g0		/* IEU1		*/
	ldx		[%o1 - 8], %g2		/* Load Group	*/
	sub		%o0, 8, %o0		/* IEU0		*/
	stw		%g2, [%o0 + 4]		/* Store Group	*/
	sub		%o1, 8, %o1		/* IEU1		*/
	srlx		%g2, 32, %g2		/* IEU0 Group	*/
	stw		%g2, [%o0]		/* Store	*/
281:	be,pt		%xcc, 1f		/* CTI		*/
	andcc		%o2, 2, %g0		/* IEU1 Group	*/
	lduw		[%o1 - 4], %g2		/* Load Group	*/
	sub		%o1, 4, %o1		/* IEU0		*/
	stw		%g2, [%o0 - 4]		/* Store Group	*/
	sub		%o0, 4, %o0		/* IEU0		*/
1:	be,pt		%xcc, 1f		/* CTI		*/
	andcc		%o2, 1, %g0		/* IEU1 Group	*/
	lduh		[%o1 - 2], %g2		/* Load Group	*/
	sub		%o1, 2, %o1		/* IEU0		*/
	sth		%g2, [%o0 - 2]		/* Store Group	*/
	sub		%o0, 2, %o0		/* IEU0		*/
1:	be,pt		%xcc, 211f		/* CTI		*/
	nop					/* IEU1		*/
	ldub		[%o1 - 1], %g2		/* Load Group	*/
	stb		%g2, [%o0 - 1]		/* Store Group + bubble */
211:	retl
	mov		%g4, %o0
282:	RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
	RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
	subcc		%g7, 128, %g7		/* IEU1 Group	*/
	sub		%o1, 128, %o1		/* IEU0		*/
	bne,pt		%xcc, 282b		/* CTI		*/
	sub		%o0, 128, %o0		/* IEU0 Group	*/
	andcc		%o2, 0x70, %g7		/* IEU1		*/
	be,pn		%xcc, 284f		/* CTI		*/
	andcc		%o2, 8, %g0		/* IEU1 Group	*/
	/* Clk1 8-( */
	/* Clk2 8-( */
	/* Clk3 8-( */
	/* Clk4 8-( */
283:	rd		%pc, %o5		/* PDU Group	*/
	sub		%o1, %g7, %o1		/* IEU0 Group	*/
	sub		%o5, %g7, %o5		/* IEU1		*/
	jmpl		%o5 + %lo(284f - 283b), %g0	/* CTI Group brk forced */
	sub		%o0, %g7, %o0		/* IEU0 Group	*/
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
	RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
284:	be,pt		%xcc, 285f		/* CTI Group	*/
	andcc		%o2, 4, %g0		/* IEU1		*/
	ldx		[%o1 - 8], %g2		/* Load Group	*/
	sub		%o0, 8, %o0		/* IEU0		*/
	sub		%o1, 8, %o1		/* IEU0 Group	*/
	stx		%g2, [%o0]		/* Store	*/
285:	be,pt		%xcc, 1f		/* CTI		*/
	andcc		%o2, 2, %g0		/* IEU1 Group	*/
	lduw		[%o1 - 4], %g2		/* Load Group	*/
	sub		%o0, 4, %o0		/* IEU0		*/
	sub		%o1, 4, %o1		/* IEU0 Group	*/
	stw		%g2, [%o0]		/* Store	*/
1:	be,pt		%xcc, 1f		/* CTI		*/
	andcc		%o2, 1, %g0		/* IEU1 Group	*/
	lduh		[%o1 - 2], %g2		/* Load Group	*/
	sub		%o0, 2, %o0		/* IEU0		*/
	sub		%o1, 2, %o1		/* IEU0 Group	*/
	sth		%g2, [%o0]		/* Store	*/
1:	be,pt		%xcc, 1f		/* CTI		*/
	nop					/* IEU0 Group	*/
	ldub		[%o1 - 1], %g2		/* Load Group	*/
	stb		%g2, [%o0 - 1]		/* Store Group + bubble */
1:	retl
	mov		%g4, %o0
232:	brz,pt		%g2, 2f			/* CTI Group	*/
	sub		%o2, %g2, %o2		/* IEU0 Group	*/
1:	ldub		[%o1 - 1], %g5		/* Load Group	*/
	sub		%o1, 1, %o1		/* IEU0		*/
	sub		%o0, 1, %o0		/* IEU1		*/
	subcc		%g2, 1, %g2		/* IEU1 Group	*/
	bne,pt		%xcc, 1b		/* CTI		*/
	stb		%g5, [%o0]		/* Store	*/
2:	andn		%o2, 7, %g5		/* IEU0 Group	*/
	and		%o2, 7, %o2		/* IEU1		*/
	fmovd		%f0, %f2		/* FPU		*/
	alignaddr	%o1, %g0, %g1		/* GRU Group	*/
	ldd		[%g1], %f4		/* Load Group	*/
1:	ldd		[%g1 - 8], %f6		/* Load Group	*/
	sub		%g1, 8, %g1		/* IEU0 Group	*/
	subcc		%g5, 8, %g5		/* IEU1		*/
	faligndata	%f6, %f4, %f0		/* GRU Group	*/
	std		%f0, [%o0 - 8]		/* Store	*/
	sub		%o1, 8, %o1		/* IEU0 Group	*/
	be,pn		%xcc, 233f		/* CTI		*/
	sub		%o0, 8, %o0		/* IEU1		*/
	ldd		[%g1 - 8], %f4		/* Load Group	*/
	sub		%g1, 8, %g1		/* IEU0		*/
	subcc		%g5, 8, %g5		/* IEU1		*/
	faligndata	%f4, %f6, %f0		/* GRU Group	*/
	std		%f0, [%o0 - 8]		/* Store	*/
	sub		%o1, 8, %o1		/* IEU0		*/
	bne,pn		%xcc, 1b		/* CTI Group	*/
	sub		%o0, 8, %o0		/* IEU0		*/
233:	brz,pn		%o2, 234f		/* CTI Group	*/
	nop					/* IEU0		*/
237:	ldub		[%o1 - 1], %g5		/* LOAD		*/
	sub		%o1, 1, %o1		/* IEU0		*/
	sub		%o0, 1, %o0		/* IEU1		*/
	subcc		%o2, 1, %o2		/* IEU1		*/
	bne,pt		%xcc, 237b		/* CTI		*/
	stb		%g5, [%o0]		/* Store Group	*/
234:	wr		%g0, FPRS_FEF, %fprs
	retl
	mov		%g4, %o0
END(memmove)
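/* Editor's note: the __align_cpy_N entry points are the Sparc V9 ABI's
 * aligned-copy routines, which compilers may call for block moves of
 * known alignment.  This memcpy handles every alignment case itself,
 * so all of them can simply alias the one implementation.
 */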
#ifdef USE_BPR
weak_alias(memcpy, __align_cpy_1)
weak_alias(memcpy, __align_cpy_2)
weak_alias(memcpy, __align_cpy_4)
weak_alias(memcpy, __align_cpy_8)
weak_alias(memcpy, __align_cpy_16)
#endif