mirror of
https://sourceware.org/git/glibc.git
synced 2024-12-20 09:41:18 +00:00
ae6b873031
1999-03-29  Jakub Jelinek  <jj@ultra.linux.cz>

        * sysdeps/sparc/sparc32/sparcv8/Makefile: New file.
        * sysdeps/sparc/sparc32/sparcv8/rem.S: Delay after write %y.
        * sysdeps/sparc/sparc32/sparcv8/sdiv.S: Likewise.
        * sysdeps/sparc/sparc32/sparcv8/udiv.S: Likewise.
        * sysdeps/sparc/sparc32/sparcv8/urem.S: Likewise.
        * sysdeps/sparc/sparc32/sparcv9/bcopy.c: New file.
        * sysdeps/sparc/sparc32/sparcv9/bzero.c: New file.
        * sysdeps/sparc/sparc32/sparcv9/memchr.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/memcmp.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/memcpy.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/memset.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/stpcpy.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/strcat.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/strchr.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/strcmp.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/strcpy.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/strcspn.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/strlen.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/strncpy.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/strpbrk.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/strrchr.c: New file.
        * sysdeps/sparc/sparc32/sparcv9/strspn.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/dotmul.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/rem.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/sdiv.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/udiv.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/umul.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/urem.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/Makefile: New file.
        * sysdeps/sparc/sparc32/sparcv9/stpncpy.S: New file.
        * sysdeps/sparc/sparc32/sparcv9/strncmp.S: New file.
        * sysdeps/sparc/sparc32/bcopy.c: New file.
        * sysdeps/sparc/sparc32/bzero.c: New file.
        * sysdeps/sparc/sparc32/memchr.S: New file.
        * sysdeps/sparc/sparc32/memcpy.S: New file.
        * sysdeps/sparc/sparc32/memmove.c: New file.
        * sysdeps/sparc/sparc32/memset.S: New file.
        * sysdeps/sparc/sparc32/stpcpy.S: New file.
        * sysdeps/sparc/sparc32/strchr.S: New file.
        * sysdeps/sparc/sparc32/strrchr.c: New file.
        * sysdeps/sparc/sparc32/strcpy.S: New file.
        * sysdeps/sparc/sparc32/strlen.S: New file.
        * sysdeps/sparc/sparc32/strcat.S: New file.
        * sysdeps/sparc/sparc32/strcmp.S: New file.
        * sysdeps/sparc/sparc64/bcopy.c: New file.
        * sysdeps/sparc/sparc64/bzero.c: New file.
        * sysdeps/sparc/sparc64/memchr.S: New file.
        * sysdeps/sparc/sparc64/memcmp.S: New file.
        * sysdeps/sparc/sparc64/memcpy.S: New file.
        * sysdeps/sparc/sparc64/memset.S: New file.
        * sysdeps/sparc/sparc64/stpcpy.S: New file.
        * sysdeps/sparc/sparc64/strcat.S: New file.
        * sysdeps/sparc/sparc64/strchr.S: New file.
        * sysdeps/sparc/sparc64/strcmp.S: New file.
        * sysdeps/sparc/sparc64/strcpy.S: New file.
        * sysdeps/sparc/sparc64/strcspn.S: New file.
        * sysdeps/sparc/sparc64/strlen.S: New file.
        * sysdeps/sparc/sparc64/strncpy.S: New file.
        * sysdeps/sparc/sparc64/strpbrk.S: New file.
        * sysdeps/sparc/sparc64/strrchr.c: New file.
        * sysdeps/sparc/sparc64/strspn.S: New file.
        * sysdeps/sparc/sparc64/stpncpy.S: New file.
        * sysdeps/sparc/sparc64/strncmp.S: New file.
        * sysdeps/unix/sysv/linux/sparc/sparc32/Makefile (ASFLAGS-.os):
        Append -fPIC, don't replace.
        * configure.in: Recognize sparcv8plus / sparcv9.
975 lines
22 KiB
ArmAsm
/* Copy SIZE bytes from SRC to DEST.
   For SPARC v7.
   Copyright (C) 1996, 1999 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by David S. Miller <davem@caip.rutgers.edu>,
                  Eddie C. Dost <ecd@skynet.be> and
                  Jakub Jelinek <jj@ultra.linux.cz>.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include <sysdep.h>
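/* A quick orientation (summarizing the code below): three entry points
   are defined here: memcpy, memmove and bcopy.  All take the length in
   %o2; memcpy and memmove take dst in %o0 and src in %o1, while bcopy
   takes them reversed and simply swaps them before falling into
   memmove.  The original dst is stashed in the stack slot at %sp + 64
   (the 32-bit SPARC ABI's structure-return slot, unused here) so that
   every exit path can return it with "retl; ld [%sp + 64], %o0".  */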
/* Both these macros have to start with exactly the same insn */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
        ldd     [%src + offset + 0x00], %t0; \
        ldd     [%src + offset + 0x08], %t2; \
        ldd     [%src + offset + 0x10], %t4; \
        ldd     [%src + offset + 0x18], %t6; \
        st      %t0, [%dst + offset + 0x00]; \
        st      %t1, [%dst + offset + 0x04]; \
        st      %t2, [%dst + offset + 0x08]; \
        st      %t3, [%dst + offset + 0x0c]; \
        st      %t4, [%dst + offset + 0x10]; \
        st      %t5, [%dst + offset + 0x14]; \
        st      %t6, [%dst + offset + 0x18]; \
        st      %t7, [%dst + offset + 0x1c];

#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
        ldd     [%src + offset + 0x00], %t0; \
        ldd     [%src + offset + 0x08], %t2; \
        ldd     [%src + offset + 0x10], %t4; \
        ldd     [%src + offset + 0x18], %t6; \
        std     %t0, [%dst + offset + 0x00]; \
        std     %t2, [%dst + offset + 0x08]; \
        std     %t4, [%dst + offset + 0x10]; \
        std     %t6, [%dst + offset + 0x18];

#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
        ldd     [%src - offset - 0x10], %t0; \
        ldd     [%src - offset - 0x08], %t2; \
        st      %t0, [%dst - offset - 0x10]; \
        st      %t1, [%dst - offset - 0x0c]; \
        st      %t2, [%dst - offset - 0x08]; \
        st      %t3, [%dst - offset - 0x04];

#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
        ldd     [%src - offset - 0x10], %t0; \
        ldd     [%src - offset - 0x08], %t2; \
        std     %t0, [%dst - offset - 0x10]; \
        std     %t2, [%dst - offset - 0x08];

#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
        ldub    [%src - offset - 0x02], %t0; \
        ldub    [%src - offset - 0x01], %t1; \
        stb     %t0, [%dst - offset - 0x02]; \
        stb     %t1, [%dst - offset - 0x01];

/* Both these macros have to start with exactly the same insn */
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
        ldd     [%src - offset - 0x20], %t0; \
        ldd     [%src - offset - 0x18], %t2; \
        ldd     [%src - offset - 0x10], %t4; \
        ldd     [%src - offset - 0x08], %t6; \
        st      %t0, [%dst - offset - 0x20]; \
        st      %t1, [%dst - offset - 0x1c]; \
        st      %t2, [%dst - offset - 0x18]; \
        st      %t3, [%dst - offset - 0x14]; \
        st      %t4, [%dst - offset - 0x10]; \
        st      %t5, [%dst - offset - 0x0c]; \
        st      %t6, [%dst - offset - 0x08]; \
        st      %t7, [%dst - offset - 0x04];

#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
        ldd     [%src - offset - 0x20], %t0; \
        ldd     [%src - offset - 0x18], %t2; \
        ldd     [%src - offset - 0x10], %t4; \
        ldd     [%src - offset - 0x08], %t6; \
        std     %t0, [%dst - offset - 0x20]; \
        std     %t2, [%dst - offset - 0x18]; \
        std     %t4, [%dst - offset - 0x10]; \
        std     %t6, [%dst - offset - 0x08];

#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
        ldd     [%src + offset + 0x00], %t0; \
        ldd     [%src + offset + 0x08], %t2; \
        st      %t0, [%dst + offset + 0x00]; \
        st      %t1, [%dst + offset + 0x04]; \
        st      %t2, [%dst + offset + 0x08]; \
        st      %t3, [%dst + offset + 0x0c];

#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
        ldub    [%src + offset + 0x00], %t0; \
        ldub    [%src + offset + 0x01], %t1; \
        stb     %t0, [%dst + offset + 0x00]; \
        stb     %t1, [%dst + offset + 0x01];
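/* A note on the macro families above (inferred from how the code below
   uses them): MOVE_* copy with ascending addresses, RMOVE_* with
   descending addresses relative to pointers already advanced past the
   end, for the overlapping memmove case.  The *BIGCHUNK macros move 32
   bytes per expansion with ldd loads and either word stores (st) or
   doubleword stores (std, the *BIGALIGNCHUNK variants), picked by the
   destination's 8-byte alignment.  Two properties matter to the code
   below: the BIGCHUNK/BIGALIGNCHUNK pairs start with an identical
   first insn, so a branch to "label + 4" can skip an insn that already
   ran in a branch delay slot; and the LASTCHUNK (6 insns per 16 bytes)
   and SHORTCHUNK (4 insns per 2 bytes) expansions have fixed code
   sizes, which the jmpl-based dispatch uses to compute entry points
   into a run of expansions.  */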
#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
        ldd     [%src + offset + 0x00], %t0; \
        ldd     [%src + offset + 0x08], %t2; \
        srl     %t0, shir, %t5; \
        srl     %t1, shir, %t6; \
        sll     %t0, shil, %t0; \
        or      %t5, %prev, %t5; \
        sll     %t1, shil, %prev; \
        or      %t6, %t0, %t0; \
        srl     %t2, shir, %t1; \
        srl     %t3, shir, %t6; \
        sll     %t2, shil, %t2; \
        or      %t1, %prev, %t1; \
        std     %t4, [%dst + offset + offset2 - 0x04]; \
        std     %t0, [%dst + offset + offset2 + 0x04]; \
        sll     %t3, shil, %prev; \
        or      %t6, %t2, %t4;

#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
        ldd     [%src + offset + 0x00], %t0; \
        ldd     [%src + offset + 0x08], %t2; \
        srl     %t0, shir, %t4; \
        srl     %t1, shir, %t5; \
        sll     %t0, shil, %t6; \
        or      %t4, %prev, %t0; \
        sll     %t1, shil, %prev; \
        or      %t5, %t6, %t1; \
        srl     %t2, shir, %t4; \
        srl     %t3, shir, %t5; \
        sll     %t2, shil, %t6; \
        or      %t4, %prev, %t2; \
        sll     %t3, shil, %prev; \
        or      %t5, %t6, %t3; \
        std     %t0, [%dst + offset + offset2 + 0x00]; \
        std     %t2, [%dst + offset + offset2 + 0x08];
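/* How the shifted copies work (a sketch; the register roles follow the
   macro arguments above): when src and dst are mutually misaligned,
   each 32-bit output word is assembled from two neighbouring source
   words W0, W1 as (W0 << shil) | (W1 >> shir), with shil + shir = 32.
   On this big-endian machine a source one byte past word alignment
   uses shil = 8, shir = 24:

        dst word = (W0 << 8) | (W1 >> 24)

   i.e. the low three bytes of W0 followed by the top byte of W1.  The
   %prev argument carries the left-shifted remainder of the last word
   loaded from one expansion to the next, so each source word is loaded
   only once.  */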
        .text
        .align  4

/* Alignment helper for the descending copy: peel off up to three
   trailing bytes through the end-relative pointers until %o1 is word
   aligned, then rejoin the backward path at 3: inside memmove.  */
70:     andcc   %o1, 1, %g0
        be      4f
         andcc  %o1, 2, %g0

        ldub    [%o1 - 1], %g2
        sub     %o1, 1, %o1
        stb     %g2, [%o0 - 1]
        sub     %o2, 1, %o2
        be      3f
         sub    %o0, 1, %o0
4:      lduh    [%o1 - 2], %g2
        sub     %o1, 2, %o1
        sth     %g2, [%o0 - 2]
        sub     %o2, 2, %o2
        b       3f
         sub    %o0, 2, %o0

0:      retl
         nop            ! Only bcopy returns here and it returns void...
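/* bcopy (src, dst, n) takes its pointer arguments in the opposite
   order from memmove (dst, src, n): swap %o0/%o1, test the length and
   fall straight through into memmove, whose first insn (the cmp) runs
   in the delay slot of the conditional branch back to 0: above.  */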
ENTRY(bcopy)
        mov     %o0, %o3
        mov     %o1, %o0
        mov     %o3, %o1
        tst     %o2
        bcs     0b
         /* Do the cmp in the delay slot */
END(bcopy)
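/* memmove chooses the copy direction up front: when dst <= src a
   forward copy is safe even for overlap, so it branches to the shared
   code at 9: in memcpy below; when src + len <= dst the regions are
   disjoint and it branches to 0: there likewise.  Only a genuinely
   overlapping move with dst above src runs the descending copy that
   follows, with both pointers first advanced past the end.  */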
ENTRY(memmove)
        cmp     %o0, %o1
        st      %o0, [%sp + 64]         ! remember dst for the return value
        bleu    9f
         sub    %o0, %o1, %o4

        add     %o1, %o2, %o3
        cmp     %o3, %o0
        bleu    0f
         andcc  %o4, 3, %o5

        add     %o1, %o2, %o1           ! overlapping, dst > src:
        add     %o0, %o2, %o0           ! copy descending from the end
        bne     77f
         cmp    %o2, 15
        bleu    91f
         andcc  %o1, 3, %g0
        bne     70b
3:       andcc  %o1, 4, %g0

        be      2f
         mov    %o2, %g1

        ld      [%o1 - 4], %o4
        sub     %g1, 4, %g1
        st      %o4, [%o0 - 4]
        sub     %o1, 4, %o1
        sub     %o0, 4, %o0
2:      andcc   %g1, 0xffffff80, %g7
        be      3f
         andcc  %o0, 4, %g0

        /* If dst is doubleword aligned, enter the std-based loop at
           74: just past its first insn; that insn is identical to the
           first insn of RMOVE_BIGCHUNK below, which executes in the
           delay slot of this branch either way.  */
        be      74f + 4
5:      RMOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
        RMOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
        RMOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
        RMOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
        subcc   %g7, 128, %g7
        sub     %o1, 128, %o1
        bne     5b
         sub    %o0, 128, %o0

3:      andcc   %g1, 0x70, %g7
        be      72f
         andcc  %g1, 8, %g0
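/* Dispatch into the RMOVE_LASTCHUNK run below without a loop: %g7
   holds the remaining 16-byte multiples (length & 0x70).  Each
   RMOVE_LASTCHUNK expansion is 6 insns = 24 bytes, so the entry point
   lies %g7 * 24/16 = %g7 * 1.5 bytes before 72:, computed into %o4.
   "call 100f" captures the address of the call insn in %o7 (saved and
   restored via %g2); the helper at 100: near the end of the file
   returns %o7 - %o4 in %o5, and the jmpl adds the distance from the
   call site to 72f.  The same pattern recurs at 102:, 103:, 104:,
   106: and (via 110:) 111:.  */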
        srl     %g7, 1, %o4
        mov     %o7, %g2
        add     %g7, %o4, %o4
101:    call    100f
         sub    %o1, %g7, %o1
        mov     %g2, %o7
        jmpl    %o5 + (72f - 101b), %g0
         sub    %o0, %g7, %o0

71:     RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
        RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
        RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
        RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
        RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
        RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
        RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
72:     be      73f
         andcc  %g1, 4, %g0

        ldd     [%o1 - 0x08], %g2
        sub     %o0, 8, %o0
        sub     %o1, 8, %o1
        st      %g2, [%o0]
        st      %g3, [%o0 + 0x04]
73:     be      1f
         andcc  %g1, 2, %g0

        ld      [%o1 - 4], %g2
        sub     %o1, 4, %o1
        st      %g2, [%o0 - 4]
        sub     %o0, 4, %o0
1:      be      1f
         andcc  %g1, 1, %g0

        lduh    [%o1 - 2], %g2
        sub     %o1, 2, %o1
        sth     %g2, [%o0 - 2]
        sub     %o0, 2, %o0
1:      be      1f
         nop

        ldub    [%o1 - 1], %g2
        stb     %g2, [%o0 - 1]
1:      retl
         ld     [%sp + 64], %o0
74:     RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
        RMOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
        RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
        RMOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
        subcc   %g7, 128, %g7
        sub     %o1, 128, %o1
        bne     74b
         sub    %o0, 128, %o0

        andcc   %g1, 0x70, %g7
        be      72b
         andcc  %g1, 8, %g0

        srl     %g7, 1, %o4
        mov     %o7, %g2
        add     %g7, %o4, %o4
102:    call    100f
         sub    %o1, %g7, %o1
        mov     %g2, %o7
        jmpl    %o5 + (72b - 102b), %g0
         sub    %o0, %g7, %o0

75:     and     %o2, 0xe, %o3
        mov     %o7, %g2
        sll     %o3, 3, %o4
        sub     %o0, %o3, %o0
103:    call    100f
         sub    %o1, %o3, %o1
        mov     %g2, %o7
        jmpl    %o5 + (76f - 103b), %g0
         andcc  %o2, 1, %g0

        RMOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
        RMOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
        RMOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
        RMOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
        RMOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
        RMOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
        RMOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)

76:     be      1f
         nop
        ldub    [%o1 - 1], %g2
        stb     %g2, [%o0 - 1]
1:      retl
         ld     [%sp + 64], %o0

91:     bne     75b
         andcc  %o2, 8, %g0

        be      1f
         andcc  %o2, 4, %g0

        ld      [%o1 - 0x08], %g2
        ld      [%o1 - 0x04], %g3
        sub     %o1, 8, %o1
        st      %g2, [%o0 - 0x08]
        st      %g3, [%o0 - 0x04]
        sub     %o0, 8, %o0
1:      b       73b
         mov    %o2, %g1
77:     cmp     %o2, 15
        bleu    75b
         andcc  %o0, 3, %g0
        be      64f
         andcc  %o0, 1, %g0
        be      63f
         andcc  %o0, 2, %g0
        ldub    [%o1 - 1], %g5
        sub     %o1, 1, %o1
        stb     %g5, [%o0 - 1]
        sub     %o0, 1, %o0
        be      64f
         sub    %o2, 1, %o2

63:     ldub    [%o1 - 1], %g5
        sub     %o1, 2, %o1
        stb     %g5, [%o0 - 1]
        sub     %o0, 2, %o0
        ldub    [%o1], %g5
        sub     %o2, 2, %o2
        stb     %g5, [%o0]
64:     and     %o1, 3, %g2
        and     %o1, -4, %o1
        and     %o2, 0xc, %g3
        add     %o1, 4, %o1
        cmp     %g3, 4
        sll     %g2, 3, %g4
        mov     32, %g2
        be      4f
         sub    %g2, %g4, %g7

        blu     3f
         cmp    %g3, 8

        be      2f
         srl    %o2, 2, %g3

        ld      [%o1 - 4], %o3
        add     %o0, -8, %o0
        ld      [%o1 - 8], %o4
        add     %o1, -16, %o1
        b       7f
         add    %g3, 1, %g3
2:      ld      [%o1 - 4], %o4
        add     %o0, -4, %o0
        ld      [%o1 - 8], %g1
        add     %o1, -12, %o1
        b       8f
         add    %g3, 2, %g3
3:      ld      [%o1 - 4], %o5
        add     %o0, -12, %o0
        ld      [%o1 - 8], %o3
        add     %o1, -20, %o1
        b       6f
         srl    %o2, 2, %g3
4:      ld      [%o1 - 4], %g1
        srl     %o2, 2, %g3
        ld      [%o1 - 8], %o5
        add     %o1, -24, %o1
        add     %o0, -16, %o0
        add     %g3, -1, %g3

        ld      [%o1 + 12], %o3
5:      sll     %o5, %g4, %g2
        srl     %g1, %g7, %g5
        or      %g2, %g5, %g2
        st      %g2, [%o0 + 12]
6:      ld      [%o1 + 8], %o4
        sll     %o3, %g4, %g2
        srl     %o5, %g7, %g5
        or      %g2, %g5, %g2
        st      %g2, [%o0 + 8]
7:      ld      [%o1 + 4], %g1
        sll     %o4, %g4, %g2
        srl     %o3, %g7, %g5
        or      %g2, %g5, %g2
        st      %g2, [%o0 + 4]
8:      ld      [%o1], %o5
        sll     %g1, %g4, %g2
        srl     %o4, %g7, %g5
        addcc   %g3, -4, %g3
        or      %g2, %g5, %g2
        add     %o1, -16, %o1
        st      %g2, [%o0]
        add     %o0, -16, %o0
        bne,a   5b
         ld     [%o1 + 12], %o3
        sll     %o5, %g4, %g2
        srl     %g1, %g7, %g5
        srl     %g4, 3, %g3
        or      %g2, %g5, %g2
        add     %o1, %g3, %o1
        andcc   %o2, 2, %g0
        st      %g2, [%o0 + 12]
        be      1f
         andcc  %o2, 1, %g0

        ldub    [%o1 + 15], %g5
        add     %o1, -2, %o1
        stb     %g5, [%o0 + 11]
        add     %o0, -2, %o0
        ldub    [%o1 + 16], %g5
        stb     %g5, [%o0 + 12]
1:      be      1f
         nop
        ldub    [%o1 + 15], %g5
        stb     %g5, [%o0 + 11]
1:      retl
         ld     [%sp + 64], %o0

78:     andcc   %o1, 1, %g0
        be      4f
         andcc  %o1, 2, %g0

        ldub    [%o1], %g2
        add     %o1, 1, %o1
        stb     %g2, [%o0]
        sub     %o2, 1, %o2
        bne     3f
         add    %o0, 1, %o0
4:      lduh    [%o1], %g2
        add     %o1, 2, %o1
        sth     %g2, [%o0]
        sub     %o2, 2, %o2
        b       3f
         add    %o0, 2, %o0
END(memmove)
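/* memcpy copies forward only.  The original dst is again saved at
   %sp + 64 for the return value.  %o4 = dst - src decides the strategy
   at 0:/9: (also the entry points memmove branches to above): if the
   pointers differ by a multiple of 4, whole words can be moved
   directly; otherwise control goes to 86: and each output word is
   recombined from two input words with shifts.  */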
ENTRY(memcpy)           /* %o0=dst %o1=src %o2=len */
        sub     %o0, %o1, %o4
        st      %o0, [%sp + 64]
9:      andcc   %o4, 3, %o5
0:      bne     86f
         cmp    %o2, 15

        bleu    90f
         andcc  %o1, 3, %g0

        bne     78b
3:       andcc  %o1, 4, %g0

        be      2f
         mov    %o2, %g1

        ld      [%o1], %o4
        sub     %g1, 4, %g1
        st      %o4, [%o0]
        add     %o1, 4, %o1
        add     %o0, 4, %o0
2:      andcc   %g1, 0xffffff80, %g7
        be      3f
         andcc  %o0, 4, %g0

        /* Same trick as in memmove: enter the std loop at 82: past
           its first insn, which already ran in this delay slot.  */
        be      82f + 4
5:      MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
        subcc   %g7, 128, %g7
        add     %o1, 128, %o1
        bne     5b
         add    %o0, 128, %o0
3:      andcc   %g1, 0x70, %g7
        be      80f
         andcc  %g1, 8, %g0

        srl     %g7, 1, %o4
        mov     %o7, %g2
        add     %g7, %o4, %o4
        add     %o1, %g7, %o1
104:    call    100f
         add    %o0, %g7, %o0
        jmpl    %o5 + (80f - 104b), %g0
         mov    %g2, %o7

79:     MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)

80:     be      81f
         andcc  %g1, 4, %g0

        ldd     [%o1], %g2
        add     %o0, 8, %o0
        st      %g2, [%o0 - 0x08]
        add     %o1, 8, %o1
        st      %g3, [%o0 - 0x04]

81:     be      1f
         andcc  %g1, 2, %g0

        ld      [%o1], %g2
        add     %o1, 4, %o1
        st      %g2, [%o0]
        add     %o0, 4, %o0
1:      be      1f
         andcc  %g1, 1, %g0

        lduh    [%o1], %g2
        add     %o1, 2, %o1
        sth     %g2, [%o0]
        add     %o0, 2, %o0
1:      be      1f
         nop

        ldub    [%o1], %g2
        stb     %g2, [%o0]
1:      retl
         ld     [%sp + 64], %o0

82:     /* ldd_std */
        MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
        subcc   %g7, 128, %g7
        add     %o1, 128, %o1
        bne     82b
         add    %o0, 128, %o0

        andcc   %g1, 0x70, %g7
        be      84f
         andcc  %g1, 8, %g0

        mov     %o7, %g2
111:    call    110f
         add    %o1, %g7, %o1
        mov     %g2, %o7
        jmpl    %o5 + (84f - 111b), %g0
         add    %o0, %g7, %o0

83:     MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
        MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
        MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
        MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
        MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
        MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
        MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5)

84:     be      85f
         andcc  %g1, 4, %g0

        ldd     [%o1], %g2
        add     %o0, 8, %o0
        std     %g2, [%o0 - 0x08]
        add     %o1, 8, %o1
85:     be      1f
         andcc  %g1, 2, %g0

        ld      [%o1], %g2
        add     %o1, 4, %o1
        st      %g2, [%o0]
        add     %o0, 4, %o0
1:      be      1f
         andcc  %g1, 1, %g0

        lduh    [%o1], %g2
        add     %o1, 2, %o1
        sth     %g2, [%o0]
        add     %o0, 2, %o0
1:      be      1f
         nop

        ldub    [%o1], %g2
        stb     %g2, [%o0]
1:      retl
         ld     [%sp + 64], %o0
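/* Unaligned forward copy (dst - src not a multiple of 4).  Very short
   moves (len <= 6) use the byte loop at 88:.  Large moves (len >= 256)
   go to 87:, which word-aligns the source and then streams 64 bytes
   per iteration through the SMOVE_CHUNK/SMOVE_ALIGNCHUNK macros.
   Everything in between is handled directly below: align the
   destination, then rebuild each output word from two input words
   with the sll/srl/or pattern described above the .text directive.  */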
86:     cmp     %o2, 6
        bleu    88f

         cmp    %o2, 256
        bcc     87f

         andcc  %o0, 3, %g0
        be      61f
         andcc  %o0, 1, %g0
        be      60f
         andcc  %o0, 2, %g0

        ldub    [%o1], %g5
        add     %o1, 1, %o1
        stb     %g5, [%o0]
        sub     %o2, 1, %o2
        bne     61f
         add    %o0, 1, %o0
60:     ldub    [%o1], %g3
        add     %o1, 2, %o1
        stb     %g3, [%o0]
        sub     %o2, 2, %o2
        ldub    [%o1 - 1], %g3
        add     %o0, 2, %o0
        stb     %g3, [%o0 - 1]
61:     and     %o1, 3, %g2
        and     %o2, 0xc, %g3
        and     %o1, -4, %o1
        cmp     %g3, 4
        sll     %g2, 3, %g4
        mov     32, %g2
        be      4f
         sub    %g2, %g4, %g7

        blu     3f
         cmp    %g3, 0x8

        be      2f
         srl    %o2, 2, %g3

        ld      [%o1], %o3
        add     %o0, -8, %o0
        ld      [%o1 + 4], %o4
        b       8f
         add    %g3, 1, %g3
2:      ld      [%o1], %o4
        add     %o0, -12, %o0
        ld      [%o1 + 4], %o5
        add     %g3, 2, %g3
        b       9f
         add    %o1, -4, %o1
3:      ld      [%o1], %g1
        add     %o0, -4, %o0
        ld      [%o1 + 4], %o3
        srl     %o2, 2, %g3
        b       7f
         add    %o1, 4, %o1
4:      ld      [%o1], %o5
        cmp     %o2, 7
        ld      [%o1 + 4], %g1
        srl     %o2, 2, %g3
        bleu    10f
         add    %o1, 8, %o1

        ld      [%o1], %o3
        add     %g3, -1, %g3
5:      sll     %o5, %g4, %g2
        srl     %g1, %g7, %g5
        or      %g2, %g5, %g2
        st      %g2, [%o0]
7:      ld      [%o1 + 4], %o4
        sll     %g1, %g4, %g2
        srl     %o3, %g7, %g5
        or      %g2, %g5, %g2
        st      %g2, [%o0 + 4]
8:      ld      [%o1 + 8], %o5
        sll     %o3, %g4, %g2
        srl     %o4, %g7, %g5
        or      %g2, %g5, %g2
        st      %g2, [%o0 + 8]
9:      ld      [%o1 + 12], %g1
        sll     %o4, %g4, %g2
        srl     %o5, %g7, %g5
        addcc   %g3, -4, %g3
        or      %g2, %g5, %g2
        add     %o1, 16, %o1
        st      %g2, [%o0 + 12]
        add     %o0, 16, %o0
        bne,a   5b
         ld     [%o1], %o3
10:     sll     %o5, %g4, %g2
        srl     %g1, %g7, %g5
        srl     %g7, 3, %g3
        or      %g2, %g5, %g2
        sub     %o1, %g3, %o1
        andcc   %o2, 2, %g0
        st      %g2, [%o0]
        be      1f
         andcc  %o2, 1, %g0

        ldub    [%o1], %g2
        add     %o1, 2, %o1
        stb     %g2, [%o0 + 4]
        add     %o0, 2, %o0
        ldub    [%o1 - 1], %g2
        stb     %g2, [%o0 + 3]
1:      be      1f
         nop
        ldub    [%o1], %g2
        stb     %g2, [%o0 + 4]
1:      retl
         ld     [%sp + 64], %o0
87:     andcc   %o1, 3, %g0
        be      3f
         andcc  %o1, 1, %g0

        be      4f
         andcc  %o1, 2, %g0

        ldub    [%o1], %g2
        add     %o1, 1, %o1
        stb     %g2, [%o0]
        sub     %o2, 1, %o2
        bne     3f
         add    %o0, 1, %o0
4:      lduh    [%o1], %g2
        add     %o1, 2, %o1
        srl     %g2, 8, %g3
        sub     %o2, 2, %o2
        stb     %g3, [%o0]
        add     %o0, 2, %o0
        stb     %g2, [%o0 - 1]
3:      andcc   %o1, 4, %g0

        bne     2f
         cmp    %o5, 1

        ld      [%o1], %o4
        srl     %o4, 24, %g2
        stb     %g2, [%o0]
        srl     %o4, 16, %g3
        stb     %g3, [%o0 + 1]
        srl     %o4, 8, %g2
        stb     %g2, [%o0 + 2]
        sub     %o2, 4, %o2
        stb     %o4, [%o0 + 3]
        add     %o1, 4, %o1
        add     %o0, 4, %o0
2:      be      33f
         cmp    %o5, 2
        be      32f
         sub    %o2, 4, %o2
31:     ld      [%o1], %g2
        add     %o1, 4, %o1
        srl     %g2, 24, %g3
        and     %o0, 7, %g5
        stb     %g3, [%o0]
        cmp     %g5, 7
        sll     %g2, 8, %g1
        add     %o0, 4, %o0
        be      41f
         and    %o2, 0xffffffc0, %o3
        ld      [%o0 - 7], %o4
4:      SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
        SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
        SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
        SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
        subcc   %o3, 64, %o3
        add     %o1, 64, %o1
        bne     4b
         add    %o0, 64, %o0

        andcc   %o2, 0x30, %o3
        be,a    1f
         srl    %g1, 16, %g2
4:      SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
        subcc   %o3, 16, %o3
        add     %o1, 16, %o1
        bne     4b
         add    %o0, 16, %o0

        srl     %g1, 16, %g2
1:      st      %o4, [%o0 - 7]
        sth     %g2, [%o0 - 3]
        srl     %g1, 8, %g4
        b       88f
         stb    %g4, [%o0 - 1]
32:     ld      [%o1], %g2
        add     %o1, 4, %o1
        srl     %g2, 16, %g3
        and     %o0, 7, %g5
        sth     %g3, [%o0]
        cmp     %g5, 6
        sll     %g2, 16, %g1
        add     %o0, 4, %o0
        be      42f
         and    %o2, 0xffffffc0, %o3
        ld      [%o0 - 6], %o4
4:      SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
        SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
        SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
        SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
        subcc   %o3, 64, %o3
        add     %o1, 64, %o1
        bne     4b
         add    %o0, 64, %o0

        andcc   %o2, 0x30, %o3
        be,a    1f
         srl    %g1, 16, %g2
4:      SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
        subcc   %o3, 16, %o3
        add     %o1, 16, %o1
        bne     4b
         add    %o0, 16, %o0

        srl     %g1, 16, %g2
1:      st      %o4, [%o0 - 6]
        b       88f
         sth    %g2, [%o0 - 2]
33:     ld      [%o1], %g2
        sub     %o2, 4, %o2
        srl     %g2, 24, %g3
        and     %o0, 7, %g5
        stb     %g3, [%o0]
        cmp     %g5, 5
        srl     %g2, 8, %g4
        sll     %g2, 24, %g1
        sth     %g4, [%o0 + 1]
        add     %o1, 4, %o1
        be      43f
         and    %o2, 0xffffffc0, %o3

        ld      [%o0 - 1], %o4
        add     %o0, 4, %o0
4:      SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
        SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
        SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
        SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
        subcc   %o3, 64, %o3
        add     %o1, 64, %o1
        bne     4b
         add    %o0, 64, %o0

        andcc   %o2, 0x30, %o3
        be,a    1f
         srl    %g1, 24, %g2
4:      SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
        subcc   %o3, 16, %o3
        add     %o1, 16, %o1
        bne     4b
         add    %o0, 16, %o0

        srl     %g1, 24, %g2
1:      st      %o4, [%o0 - 5]
        b       88f
         stb    %g2, [%o0 - 1]
41:     SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
        SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
        SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
        SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
        subcc   %o3, 64, %o3
        add     %o1, 64, %o1
        bne     41b
         add    %o0, 64, %o0

        andcc   %o2, 0x30, %o3
        be,a    1f
         srl    %g1, 16, %g2
4:      SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
        subcc   %o3, 16, %o3
        add     %o1, 16, %o1
        bne     4b
         add    %o0, 16, %o0

        srl     %g1, 16, %g2
1:      sth     %g2, [%o0 - 3]
        srl     %g1, 8, %g4
        b       88f
         stb    %g4, [%o0 - 1]
43:     SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
        SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
        SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
        SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
        subcc   %o3, 64, %o3
        add     %o1, 64, %o1
        bne     43b
         add    %o0, 64, %o0

        andcc   %o2, 0x30, %o3
        be,a    1f
         srl    %g1, 24, %g2
4:      SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
        subcc   %o3, 16, %o3
        add     %o1, 16, %o1
        bne     4b
         add    %o0, 16, %o0

        srl     %g1, 24, %g2
1:      stb     %g2, [%o0 + 3]
        b       88f
         add    %o0, 4, %o0
42:     SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
        SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
        SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
        SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
        subcc   %o3, 64, %o3
        add     %o1, 64, %o1
        bne     42b
         add    %o0, 64, %o0

        andcc   %o2, 0x30, %o3
        be,a    1f
         srl    %g1, 16, %g2
4:      SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
        subcc   %o3, 16, %o3
        add     %o1, 16, %o1
        bne     4b
         add    %o0, 16, %o0

        srl     %g1, 16, %g2
1:      sth     %g2, [%o0 - 2]

        /* Fall through */

88:     and     %o2, 0xe, %o3
        mov     %o7, %g2
        sll     %o3, 3, %o4
        add     %o0, %o3, %o0
106:    call    100f
         add    %o1, %o3, %o1
        mov     %g2, %o7
        jmpl    %o5 + (89f - 106b), %g0
         andcc  %o2, 1, %g0

        MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
        MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
        MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
        MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
        MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
        MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
        MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)

89:     be      1f
         nop

        ldub    [%o1], %g2
        stb     %g2, [%o0]
1:      retl
         ld     [%sp + 64], %o0

90:     bne     88b
         andcc  %o2, 8, %g0

        be      1f
         andcc  %o2, 4, %g0

        ld      [%o1 + 0x00], %g2
        ld      [%o1 + 0x04], %g3
        add     %o1, 8, %o1
        st      %g2, [%o0 + 0x00]
        st      %g3, [%o0 + 0x04]
        add     %o0, 8, %o0
1:      b       81b
         mov    %o2, %g1
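/* pc-reading helpers for the computed jumps above.  "call 100f" puts
   the address of the call insn itself in %o7; these return %o7 minus
   the precomputed code offset, leaving the dispatch base in %o5 for
   the jmpl back at the call site: %o4 for the LASTCHUNK (24 bytes of
   code per 16 bytes copied) and SHORTCHUNK (16 bytes per 2) tables,
   %g7 for the LASTALIGNCHUNK table, whose 16-byte expansions make the
   code offset equal the byte count.  */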
100:    retl
         sub    %o7, %o4, %o5

110:    retl
         sub    %o7, %g7, %o5
END(memcpy)