bcopy should handle overlapping copies like memmove, while the previous
version worked like memcpy.  Implement optimized assembly memmove.
This commit is contained in:
Ulrich Drepper 1999-11-23 17:07:14 +00:00
parent a11bd15912
commit d0c05fdbbc

View File

@ -3,7 +3,7 @@
Copyright (C) 1996, 97, 98, 99 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by David S. Miller (davem@caip.rutgers.edu) and
Jakub Jelinek (jj@ultra.linux.cz).
Jakub Jelinek (jakub@redhat.com).
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
@ -24,6 +24,7 @@
#include <asm/asi.h>
#ifndef XCC
#define USE_BPR
#define XCC xcc
#endif
#define FPRS_FEF 4
@ -132,16 +133,76 @@
stx %t0, [%dst - offset - 0x10]; \
stx %t1, [%dst - offset - 0x08];
/* Macros for non-VIS memmove code. */

/* RMOVE_BIGCHUNK: move 32 bytes backwards (descending addresses) from
   %src to %dst.  `offset` is subtracted because both pointers have
   already been advanced one past the end of the region being copied.
   Data is read as 8-byte doublewords but written as 4-byte words: the
   low 32 bits of each doubleword go to the higher word address first,
   then the high 32 bits (after srlx) to the lower address, matching
   big-endian layout.  Used when src/dst are only mutually 4-byte
   aligned.  Clobbers t0-t3.  */
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
ldx [%src - offset - 0x20], %t0; \
ldx [%src - offset - 0x18], %t1; \
ldx [%src - offset - 0x10], %t2; \
ldx [%src - offset - 0x08], %t3; \
stw %t0, [%dst - offset - 0x1c]; \
srlx %t0, 32, %t0; \
stw %t0, [%dst - offset - 0x20]; \
stw %t1, [%dst - offset - 0x14]; \
srlx %t1, 32, %t1; \
stw %t1, [%dst - offset - 0x18]; \
stw %t2, [%dst - offset - 0x0c]; \
srlx %t2, 32, %t2; \
stw %t2, [%dst - offset - 0x10]; \
stw %t3, [%dst - offset - 0x04]; \
srlx %t3, 32, %t3; \
stw %t3, [%dst - offset - 0x08];

/* RMOVE_BIGALIGNCHUNK: move 64 bytes backwards in full doublewords,
   for mutually 8-byte-aligned src/dst.  Loads/stores are grouped in
   two 32-byte batches to keep loads ahead of stores.  Clobbers t0-t3.  */
#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
ldx [%src - offset - 0x20], %t0; \
ldx [%src - offset - 0x18], %t1; \
ldx [%src - offset - 0x10], %t2; \
ldx [%src - offset - 0x08], %t3; \
stx %t0, [%dst - offset - 0x20]; \
stx %t1, [%dst - offset - 0x18]; \
stx %t2, [%dst - offset - 0x10]; \
stx %t3, [%dst - offset - 0x28]; \
ldx [%src - offset - 0x40], %t0; \
ldx [%src - offset - 0x38], %t1; \
ldx [%src - offset - 0x30], %t2; \
ldx [%src - offset - 0x28], %t3; \
stx %t0, [%dst - offset - 0x40]; \
stx %t1, [%dst - offset - 0x38]; \
stx %t2, [%dst - offset - 0x30]; \
stx %t3, [%dst - offset - 0x28];

/* RMOVE_LASTCHUNK: move 16 bytes (doubleword loads, word stores) at
   positive offsets.  Each expansion is exactly 8 instructions
   (32 bytes of code) moving 16 data bytes -- the computed jump at
   label 279 below relies on that 2:1 code-to-data ratio to enter the
   ladder at the right point.  Clobbers t0-t3.  */
#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
ldx [%src + offset + 0x00], %t0; \
ldx [%src + offset + 0x08], %t1; \
stw %t0, [%dst + offset + 0x04]; \
srlx %t0, 32, %t2; \
stw %t2, [%dst + offset + 0x00]; \
stw %t1, [%dst + offset + 0x0c]; \
srlx %t1, 32, %t3; \
stw %t3, [%dst + offset + 0x08];

/* RMOVE_LASTALIGNCHUNK: move 16 bytes in doublewords (aligned case).
   Each expansion is exactly 4 instructions (16 bytes of code) moving
   16 data bytes -- the computed jump at label 283 relies on that 1:1
   ratio.  Clobbers t0, t1.  */
#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \
ldx [%src + offset + 0x00], %t0; \
ldx [%src + offset + 0x08], %t1; \
stx %t0, [%dst + offset + 0x00]; \
stx %t1, [%dst + offset + 0x08];
.text
.align 32
/* void bcopy (const void *src, void *dst, size_t n)
   In:  %o0 = src, %o1 = dst, %o2 = n.
   bcopy takes (src, dst) in the opposite order from memcpy/memmove, so
   the entry code swaps %o0/%o1 and tail-branches into the shared copy
   code: 210f (forward path inside memcpy) when a forward copy is safe,
   220f (backward path inside memmove) otherwise.

   NOTE(review): this chunk looks like a unified diff with the +/-
   markers stripped.  The first sequence below (through "clr %o0")
   appears to be the pre-patch, memcpy-like bcopy, and the second
   sequence the post-patch overlap-aware version -- confirm against
   the upstream commit before assembling this text verbatim.  */
ENTRY(bcopy)
/* (apparently pre-patch) swap args via %g3; forward-copy when n >= 0,
   else return NULL-ish 0.  */
or %o0, 0, %g3 /* IEU0 Group */
addcc %o1, 0, %o0 /* IEU1 */
brgez,pt %o2, 210f /* CTI */
or %g3, 0, %o1 /* IEU0 Group */
retl /* CTI Group brk forced*/
clr %o0 /* IEU0 */
/* (apparently post-patch) %o4 = dst - src; if (dst - src) >= n
   (unsigned) a forward copy cannot clobber unread source bytes, so go
   to the memcpy path at 210f; otherwise fall into the backward
   memmove path at 220f with dst already advanced past the end.  The
   two mov delay-slot instructions complete the argument swap.  */
sub %o1, %o0, %o4 /* IEU0 Group */
mov %o0, %g3 /* IEU1 */
cmp %o4, %o2 /* IEU1 Group */
mov %o1, %o0 /* IEU0 */
bgeu,pt %XCC, 210f /* CTI */
mov %g3, %o1 /* IEU0 Group */
#ifndef USE_BPR
/* Without 64-bit branch-on-register support, treat the length as a
   32-bit value: clear the upper half.  */
srl %o2, 0, %o2 /* IEU1 */
#endif
brnz,pn %o2, 220f /* CTI Group */
add %o0, %o2, %o0 /* IEU0 */
retl
nop
END(bcopy)
.align 32
@ -447,7 +508,7 @@ ENTRY(memcpy)
#endif
brz,pn %o2, 209b /* CTI Group */
mov %o0, %g4 /* IEU0 */
cmp %o2, 15 /* IEU1 Group */
218: cmp %o2, 15 /* IEU1 Group */
bleu,pn %xcc, 208b /* CTI */
cmp %o2, (64 * 6) /* IEU1 Group */
bgeu,pn %xcc, 200b /* CTI */
@ -633,6 +694,222 @@ ENTRY(memcpy)
mov %g4, %o0
END(memcpy)
.align 32
/* 228: small (n <= 15) backward copy, entered from memmove with
   %o0/%o1 pointing one past the end of dst/src and the original dst
   saved in %g4.  Copies an odd leading byte first, then byte pairs,
   all at descending addresses.  */
228: andcc %o2, 1, %g0 /* IEU1 Group */
/* Even count: branch to 2f+4, skipping 2's first instruction -- the
   delay-slot load at 1: below doubles as the first load of the pair
   loop.  */
be,pt %icc, 2f+4 /* CTI */
1: ldub [%o1 - 1], %o5 /* LOAD Group */
sub %o1, 1, %o1 /* IEU0 */
sub %o0, 1, %o0 /* IEU1 */
subcc %o2, 1, %o2 /* IEU1 Group */
be,pn %xcc, 229f /* CTI */
stb %o5, [%o0] /* Store */
/* Pair loop: two loads, two stores, count down by 2 per iteration.  */
2: ldub [%o1 - 1], %o5 /* LOAD Group */
sub %o0, 2, %o0 /* IEU0 */
ldub [%o1 - 2], %g5 /* LOAD Group */
sub %o1, 2, %o1 /* IEU0 */
subcc %o2, 2, %o2 /* IEU1 Group */
stb %o5, [%o0 + 1] /* Store */
bne,pt %xcc, 2b /* CTI */
stb %g5, [%o0] /* Store */
/* Done: return the original destination pointer.  */
229: retl
mov %g4, %o0
/* 219: n == 0 on entry to memmove -- return %o0 (dst) untouched.  */
219: retl
nop
.align 32
/* void *memmove (void *dst, const void *src, size_t n)
   In:  %o0 = dst, %o1 = src, %o2 = n.
   Out: %o0 = original dst (kept in %g4 throughout).
   Overlap-safe: when dst - src < n (unsigned), a forward copy would
   clobber not-yet-read source bytes, so the copy is done backwards
   from the end; otherwise control tail-branches into memcpy's forward
   path at 218b.  */
ENTRY(memmove)
#ifndef USE_BPR
/* Without 64-bit branch-on-register support the length is a 32-bit
   value: clear the upper half.  */
srl %o2, 0, %o2 /* IEU1 Group */
#endif
brz,pn %o2, 219b /* CTI Group */
sub %o0, %o1, %o4 /* IEU0 */
/* %o4 = dst - src; unsigned compare against n picks forward (218b,
   memcpy) vs. backward (fall through).  */
cmp %o4, %o2 /* IEU1 Group */
bgeu,pt %XCC, 218b /* CTI */
mov %o0, %g4 /* IEU0 */
/* Backward copy: advance both pointers one past the end.  */
add %o0, %o2, %o0 /* IEU0 Group */
220: add %o1, %o2, %o1 /* IEU1 */
cmp %o2, 15 /* IEU1 Group */
bleu,pn %xcc, 228b /* CTI */
/* Delay slot: %g2 = dst & 7, consumed by the VIS path at 232.  */
andcc %o0, 7, %g2 /* IEU1 Group */
sub %o0, %o1, %g5 /* IEU0 */
/* If (dst - src) is not a multiple of 4, word/doubleword moves can
   never line up -- take the VIS faligndata path at 232.  */
andcc %g5, 3, %o5 /* IEU1 Group */
bne,pn %xcc, 232f /* CTI */
andcc %o1, 3, %g0 /* IEU1 Group */
/* src/dst mutually 4-byte aligned: peel 1-, 2-, then 4-byte moves
   until src is 8-byte aligned.  The annulled branch executes its
   delay-slot andcc only when taken.  */
be,a,pt %xcc, 236f /* CTI */
andcc %o1, 4, %g0 /* IEU1 Group */
andcc %o1, 1, %g0 /* IEU1 Group */
be,pn %xcc, 4f /* CTI */
andcc %o1, 2, %g0 /* IEU1 Group */
ldub [%o1 - 1], %g2 /* Load Group */
sub %o1, 1, %o1 /* IEU0 */
sub %o0, 1, %o0 /* IEU1 */
sub %o2, 1, %o2 /* IEU0 Group */
be,pn %xcc, 5f /* CTI Group */
stb %g2, [%o0] /* Store */
4: lduh [%o1 - 2], %g2 /* Load Group */
sub %o1, 2, %o1 /* IEU0 */
sub %o0, 2, %o0 /* IEU1 */
sub %o2, 2, %o2 /* IEU0 */
sth %g2, [%o0] /* Store Group + bubble */
5: andcc %o1, 4, %g0 /* IEU1 */
236: be,a,pn %xcc, 2f /* CTI */
andcc %o2, -128, %g7 /* IEU1 Group */
lduw [%o1 - 4], %g5 /* Load Group */
sub %o1, 4, %o1 /* IEU0 */
sub %o0, 4, %o0 /* IEU1 */
sub %o2, 4, %o2 /* IEU0 Group */
stw %g5, [%o0] /* Store */
/* %g7 = bytes to move in 128-byte chunks.  */
andcc %o2, -128, %g7 /* IEU1 Group */
2: be,pn %xcc, 235f /* CTI */
andcc %o0, 4, %g0 /* IEU1 Group */
/* dst 8-byte aligned too: enter the doubleword loop at 282f+4; the
   delay-slot ldx (first instruction of RMOVE_BIGCHUNK below) doubles
   as 282's first load.  */
be,pn %xcc, 282f + 4 /* CTI Group */
/* 128-byte backward loop, word-store variant.  */
5: RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
subcc %g7, 128, %g7 /* IEU1 Group */
sub %o1, 128, %o1 /* IEU0 */
bne,pt %xcc, 5b /* CTI */
sub %o0, 128, %o0 /* IEU0 Group */
/* 0x10..0x70 bytes remain in 16-byte units: computed jump into the
   RMOVE_LASTCHUNK ladder.  Each expansion is 32 code bytes per 16
   data bytes, so the entry point is 280f minus 2 * %g7 code bytes.  */
235: andcc %o2, 0x70, %g7 /* IEU1 Group */
41: be,pn %xcc, 280f /* CTI */
andcc %o2, 8, %g0 /* IEU1 Group */
/* Clk1 8-( */
/* Clk2 8-( */
/* Clk3 8-( */
/* Clk4 8-( */
279: rd %pc, %o5 /* PDU Group */
sll %g7, 1, %g5 /* IEU0 Group */
sub %o1, %g7, %o1 /* IEU1 */
sub %o5, %g5, %o5 /* IEU0 Group */
jmpl %o5 + %lo(280f - 279b), %g0 /* CTI Group brk forced*/
sub %o0, %g7, %o0 /* IEU0 Group */
RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
/* Tails: 8, 4, 2, then 1 remaining bytes (word stores; dst is only
   4-byte aligned here).  */
280: be,pt %xcc, 281f /* CTI */
andcc %o2, 4, %g0 /* IEU1 */
ldx [%o1 - 8], %g2 /* Load Group */
sub %o0, 8, %o0 /* IEU0 */
stw %g2, [%o0 + 4] /* Store Group */
sub %o1, 8, %o1 /* IEU1 */
srlx %g2, 32, %g2 /* IEU0 Group */
stw %g2, [%o0] /* Store */
281: be,pt %xcc, 1f /* CTI */
andcc %o2, 2, %g0 /* IEU1 Group */
lduw [%o1 - 4], %g2 /* Load Group */
sub %o1, 4, %o1 /* IEU0 */
stw %g2, [%o0 - 4] /* Store Group */
sub %o0, 4, %o0 /* IEU0 */
1: be,pt %xcc, 1f /* CTI */
andcc %o2, 1, %g0 /* IEU1 Group */
lduh [%o1 - 2], %g2 /* Load Group */
sub %o1, 2, %o1 /* IEU0 */
sth %g2, [%o0 - 2] /* Store Group */
sub %o0, 2, %o0 /* IEU0 */
1: be,pt %xcc, 211f /* CTI */
nop /* IEU1 */
ldub [%o1 - 1], %g2 /* Load Group */
stb %g2, [%o0 - 1] /* Store Group + bubble */
/* Done: return the original dst saved in %g4.  */
211: retl
mov %g4, %o0
/* 282: 128-byte backward loop for mutually 8-byte-aligned src/dst
   (entered at 282f+4 from above, the delay-slot ldx having supplied
   the first load).  */
282: RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
subcc %g7, 128, %g7 /* IEU1 Group */
sub %o1, 128, %o1 /* IEU0 */
bne,pt %xcc, 282b /* CTI */
sub %o0, 128, %o0 /* IEU0 Group */
andcc %o2, 0x70, %g7 /* IEU1 */
be,pn %xcc, 284f /* CTI */
andcc %o2, 8, %g0 /* IEU1 Group */
/* Clk1 8-( */
/* Clk2 8-( */
/* Clk3 8-( */
/* Clk4 8-( */
/* Computed jump into the RMOVE_LASTALIGNCHUNK ladder: each expansion
   is 16 code bytes per 16 data bytes, so skip exactly %g7 code bytes
   back from 284f.  */
283: rd %pc, %o5 /* PDU Group */
sub %o1, %g7, %o1 /* IEU0 Group */
sub %o5, %g7, %o5 /* IEU1 */
jmpl %o5 + %lo(284f - 283b), %g0 /* CTI Group brk forced*/
sub %o0, %g7, %o0 /* IEU0 Group */
RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
/* Aligned tails: 8, 4, 2, then 1 remaining bytes.  */
284: be,pt %xcc, 285f /* CTI Group */
andcc %o2, 4, %g0 /* IEU1 */
ldx [%o1 - 8], %g2 /* Load Group */
sub %o0, 8, %o0 /* IEU0 */
sub %o1, 8, %o1 /* IEU0 Group */
stx %g2, [%o0] /* Store */
285: be,pt %xcc, 1f /* CTI */
andcc %o2, 2, %g0 /* IEU1 Group */
lduw [%o1 - 4], %g2 /* Load Group */
sub %o0, 4, %o0 /* IEU0 */
sub %o1, 4, %o1 /* IEU0 Group */
stw %g2, [%o0] /* Store */
1: be,pt %xcc, 1f /* CTI */
andcc %o2, 1, %g0 /* IEU1 Group */
lduh [%o1 - 2], %g2 /* Load Group */
sub %o0, 2, %o0 /* IEU0 */
sub %o1, 2, %o1 /* IEU0 Group */
sth %g2, [%o0] /* Store */
1: be,pt %xcc, 1f /* CTI */
nop /* IEU0 Group */
ldub [%o1 - 1], %g2 /* Load Group */
stb %g2, [%o0 - 1] /* Store Group + bubble */
1: retl
mov %g4, %o0
/* 232: src and dst differ mod 4 -- byte-copy until dst is 8-byte
   aligned (%g2 = dst & 7, computed at entry), then use VIS
   alignaddr/faligndata for unaligned doubleword moves backwards.
   The delay-slot sub is harmless when %g2 == 0.  */
232: brz,pt %g2, 2f /* CTI Group */
sub %o2, %g2, %o2 /* IEU0 Group */
1: ldub [%o1 - 1], %g5 /* Load Group */
sub %o1, 1, %o1 /* IEU0 */
sub %o0, 1, %o0 /* IEU1 */
subcc %g2, 1, %g2 /* IEU1 Group */
bne,pt %xcc, 1b /* CTI */
stb %g5, [%o0] /* Store */
/* %g5 = whole-doubleword bytes, %o2 = leftover bytes (< 8).
   alignaddr records the src misalignment (in the GSR) and yields the
   aligned address in %g1 for the faligndata pipeline below.  */
2: andn %o2, 7, %g5 /* IEU0 Group */
and %o2, 7, %o2 /* IEU1 */
fmovd %f0, %f2 /* FPU */
alignaddr %o1, %g0, %g1 /* GRU Group */
ldd [%g1], %f4 /* Load Group */
/* Software-pipelined reverse loop; the second half is the same
   iteration unrolled with the %f4/%f6 roles swapped.  */
1: ldd [%g1 - 8], %f6 /* Load Group */
sub %g1, 8, %g1 /* IEU0 Group */
subcc %g5, 8, %g5 /* IEU1 */
faligndata %f6, %f4, %f0 /* GRU Group */
std %f0, [%o0 - 8] /* Store */
sub %o1, 8, %o1 /* IEU0 Group */
be,pn %xcc, 233f /* CTI */
sub %o0, 8, %o0 /* IEU1 */
ldd [%g1 - 8], %f4 /* Load Group */
sub %g1, 8, %g1 /* IEU0 */
subcc %g5, 8, %g5 /* IEU1 */
faligndata %f4, %f6, %f0 /* GRU Group */
std %f0, [%o0 - 8] /* Store */
sub %o1, 8, %o1 /* IEU0 */
bne,pn %xcc, 1b /* CTI Group */
sub %o0, 8, %o0 /* IEU0 */
/* Byte tail after the VIS loop.  */
233: brz,pn %o2, 234f /* CTI Group */
nop /* IEU0 */
237: ldub [%o1 - 1], %g5 /* LOAD */
sub %o1, 1, %o1 /* IEU0 */
sub %o0, 1, %o0 /* IEU1 */
subcc %o2, 1, %o2 /* IEU1 */
bne,pt %xcc, 237b /* CTI */
stb %g5, [%o0] /* Store Group */
/* Reset %fprs to just FPRS_FEF (FP enabled, dirty bits cleared)
   after touching FP registers, then return the original dst.  */
234: wr %g0, FPRS_FEF, %fprs
retl
mov %g4, %o0
END(memmove)
#ifdef USE_BPR
weak_alias(memcpy, __align_cpy_1)
weak_alias(memcpy, __align_cpy_2)