* locale/newlocale.c (__newlocale): If setting all categories to "C",

just return &_nl_C_locobj instead of copying it.
	* locale/freelocale.c (__freelocale): Check for &_nl_C_locobj.
	* locale/duplocale.c (__duplocale): Likewise.

2002-10-07  Roland McGrath  <roland@frob.com>

	* config.h.in (HAVE_I386_SET_GDT): New #undef.
	* sysdeps/mach/configure.in: Define it with new check for i386_set_gdt.
	* sysdeps/mach/configure: Regenerated.

2002-10-06  Franz Sirl  <Franz.Sirl-kernel@lauterbach.com>

	* sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h (INLINE_SYSCALL):
	Add all necessary register outputs for syscall-clobbered registers.
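
	(Illustration only -- this is not the glibc INLINE_SYSCALL macro, and
	the exact register set is an assumption: on powerpc32 Linux the kernel
	may clobber r0, r3-r12, cr0 and ctr across `sc', so every such register
	must appear as an asm output or in the clobber list, or GCC may keep a
	live value there across the system call.  A minimal sketch:

	static inline long
	my_syscall1 (long nr, long arg1)
	{
	  register long r0 __asm__ ("r0") = nr;    /* syscall number          */
	  register long r3 __asm__ ("r3") = arg1;  /* argument / return value */
	  __asm__ __volatile__ ("sc"
				: "+r" (r0), "+r" (r3)
				: /* no other inputs */
				: "r4", "r5", "r6", "r7", "r8", "r9",
				  "r10", "r11", "r12", "cr0", "ctr", "memory");
	  return r3;
	})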

2002-10-02  David Mosberger  <davidm@hpl.hp.com>

	* sysdeps/ia64/bzero.S: Rewritten by Sverre Jarp to tune for
	Itanium 2 (and Itanium).
	Fix unwind directives and make it fit in 80 columns.
	* sysdeps/ia64/memset.S: Ditto.
	* sysdeps/ia64/memcpy.S: Ditto.
	Move jump table to .rodata section.

2002-10-03  Roland McGrath  <roland@frob.com>

	* sysdeps/mach/hurd/i386/init-first.c (_hurd_stack_setup): Add
	clobbers to asm.
Commit: 679e4c434f (parent 704bb2fd8e)
Author: Roland McGrath
Date:   2002-10-11 07:22:18 +00:00

11 changed files with 1066 additions and 239 deletions

ChangeLog

@@ -1,3 +1,35 @@
+2002-10-08  Roland McGrath  <roland@redhat.com>
+
+	* locale/newlocale.c (__newlocale): If setting all categories to "C",
+	just return &_nl_C_locobj instead of copying it.
+	* locale/freelocale.c (__freelocale): Check for &_nl_C_locobj.
+	* locale/duplocale.c (__duplocale): Likewise.
+
+2002-10-07  Roland McGrath  <roland@frob.com>
+
+	* config.h.in (HAVE_I386_SET_GDT): New #undef.
+	* sysdeps/mach/configure.in: Define it with new check for i386_set_gdt.
+	* sysdeps/mach/configure: Regenerated.
+
+2002-10-06  Franz Sirl  <Franz.Sirl-kernel@lauterbach.com>
+
+	* sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h (INLINE_SYSCALL):
+	Add all necessary register outputs for syscall-clobbered registers.
+
+2002-10-02  David Mosberger  <davidm@hpl.hp.com>
+
+	* sysdeps/ia64/bzero.S: Rewritten by Sverre Jarp to tune for
+	Itanium 2 (and Itanium).
+	Fix unwind directives and make it fit in 80 columns.
+	* sysdeps/ia64/memset.S: Ditto.
+	* sysdeps/ia64/memcpy.S: Ditto.
+	Move jump table to .rodata section.
+
+2002-10-03  Roland McGrath  <roland@frob.com>
+
+	* sysdeps/mach/hurd/i386/init-first.c (_hurd_stack_setup): Add
+	clobbers to asm.
+
 2002-10-10  Andreas Jaeger  <aj@suse.de>
 
 	* sysdeps/x86_64/_mcount.S: Restore correct registers.

config.h.in

@@ -179,6 +179,9 @@
 /* Mach/i386 specific: define if the `i386_io_perm_*' RPCs are available.  */
 #undef HAVE_I386_IO_PERM_MODIFY
 
+/* Mach/i386 specific: define if the `i386_set_gdt' RPC is available.  */
+#undef HAVE_I386_SET_GDT
+
 /*
 */

locale/duplocale.c

@@ -33,6 +33,10 @@ __libc_lock_define (extern , __libc_setlocale_lock attribute_hidden)
 __locale_t
 __duplocale (__locale_t dataset)
 {
+  /* This static object is returned for newlocale (LC_ALL_MASK, "C").  */
+  if (dataset == &_nl_C_locobj)
+    return dataset;
+
   __locale_t result;
   int cnt;
   size_t names_len = 0;

locale/freelocale.c

@@ -34,6 +34,10 @@ __freelocale (__locale_t dataset)
 {
   int cnt;
 
+  /* This static object is returned for newlocale (LC_ALL_MASK, "C").  */
+  if (dataset == &_nl_C_locobj)
+    return;
+
   /* We modify global data (the usage counts).  */
   __libc_lock_lock (__libc_setlocale_lock);

locale/newlocale.c

@@ -60,6 +60,17 @@ __newlocale (int category_mask, const char *locale, __locale_t base)
   if (locale == NULL)
     ERROR_RETURN;
 
+  if (base == &_nl_C_locobj)
+    /* We're to modify BASE, returned for a previous call with "C".
+       We can't really modify the read-only structure, so instead
+       start over by copying it.  */
+    base = NULL;
+
+  if ((base == NULL || category_mask == (1 << __LC_LAST) - 1 - (1 << LC_ALL))
+      && (category_mask == 0 || !strcmp (locale, "C")))
+    /* Asking for the "C" locale needn't allocate a new object.  */
+    return &_nl_C_locobj;
+
   /* Allocate memory for the result.  */
   if (base != NULL)
     result = *base;
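
(Illustrative usage, not part of the patch: with this change the common
request for the plain "C" locale allocates nothing, and the shared object
is safe to hand to __duplocale and __freelocale, which now special-case
it.  A minimal sketch using the modern <locale.h> spellings of these
GNU/POSIX interfaces:

#define _GNU_SOURCE
#include <locale.h>

int
main (void)
{
  locale_t c1 = newlocale (LC_ALL_MASK, "C", NULL);  /* shared static object     */
  locale_t c2 = duplocale (c1);                      /* returns the same object  */
  freelocale (c2);                                   /* no-op for the "C" object */
  freelocale (c1);                                   /* likewise                 */
  return 0;
})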

sysdeps/ia64/bzero.S

@ -1,7 +1,8 @@
/* Optimized version of the standard bzero() function. /* Optimized version of the standard bzero() function.
This file is part of the GNU C Library. This file is part of the GNU C Library.
Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc. Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
Contributed by Dan Pop <Dan.Pop@cern.ch>. Contributed by Dan Pop for Itanium <Dan.Pop@cern.ch>.
Rewritten for McKinley by Sverre Jarp, HP Labs/CERN <Sverre.Jarp@cern.ch>
The GNU C Library is free software; you can redistribute it and/or The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public modify it under the terms of the GNU Lesser General Public
@ -25,8 +26,11 @@
in1: count in1: count
The algorithm is fairly straightforward: set byte by byte until we The algorithm is fairly straightforward: set byte by byte until we
we get to a word aligned address, then set word by word as much as we get to a 16B-aligned address, then loop on 128 B chunks using an
possible; the remaining few bytes are set one by one. */ early store as prefetching, then loop on 32B chunks, then clear remaining
words, finally clear remaining bytes.
Since a stf.spill f0 can store 16B in one go, we use this instruction
to get peak speed. */
#include <sysdep.h> #include <sysdep.h>
#undef ret #undef ret
@ -34,62 +38,278 @@
#define dest in0 #define dest in0
#define cnt in1 #define cnt in1
#define save_pfs loc0 #define tmp r31
#define ptr1 loc1 #define save_lc r30
#define ptr2 loc2 #define ptr0 r29
#define tmp loc3 #define ptr1 r28
#define loopcnt loc4 #define ptr2 r27
#define save_lc loc5 #define ptr3 r26
#define ptr9 r24
#define loopcnt r23
#define linecnt r22
#define bytecnt r21
ENTRY(__bzero) // This routine uses only scratch predicate registers (p6 - p15)
#define p_scr p6 // default register for same-cycle branches
#define p_unalgn p9
#define p_y p11
#define p_n p12
#define p_yy p13
#define p_nn p14
#define movi0 mov
#define MIN1 15
#define MIN1P1HALF 8
#define LINE_SIZE 128
#define LSIZE_SH 7 // shift amount
#define PREF_AHEAD 8
#define USE_FLP
#if defined(USE_INT)
#define store st8
#define myval r0
#elif defined(USE_FLP)
#define store stf8
#define myval f0
#endif
.align 64
ENTRY(bzero)
{ .mmi
.prologue .prologue
alloc save_pfs = ar.pfs, 2, 6, 0, 0 alloc tmp = ar.pfs, 2, 0, 0, 0
.save ar.lc, save_lc lfetch.nt1 [dest]
mov save_lc = ar.lc .save ar.lc, save_lc
movi0 save_lc = ar.lc
} { .mmi
.body .body
mov ret0 = dest mov ret0 = dest // return value
and tmp = 7, dest nop.m 0
cmp.eq p6, p0 = cnt, r0 cmp.eq p_scr, p0 = cnt, r0
(p6) br.cond.spnt .restore_and_exit ;; ;; }
{ .mmi
and ptr2 = -(MIN1+1), dest // aligned address
and tmp = MIN1, dest // prepare to check for alignment
tbit.nz p_y, p_n = dest, 0 // Do we have an odd address? (M_B_U)
} { .mib
mov ptr1 = dest mov ptr1 = dest
sub loopcnt = 8, tmp nop.i 0
cmp.gt p6, p0 = 16, cnt (p_scr) br.ret.dpnt.many rp // return immediately if count = 0
(p6) br.cond.spnt .set_few;; ;; }
cmp.eq p6, p0 = tmp, r0 { .mib
(p6) br.cond.sptk .dest_aligned cmp.ne p_unalgn, p0 = tmp, r0
sub cnt = cnt, loopcnt } { .mib // NB: # of bytes to move is 1
adds loopcnt = -1, loopcnt;; sub bytecnt = (MIN1+1), tmp // higher than loopcnt
mov ar.lc = loopcnt;; cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task?
.l1: (p_scr) br.cond.dptk.many .move_bytes_unaligned // go move just a few (M_B_U)
st1 [ptr1] = r0, 1 ;; }
br.cloop.dptk .l1 ;; { .mmi
.dest_aligned: (p_unalgn) add ptr1 = (MIN1+1), ptr2 // after alignment
adds ptr2 = 8, ptr1 (p_unalgn) add ptr2 = MIN1P1HALF, ptr2 // after alignment
shr.u loopcnt = cnt, 4 ;; // loopcnt = cnt / 16 (p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3 // should we do a st8 ?
cmp.eq p6, p0 = loopcnt, r0 ;; }
(p6) br.cond.spnt .one_more { .mib
and cnt = 0xf, cnt // compute the remaining cnt (p_y) add cnt = -8, cnt
adds loopcnt = -1, loopcnt;; (p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 // should we do a st4 ?
mov ar.lc = loopcnt;; } { .mib
.l2: (p_y) st8 [ptr2] = r0,-4
st8 [ptr1] = r0, 16 (p_n) add ptr2 = 4, ptr2
st8 [ptr2] = r0, 16 ;; }
br.cloop.dptk .l2 { .mib
cmp.le p6, p0 = 8, cnt ;; (p_yy) add cnt = -4, cnt
.one_more: (p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1 // should we do a st2 ?
(p6) st8 [ptr1] = r0, 8 } { .mib
(p6) adds cnt = -8, cnt ;; (p_yy) st4 [ptr2] = r0,-2
cmp.eq p6, p0 = cnt, r0 (p_nn) add ptr2 = 2, ptr2
(p6) br.cond.spnt .restore_and_exit ;; }
.set_few: { .mmi
adds loopcnt = -1, cnt;; mov tmp = LINE_SIZE+1 // for compare
mov ar.lc = loopcnt;; (p_y) add cnt = -2, cnt
.l3: (p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 // should we do a st1 ?
st1 [ptr1] = r0, 1 } { .mmi
br.cloop.dptk .l3 ;; nop.m 0
(p_y) st2 [ptr2] = r0,-1
(p_n) add ptr2 = 1, ptr2
;; }
{ .mmi
(p_yy) st1 [ptr2] = r0
cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task?
} { .mbb
(p_yy) add cnt = -1, cnt
(p_scr) br.cond.dpnt.many .fraction_of_line // go move just a few
;; }
{ .mib
nop.m 0
shr.u linecnt = cnt, LSIZE_SH
nop.b 0
;; }
.align 32
.l1b: // ------------------// L1B: store ahead into cache lines; fill later
{ .mmi
and tmp = -(LINE_SIZE), cnt // compute end of range
mov ptr9 = ptr1 // used for prefetching
and cnt = (LINE_SIZE-1), cnt // remainder
} { .mmi
mov loopcnt = PREF_AHEAD-1 // default prefetch loop
cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
;; }
{ .mmi
(p_scr) add loopcnt = -1, linecnt
add ptr2 = 16, ptr1 // start of stores (beyond prefetch stores)
add ptr1 = tmp, ptr1 // first address beyond total range
;; }
{ .mmi
add tmp = -1, linecnt // next loop count
movi0 ar.lc = loopcnt
;; }
.pref_l1b:
{ .mib
stf.spill [ptr9] = f0, 128 // Do stores one cache line apart
nop.i 0
br.cloop.dptk.few .pref_l1b
;; }
{ .mmi
add ptr0 = 16, ptr2 // Two stores in parallel
movi0 ar.lc = tmp
;; }
.l1bx:
{ .mmi
stf.spill [ptr2] = f0, 32
stf.spill [ptr0] = f0, 32
;; }
{ .mmi
stf.spill [ptr2] = f0, 32
stf.spill [ptr0] = f0, 32
;; }
{ .mmi
stf.spill [ptr2] = f0, 32
stf.spill [ptr0] = f0, 64
cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
;; }
{ .mmb
stf.spill [ptr2] = f0, 32
(p_scr) stf.spill [ptr9] = f0, 128
br.cloop.dptk.few .l1bx
;; }
{ .mib
cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
(p_scr) br.cond.dpnt.many .move_bytes_from_alignment
;; }
.fraction_of_line:
{ .mib
add ptr2 = 16, ptr1
shr.u loopcnt = cnt, 5 // loopcnt = cnt / 32
;; }
{ .mib
cmp.eq p_scr, p0 = loopcnt, r0
add loopcnt = -1, loopcnt
(p_scr) br.cond.dpnt.many .store_words
;; }
{ .mib
and cnt = 0x1f, cnt // compute the remaining cnt
movi0 ar.lc = loopcnt
;; }
.align 32
.l2: // -----------------------------// L2A: store 32B in 2 cycles
{ .mmb
store [ptr1] = myval, 8
store [ptr2] = myval, 8
;; } { .mmb
store [ptr1] = myval, 24
store [ptr2] = myval, 24
br.cloop.dptk.many .l2
;; }
.store_words:
{ .mib
cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
(p_scr) br.cond.dpnt.many .move_bytes_from_alignment // Branch
;; }
{ .mmi
store [ptr1] = myval, 8 // store
cmp.le p_y, p_n = 16, cnt //
add cnt = -8, cnt // subtract
;; }
{ .mmi
(p_y) store [ptr1] = myval, 8 // store
(p_y) cmp.le.unc p_yy, p_nn = 16, cnt
(p_y) add cnt = -8, cnt // subtract
;; }
{ .mmi // store
(p_yy) store [ptr1] = myval, 8
(p_yy) add cnt = -8, cnt // subtract
;; }
.move_bytes_from_alignment:
{ .mib
cmp.eq p_scr, p0 = cnt, r0
tbit.nz.unc p_y, p0 = cnt, 2 // should we terminate with a st4 ?
(p_scr) br.cond.dpnt.few .restore_and_exit
;; }
{ .mib
(p_y) st4 [ptr1] = r0,4
tbit.nz.unc p_yy, p0 = cnt, 1 // should we terminate with a st2 ?
;; }
{ .mib
(p_yy) st2 [ptr1] = r0,2
tbit.nz.unc p_y, p0 = cnt, 0 // should we terminate with a st1 ?
;; }
{ .mib
(p_y) st1 [ptr1] = r0
;; }
.restore_and_exit: .restore_and_exit:
mov ar.lc = save_lc { .mib
mov ar.pfs = save_pfs nop.m 0
br.ret.sptk.many b0 movi0 ar.lc = save_lc
END(__bzero) br.ret.sptk.many rp
weak_alias (__bzero, bzero) ;; }
.move_bytes_unaligned:
{ .mmi
.pred.rel "mutex",p_y, p_n
.pred.rel "mutex",p_yy, p_nn
(p_n) cmp.le p_yy, p_nn = 4, cnt
(p_y) cmp.le p_yy, p_nn = 5, cnt
(p_n) add ptr2 = 2, ptr1
} { .mmi
(p_y) add ptr2 = 3, ptr1
(p_y) st1 [ptr1] = r0, 1 // fill 1 (odd-aligned) byte
(p_y) add cnt = -1, cnt // [15, 14 (or less) left]
;; }
{ .mmi
(p_yy) cmp.le.unc p_y, p0 = 8, cnt
add ptr3 = ptr1, cnt // prepare last store
movi0 ar.lc = save_lc
} { .mmi
(p_yy) st2 [ptr1] = r0, 4 // fill 2 (aligned) bytes
(p_yy) st2 [ptr2] = r0, 4 // fill 2 (aligned) bytes
(p_yy) add cnt = -4, cnt // [11, 10 (or less) left]
;; }
{ .mmi
(p_y) cmp.le.unc p_yy, p0 = 8, cnt
add ptr3 = -1, ptr3 // last store
tbit.nz p_scr, p0 = cnt, 1 // will there be a st2 at the end ?
} { .mmi
(p_y) st2 [ptr1] = r0, 4 // fill 2 (aligned) bytes
(p_y) st2 [ptr2] = r0, 4 // fill 2 (aligned) bytes
(p_y) add cnt = -4, cnt // [7, 6 (or less) left]
;; }
{ .mmi
(p_yy) st2 [ptr1] = r0, 4 // fill 2 (aligned) bytes
(p_yy) st2 [ptr2] = r0, 4 // fill 2 (aligned) bytes
// [3, 2 (or less) left]
tbit.nz p_y, p0 = cnt, 0 // will there be a st1 at the end ?
} { .mmi
(p_yy) add cnt = -4, cnt
;; }
{ .mmb
(p_scr) st2 [ptr1] = r0 // fill 2 (aligned) bytes
(p_y) st1 [ptr3] = r0 // fill last byte (using ptr3)
br.ret.sptk.many rp
;; }
END(bzero)
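
(A rough C-level sketch, for illustration only, of the strategy the new
comment describes: align to 16 bytes, clear whole 128-byte lines first,
then smaller chunks and words, then the tail bytes -- the 32-byte stage is
folded into the word loop here for brevity.  The real code does the line
clearing with stf.spill stores of f0 and issues one store per line well
ahead of the fill loop, which doubles as a prefetch.

#include <stddef.h>
#include <stdint.h>

void
bzero_sketch (void *s, size_t n)
{
  unsigned char *p = s;

  /* Byte stores until the destination is 16-byte aligned.  */
  while (n > 0 && ((uintptr_t) p & 15) != 0)
    { *p++ = 0; n--; }

  /* Whole 128-byte lines, done 8 bytes at a time here for simplicity.  */
  while (n >= 128)
    {
      for (int i = 0; i < 16; i++)
        ((uint64_t *) p)[i] = 0;
      p += 128;
      n -= 128;
    }

  /* Remaining 8-byte words.  */
  while (n >= 8)
    { *(uint64_t *) p = 0; p += 8; n -= 8; }

  /* Remaining tail bytes.  */
  while (n > 0)
    { *p++ = 0; n--; }
})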

sysdeps/ia64/memcpy.S

@ -1,7 +1,8 @@
/* Optimized version of the standard memcpy() function. /* Optimized version of the standard memcpy() function.
This file is part of the GNU C Library. This file is part of the GNU C Library.
Copyright (C) 2000, 2001 Free Software Foundation, Inc. Copyright (C) 2000, 2001 Free Software Foundation, Inc.
Contributed by Dan Pop <Dan.Pop@cern.ch>. Contributed by Dan Pop for Itanium <Dan.Pop@cern.ch>.
Rewritten for McKinley by Sverre Jarp, HP Labs/CERN <Sverre.Jarp@cern.ch>
The GNU C Library is free software; you can redistribute it and/or The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public modify it under the terms of the GNU Lesser General Public
@ -26,27 +27,39 @@
in2: byte count in2: byte count
An assembly implementation of the algorithm used by the generic C An assembly implementation of the algorithm used by the generic C
version from glibc. The case when all three arguments are multiples version from glibc. The case when source and dest are aligned is
of 8 is treated separatedly, for extra performance. treated separately, for extra performance.
In this form, it assumes little endian mode. For big endian mode, In this form, memcpy assumes little endian mode. For big endian mode,
sh1 must be computed using an extra instruction: sub sh1 = 64, sh1 sh1 must be computed using an extra instruction: sub sh1 = 64, sh1
and the order of r[MEMLAT] and r[MEMLAT+1] must be reverted in the and the order of r[MEMLAT] and r[MEMLAT+1] must be reverted in the
shrp instruction. */ shrp instruction. */
#define USE_LFETCH
#define USE_FLP
#include <sysdep.h> #include <sysdep.h>
#undef ret #undef ret
#define LFETCH_DIST 500
#define ALIGN_UNROLL_no 4 // no. of elements
#define ALIGN_UNROLL_sh 2 // (shift amount)
#define MEMLAT 8
#define Nrot ((4*(MEMLAT+2) + 7) & ~7)
#define OP_T_THRES 16 #define OP_T_THRES 16
#define OPSIZ 8 #define OPSIZ 8
#define adest r15 #define loopcnt r14
#define saved_pr r17 #define elemcnt r15
#define saved_lc r18 #define saved_pr r16
#define saved_lc r17
#define adest r18
#define dest r19 #define dest r19
#define src r20 #define asrc r20
#define len r21 #define src r21
#define asrc r22 #define len r22
#define tmp2 r23 #define tmp2 r23
#define tmp3 r24 #define tmp3 r24
#define tmp4 r25 #define tmp4 r25
@ -54,113 +67,339 @@
#define ploop56 r27 #define ploop56 r27
#define loopaddr r28 #define loopaddr r28
#define sh1 r29 #define sh1 r29
#define loopcnt r30 #define ptr1 r30
#define value r31 #define ptr2 r31
#define LOOP(shift) \ #define movi0 mov
.align 32 ; \
.loop##shift##: \ #define p_scr p6
(p[0]) ld8 r[0] = [asrc], 8 ; /* w1 */ \ #define p_xtr p7
(p[MEMLAT+1]) st8 [dest] = value, 8 ; \ #define p_nxtr p8
(p[MEMLAT]) shrp value = r[MEMLAT], r[MEMLAT+1], shift ; \ #define p_few p9
nop.b 0 ; \
nop.b 0 ; \ #if defined(USE_FLP)
br.ctop.sptk .loop##shift ; \ #define load ldf8
br.cond.sptk .cpyfew ; /* deal with the remaining bytes */ #define store stf8
#define tempreg f6
#define the_r fr
#define the_s fs
#define the_t ft
#define the_q fq
#define the_w fw
#define the_x fx
#define the_y fy
#define the_z fz
#elif defined(USE_INT)
#define load ld8
#define store st8
#define tempreg tmp2
#define the_r r
#define the_s s
#define the_t t
#define the_q q
#define the_w w
#define the_x x
#define the_y y
#define the_z z
#endif
#if defined(USE_LFETCH)
#define LOOP(shift) \
.align 32 ; \
.loop##shift##: \
{ .mmb \
(p[0]) ld8.nt1 r[0] = [asrc], 8 ; \
(p[0]) lfetch.nt1 [ptr1], 16 ; \
nop.b 0 ; \
} { .mib \
(p[MEMLAT+1]) st8 [dest] = tmp3, 8 ; \
(p[MEMLAT]) shrp tmp3 = r[MEMLAT], s[MEMLAT+1], shift ; \
nop.b 0 ;; \
} { .mmb \
(p[0]) ld8.nt1 s[0] = [asrc], 8 ; \
(p[0]) lfetch.nt1 [ptr2], 16 ; \
nop.b 0 ; \
} { .mib \
(p[MEMLAT+1]) st8 [dest] = tmp4, 8 ; \
(p[MEMLAT]) shrp tmp4 = s[MEMLAT], r[MEMLAT], shift ; \
br.ctop.sptk.many .loop##shift \
;; } \
{ .mib \
br.cond.sptk.many .copy_bytes ; /* deal with the remaining bytes */ \
}
#else
#define LOOP(shift) \
.align 32 ; \
.loop##shift##: \
{ .mmb \
(p[0]) ld8.nt1 r[0] = [asrc], 8 ; \
nop.b 0 ; \
} { .mib \
(p[MEMLAT+1]) st8 [dest] = tmp3, 8 ; \
(p[MEMLAT]) shrp tmp3 = r[MEMLAT], s[MEMLAT+1], shift ; \
nop.b 0 ;; \
} { .mmb \
(p[0]) ld8.nt1 s[0] = [asrc], 8 ; \
nop.b 0 ; \
} { .mib \
(p[MEMLAT+1]) st8 [dest] = tmp4, 8 ; \
(p[MEMLAT]) shrp tmp4 = s[MEMLAT], r[MEMLAT], shift ; \
br.ctop.sptk.many .loop##shift \
;; } \
{ .mib \
br.cond.sptk.many .copy_bytes ; /* deal with the remaining bytes */ \
}
#endif
#define MEMLAT 21
#define Nrot (((2*MEMLAT+3) + 7) & ~7)
ENTRY(memcpy) ENTRY(memcpy)
{ .mmi
.prologue .prologue
alloc r2 = ar.pfs, 3, Nrot - 3, 0, Nrot alloc r2 = ar.pfs, 3, Nrot - 3, 0, Nrot
.rotr r[MEMLAT + 2], q[MEMLAT + 1] .rotr r[MEMLAT+1], s[MEMLAT+2], q[MEMLAT+1], t[MEMLAT+1]
.rotp p[MEMLAT + 2] .rotp p[MEMLAT+2]
mov ret0 = in0 // return value = dest .rotf fr[MEMLAT+1], fq[MEMLAT+1], fs[MEMLAT+1], ft[MEMLAT+1]
.save pr, saved_pr mov ret0 = in0 // return tmp2 = dest
mov saved_pr = pr // save the predicate registers .save pr, saved_pr
.save ar.lc, saved_lc movi0 saved_pr = pr // save the predicate registers
mov saved_lc = ar.lc // save the loop counter } { .mmi
.body and tmp4 = 7, in0 // check if destination is aligned
or tmp3 = in0, in1 ;; // tmp3 = dest | src
or tmp3 = tmp3, in2 // tmp3 = dest | src | len
mov dest = in0 // dest mov dest = in0 // dest
mov src = in1 // src mov src = in1 // src
;; }
{ .mii
cmp.eq p_scr, p0 = in2, r0 // if (len == 0)
.save ar.lc, saved_lc
movi0 saved_lc = ar.lc // save the loop counter
.body
cmp.ge p_few, p0 = OP_T_THRES, in2 // is len <= OP_T_THRESH
} { .mbb
mov len = in2 // len mov len = in2 // len
sub tmp2 = r0, in0 // tmp2 = -dest (p_scr) br.cond.dpnt.few .restore_and_exit // Branch no. 1: return dest
cmp.eq p6, p0 = in2, r0 // if (len == 0) (p_few) br.cond.dpnt.many .copy_bytes // Branch no. 2: copy byte by byte
(p6) br.cond.spnt .restore_and_exit;;// return dest; ;; }
and tmp4 = 7, tmp3 // tmp4 = (dest | src | len) & 7 { .mmi
shr.u loopcnt = len, 4 ;; // loopcnt = len / 16 #if defined(USE_LFETCH)
cmp.ne p6, p0 = tmp4, r0 // if ((dest | src | len) & 7 != 0) lfetch.nt1 [dest] //
(p6) br.cond.sptk .next // goto next; lfetch.nt1 [src] //
#endif
shr.u elemcnt = len, 3 // elemcnt = len / 8
} { .mib
cmp.eq p_scr, p0 = tmp4, r0 // is destination aligned?
sub loopcnt = 7, tmp4 //
(p_scr) br.cond.dptk.many .dest_aligned
;; }
{ .mmi
ld1 tmp2 = [src], 1 //
sub len = len, loopcnt, 1 // reduce len
movi0 ar.lc = loopcnt //
} { .mib
cmp.ne p_scr, p0 = 0, loopcnt // avoid loading beyond end-point
;; }
// The optimal case, when dest, src and len are all multiples of 8 .l0: // ---------------------------- // L0: Align src on 8-byte boundary
{ .mmi
st1 [dest] = tmp2, 1 //
(p_scr) ld1 tmp2 = [src], 1 //
} { .mib
cmp.lt p_scr, p0 = 1, loopcnt // avoid load beyond end-point
add loopcnt = -1, loopcnt
br.cloop.dptk.few .l0 //
;; }
and tmp3 = 0xf, len // tmp3 = len % 16
mov pr.rot = 1 << 16 // set rotating predicates
mov ar.ec = MEMLAT + 1 ;; // set the epilog counter
cmp.ne p6, p0 = tmp3, r0 // do we have to copy an extra word?
adds loopcnt = -1, loopcnt;; // --loopcnt
(p6) ld8 value = [src], 8;;
(p6) st8 [dest] = value, 8 // copy the "extra" word
mov ar.lc = loopcnt // set the loop counter
cmp.eq p6, p0 = 8, len
(p6) br.cond.spnt .restore_and_exit;;// there was only one word to copy
adds adest = 8, dest
adds asrc = 8, src ;;
.align 32
.l0:
(p[0]) ld8 r[0] = [src], 16
(p[0]) ld8 q[0] = [asrc], 16
(p[MEMLAT]) st8 [dest] = r[MEMLAT], 16
(p[MEMLAT]) st8 [adest] = q[MEMLAT], 16
br.ctop.dptk .l0 ;;
mov pr = saved_pr, -1 // restore the predicate registers
mov ar.lc = saved_lc // restore the loop counter
br.ret.sptk.many b0
.next:
cmp.ge p6, p0 = OP_T_THRES, len // is len <= OP_T_THRES
and loopcnt = 7, tmp2 // loopcnt = -dest % 8
(p6) br.cond.spnt .cpyfew // copy byte by byte
;;
cmp.eq p6, p0 = loopcnt, r0
(p6) br.cond.sptk .dest_aligned
sub len = len, loopcnt // len -= -dest % 8
adds loopcnt = -1, loopcnt // --loopcnt
;;
mov ar.lc = loopcnt
.l1: // copy -dest % 8 bytes
ld1 value = [src], 1 // value = *src++
;;
st1 [dest] = value, 1 // *dest++ = value
br.cloop.dptk .l1 ;;
.dest_aligned: .dest_aligned:
{ .mmi
and tmp4 = 7, src // ready for alignment check
shr.u elemcnt = len, 3 // elemcnt = len / 8
;; }
{ .mib
cmp.ne p_scr, p0 = tmp4, r0 // is source also aligned
tbit.nz p_xtr, p_nxtr = src, 3 // prepare a separate move if src
} { .mib // is not 16B aligned
add ptr2 = LFETCH_DIST, dest // prefetch address
add ptr1 = LFETCH_DIST, src
(p_scr) br.cond.dptk.many .src_not_aligned
;; }
// The optimal case, when dest, and src are aligned
.both_aligned:
{ .mmi
.pred.rel "mutex",p_xtr,p_nxtr
(p_xtr) cmp.gt p_scr, p0 = ALIGN_UNROLL_no+1, elemcnt // Need N + 1 to qualify
(p_nxtr) cmp.gt p_scr, p0 = ALIGN_UNROLL_no, elemcnt // Need only N to qualify
movi0 pr.rot = 1 << 16 // set rotating predicates
} { .mib
(p_scr) br.cond.dpnt.many .copy_full_words
;; }
{ .mmi
(p_xtr) load tempreg = [src], 8
(p_xtr) add elemcnt = -1, elemcnt
movi0 ar.ec = MEMLAT + 1 // set the epilog counter
;; }
{ .mmi
(p_xtr) add len = -8, len //
add asrc = 16, src // one bank apart (for USE_INT)
shr.u loopcnt = elemcnt, ALIGN_UNROLL_sh // cater for unrolling
;;}
{ .mmi
add loopcnt = -1, loopcnt
(p_xtr) store [dest] = tempreg, 8 // copy the "extra" word
nop.i 0
;; }
{ .mib
add adest = 16, dest
movi0 ar.lc = loopcnt // set the loop counter
;; }
.align 32
#if defined(USE_FLP)
.l1: // ------------------------------- // L1: Everything a multiple of 8
{ .mmi
#if defined(USE_LFETCH)
(p[0]) lfetch.nt1 [ptr2],32
#endif
(p[0]) ldfp8 the_r[0],the_q[0] = [src], 16
(p[0]) add len = -32, len
} {.mmb
(p[MEMLAT]) store [dest] = the_r[MEMLAT], 8
(p[MEMLAT]) store [adest] = the_s[MEMLAT], 8
;; }
{ .mmi
#if defined(USE_LFETCH)
(p[0]) lfetch.nt1 [ptr1],32
#endif
(p[0]) ldfp8 the_s[0], the_t[0] = [src], 16
} {.mmb
(p[MEMLAT]) store [dest] = the_q[MEMLAT], 24
(p[MEMLAT]) store [adest] = the_t[MEMLAT], 24
br.ctop.dptk.many .l1
;; }
#elif defined(USE_INT)
.l1: // ------------------------------- // L1: Everything a multiple of 8
{ .mmi
(p[0]) load the_r[0] = [src], 8
(p[0]) load the_q[0] = [asrc], 8
(p[0]) add len = -32, len
} {.mmb
(p[MEMLAT]) store [dest] = the_r[MEMLAT], 8
(p[MEMLAT]) store [adest] = the_q[MEMLAT], 8
;; }
{ .mmi
(p[0]) load the_s[0] = [src], 24
(p[0]) load the_t[0] = [asrc], 24
} {.mmb
(p[MEMLAT]) store [dest] = the_s[MEMLAT], 24
(p[MEMLAT]) store [adest] = the_t[MEMLAT], 24
#if defined(USE_LFETCH)
;; }
{ .mmb
(p[0]) lfetch.nt1 [ptr2],32
(p[0]) lfetch.nt1 [ptr1],32
#endif
br.ctop.dptk.many .l1
;; }
#endif
.copy_full_words:
{ .mib
cmp.gt p_scr, p0 = 8, len //
shr.u elemcnt = len, 3 //
(p_scr) br.cond.dpnt.many .copy_bytes
;; }
{ .mii
load tempreg = [src], 8
add loopcnt = -1, elemcnt //
;; }
{ .mii
cmp.ne p_scr, p0 = 0, loopcnt //
mov ar.lc = loopcnt //
;; }
.l2: // ------------------------------- // L2: Max 4 words copied separately
{ .mmi
store [dest] = tempreg, 8
(p_scr) load tempreg = [src], 8 //
add len = -8, len
} { .mib
cmp.lt p_scr, p0 = 1, loopcnt // avoid load beyond end-point
add loopcnt = -1, loopcnt
br.cloop.dptk.few .l2
;; }
.copy_bytes:
{ .mib
cmp.eq p_scr, p0 = len, r0 // is len == 0 ?
add loopcnt = -1, len // len--;
(p_scr) br.cond.spnt .restore_and_exit
;; }
{ .mii
ld1 tmp2 = [src], 1
movi0 ar.lc = loopcnt
cmp.ne p_scr, p0 = 0, loopcnt // avoid load beyond end-point
;; }
.l3: // ------------------------------- // L3: Final byte move
{ .mmi
st1 [dest] = tmp2, 1
(p_scr) ld1 tmp2 = [src], 1
} { .mib
cmp.lt p_scr, p0 = 1, loopcnt // avoid load beyond end-point
add loopcnt = -1, loopcnt
br.cloop.dptk.few .l3
;; }
.restore_and_exit:
{ .mmi
movi0 pr = saved_pr, -1 // restore the predicate registers
;; }
{ .mib
movi0 ar.lc = saved_lc // restore the loop counter
br.ret.sptk.many b0
;; }
.src_not_aligned:
{ .mmi
cmp.gt p_scr, p0 = 16, len
and sh1 = 7, src // sh1 = src % 8 and sh1 = 7, src // sh1 = src % 8
and tmp2 = -8, len // tmp2 = len & -OPSIZ shr.u loopcnt = len, 4 // element-cnt = len / 16
and asrc = -8, src // asrc = src & -OPSIZ -- align src } { .mib
shr.u loopcnt = len, 3 // loopcnt = len / 8 add tmp4 = @ltoff(.table), gp
and len = 7, len;; // len = len % 8 add tmp3 = @ltoff(.loop56), gp
adds loopcnt = -1, loopcnt // --loopcnt (p_scr) br.cond.dpnt.many .copy_bytes // do byte by byte if too few
addl tmp4 = @ltoff(.table), gp ;; }
addl tmp3 = @ltoff(.loop56), gp { .mmi
mov ar.ec = MEMLAT + 1 // set EC and asrc = -8, src // asrc = (-8) -- align src for loop
mov pr.rot = 1 << 16;; // set rotating predicates add loopcnt = -1, loopcnt // loopcnt--
mov ar.lc = loopcnt // set LC
cmp.eq p6, p0 = sh1, r0 // is the src aligned?
(p6) br.cond.sptk .src_aligned
add src = src, tmp2 // src += len & -OPSIZ
shl sh1 = sh1, 3 // sh1 = 8 * (src % 8) shl sh1 = sh1, 3 // sh1 = 8 * (src % 8)
} { .mmi
ld8 ptable = [tmp4] // ptable = &table
ld8 ploop56 = [tmp3] // ploop56 = &loop56 ld8 ploop56 = [tmp3] // ploop56 = &loop56
ld8 ptable = [tmp4];; // ptable = &table and tmp2 = -16, len // tmp2 = len & -OPSIZ
add tmp3 = ptable, sh1;; // tmp3 = &table + sh1 ;; }
mov ar.ec = MEMLAT + 1 + 1 // one more pass needed { .mmi
ld8 tmp4 = [tmp3];; // tmp4 = loop offset add tmp3 = ptable, sh1 // tmp3 = &table + sh1
add src = src, tmp2 // src += len & (-16)
movi0 ar.lc = loopcnt // set LC
;; }
{ .mmi
ld8 tmp4 = [tmp3] // tmp4 = loop offset
sub len = len, tmp2 // len -= len & (-16)
movi0 ar.ec = MEMLAT + 2 // one more pass needed
;; }
{ .mmi
ld8 s[1] = [asrc], 8 // preload
sub loopaddr = ploop56,tmp4 // loopadd = &loop56 - loop offset sub loopaddr = ploop56,tmp4 // loopadd = &loop56 - loop offset
ld8 r[1] = [asrc], 8;; // w0 movi0 pr.rot = 1 << 16 // set rotating predicates
mov b6 = loopaddr;; ;; }
{ .mib
nop.m 0
movi0 b6 = loopaddr
br b6 // jump to the appropriate loop br b6 // jump to the appropriate loop
;; }
LOOP(8) LOOP(8)
LOOP(16) LOOP(16)
@ -169,26 +408,9 @@ ENTRY(memcpy)
LOOP(40) LOOP(40)
LOOP(48) LOOP(48)
LOOP(56) LOOP(56)
END(memcpy)
.src_aligned:
.l3: .rodata
(p[0]) ld8 r[0] = [src], 8
(p[MEMLAT]) st8 [dest] = r[MEMLAT], 8
br.ctop.dptk .l3 ;;
.cpyfew:
cmp.eq p6, p0 = len, r0 // is len == 0 ?
adds len = -1, len // --len;
(p6) br.cond.spnt .restore_and_exit ;;
mov ar.lc = len
.l4:
ld1 value = [src], 1
;;
st1 [dest] = value, 1
br.cloop.dptk .l4 ;;
.restore_and_exit:
mov pr = saved_pr, -1 // restore the predicate registers
mov ar.lc = saved_lc // restore the loop counter
br.ret.sptk.many b0
.align 8 .align 8
.table: .table:
data8 0 // dummy entry data8 0 // dummy entry
@ -199,5 +421,3 @@ ENTRY(memcpy)
data8 .loop56 - .loop40 data8 .loop56 - .loop40
data8 .loop56 - .loop48 data8 .loop56 - .loop48
data8 .loop56 - .loop56 data8 .loop56 - .loop56
END(memcpy)
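
(Illustration only, not part of the patch: the LOOP(shift) macros copy a
misaligned source by loading aligned 8-byte words and splicing each output
word from two neighbours with shrp.  The same splice in C, little endian,
with an assumed misalignment of 3 bytes:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t w0 = 0x8877665544332211ull;   /* first aligned source word */
  uint64_t w1 = 0xffeeddccbbaa9988ull;   /* next aligned source word  */
  unsigned off = 3;                      /* src % 8                   */

  /* Drop the low `off' bytes of w0 and complete the destination word
     with the low `off' bytes of w1 -- what shrp does in one shot.  */
  uint64_t out = (w0 >> (8 * off)) | (w1 << (64 - 8 * off));

  /* Prints aa99888877665544: source bytes 3..7 of w0, then 0..2 of w1.  */
  printf ("%016llx\n", (unsigned long long) out);
  return 0;
})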

sysdeps/ia64/memset.S

@ -1,7 +1,8 @@
/* Optimized version of the standard memset() function. /* Optimized version of the standard memset() function.
This file is part of the GNU C Library. This file is part of the GNU C Library.
Copyright (C) 2000, 2001 Free Software Foundation, Inc. Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
Contributed by Dan Pop <Dan.Pop@cern.ch>. Contributed by Dan Pop for Itanium <Dan.Pop@cern.ch>.
Rewritten for McKinley by Sverre Jarp, HP Labs/CERN <Sverre.Jarp@cern.ch>
The GNU C Library is free software; you can redistribute it and/or The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public modify it under the terms of the GNU Lesser General Public
@ -19,80 +20,373 @@
02111-1307 USA. */ 02111-1307 USA. */
/* Return: dest /* Return: dest
Inputs: Inputs:
in0: dest in0: dest
in1: value in1: value
in2: count in2: count
The algorithm is fairly straightforward: set byte by byte until we The algorithm is fairly straightforward: set byte by byte until we
we get to a word aligned address, then set word by word as much as we get to a 16B-aligned address, then loop on 128 B chunks using an
possible; the remaining few bytes are set one by one. */ early store as prefetching, then loop on 32B chunks, then clear remaining
words, finally clear remaining bytes.
Since a stf.spill f0 can store 16B in one go, we use this instruction
to get peak speed when value = 0. */
#include <sysdep.h> #include <sysdep.h>
#undef ret #undef ret
#define dest in0 #define dest in0
#define byteval in1 #define value in1
#define cnt in2 #define cnt in2
#define save_pfs loc0 #define tmp r31
#define ptr1 loc1 #define save_lc r30
#define ptr2 loc2 #define ptr0 r29
#define tmp loc3 #define ptr1 r28
#define loopcnt loc4 #define ptr2 r27
#define save_lc loc5 #define ptr3 r26
#define wordval loc6 #define ptr9 r24
#define loopcnt r23
#define linecnt r22
#define bytecnt r21
#define fvalue f6
// This routine uses only scratch predicate registers (p6 - p15)
#define p_scr p6 // default register for same-cycle branches
#define p_nz p7
#define p_zr p8
#define p_unalgn p9
#define p_y p11
#define p_n p12
#define p_yy p13
#define p_nn p14
#define movi0 mov
#define MIN1 15
#define MIN1P1HALF 8
#define LINE_SIZE 128
#define LSIZE_SH 7 // shift amount
#define PREF_AHEAD 8
#define USE_FLP
#if defined(USE_INT)
#define store st8
#define myval value
#elif defined(USE_FLP)
#define store stf8
#define myval fvalue
#endif
.align 64
ENTRY(memset) ENTRY(memset)
{ .mmi
.prologue .prologue
alloc save_pfs = ar.pfs, 3, 7, 0, 0 alloc tmp = ar.pfs, 3, 0, 0, 0
.save ar.lc, save_lc lfetch.nt1 [dest]
mov save_lc = ar.lc .save ar.lc, save_lc
movi0 save_lc = ar.lc
} { .mmi
.body .body
mov ret0 = dest mov ret0 = dest // return value
and tmp = 7, dest cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero
cmp.eq p6, p0 = cnt, r0 cmp.eq p_scr, p0 = cnt, r0
(p6) br.cond.spnt .restore_and_exit ;; ;; }
{ .mmi
and ptr2 = -(MIN1+1), dest // aligned address
and tmp = MIN1, dest // prepare to check for alignment
tbit.nz p_y, p_n = dest, 0 // Do we have an odd address? (M_B_U)
} { .mib
mov ptr1 = dest mov ptr1 = dest
sub loopcnt = 8, tmp mux1 value = value, @brcst // create 8 identical bytes in word
cmp.gt p6, p0 = 16, cnt (p_scr) br.ret.dpnt.many rp // return immediately if count = 0
(p6) br.cond.spnt .set_few;; ;; }
cmp.eq p6, p0 = tmp, r0 { .mib
(p6) br.cond.sptk .dest_aligned cmp.ne p_unalgn, p0 = tmp, r0
sub cnt = cnt, loopcnt } { .mib // NB: # of bytes to move is 1 higher
adds loopcnt = -1, loopcnt;; sub bytecnt = (MIN1+1), tmp // than loopcnt
mov ar.lc = loopcnt;; cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task?
.l1: (p_scr) br.cond.dptk.many .move_bytes_unaligned // go move just a few (M_B_U)
st1 [ptr1] = byteval, 1 ;; }
br.cloop.dptk .l1 ;; { .mmi
.dest_aligned: (p_unalgn) add ptr1 = (MIN1+1), ptr2 // after alignment
adds ptr2 = 8, ptr1 (p_unalgn) add ptr2 = MIN1P1HALF, ptr2 // after alignment
mux1 wordval = byteval, @brcst (p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3 // should we do a st8 ?
shr.u loopcnt = cnt, 4 ;; // loopcnt = cnt / 16 ;; }
cmp.eq p6, p0 = loopcnt, r0 { .mib
(p6) br.cond.spnt .one_more (p_y) add cnt = -8, cnt
and cnt = 0xf, cnt // compute the remaining cnt (p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 // should we do a st4 ?
adds loopcnt = -1, loopcnt;; } { .mib
mov ar.lc = loopcnt;; (p_y) st8 [ptr2] = value, -4
.l2: (p_n) add ptr2 = 4, ptr2
st8 [ptr1] = wordval, 16 ;; }
st8 [ptr2] = wordval, 16 { .mib
br.cloop.dptk .l2 (p_yy) add cnt = -4, cnt
cmp.le p6, p0 = 8, cnt ;; (p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1 // should we do a st2 ?
.one_more: } { .mib
(p6) st8 [ptr1] = wordval, 8 (p_yy) st4 [ptr2] = value, -2
(p6) adds cnt = -8, cnt ;; (p_nn) add ptr2 = 2, ptr2
cmp.eq p6, p0 = cnt, r0 ;; }
(p6) br.cond.spnt .restore_and_exit { .mmi
.set_few: mov tmp = LINE_SIZE+1 // for compare
adds loopcnt = -1, cnt;; (p_y) add cnt = -2, cnt
mov ar.lc = loopcnt;; (p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 // should we do a st1 ?
.l3: } { .mmi
st1 [ptr1] = byteval, 1 setf.sig fvalue=value // transfer value to FLP side
br.cloop.dptk .l3 ;; (p_y) st2 [ptr2] = value, -1
(p_n) add ptr2 = 1, ptr2
;; }
{ .mmi
(p_yy) st1 [ptr2] = value
cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task?
} { .mbb
(p_yy) add cnt = -1, cnt
(p_scr) br.cond.dpnt.many .fraction_of_line // go move just a few
;; }
{ .mib
nop.m 0
shr.u linecnt = cnt, LSIZE_SH
(p_zr) br.cond.dptk.many .l1b // Jump to use stf.spill
;; }
.align 32 // -------- // L1A: store ahead into cache lines; fill later
{ .mmi
and tmp = -(LINE_SIZE), cnt // compute end of range
mov ptr9 = ptr1 // used for prefetching
and cnt = (LINE_SIZE-1), cnt // remainder
} { .mmi
mov loopcnt = PREF_AHEAD-1 // default prefetch loop
cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
;; }
{ .mmi
(p_scr) add loopcnt = -1, linecnt // start of stores
add ptr2 = 8, ptr1 // (beyond prefetch stores)
add ptr1 = tmp, ptr1 // first address beyond total
;; } // range
{ .mmi
add tmp = -1, linecnt // next loop count
movi0 ar.lc = loopcnt
;; }
.pref_l1a:
{ .mib
store [ptr9] = myval, 128 // Do stores one cache line apart
nop.i 0
br.cloop.dptk.few .pref_l1a
;; }
{ .mmi
add ptr0 = 16, ptr2 // Two stores in parallel
movi0 ar.lc = tmp
;; }
.l1ax:
{ .mmi
store [ptr2] = myval, 8
store [ptr0] = myval, 8
;; }
{ .mmi
store [ptr2] = myval, 24
store [ptr0] = myval, 24
;; }
{ .mmi
store [ptr2] = myval, 8
store [ptr0] = myval, 8
;; }
{ .mmi
store [ptr2] = myval, 24
store [ptr0] = myval, 24
;; }
{ .mmi
store [ptr2] = myval, 8
store [ptr0] = myval, 8
;; }
{ .mmi
store [ptr2] = myval, 24
store [ptr0] = myval, 24
;; }
{ .mmi
store [ptr2] = myval, 8
store [ptr0] = myval, 32
cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
;; }
{ .mmb
store [ptr2] = myval, 24
(p_scr) store [ptr9] = myval, 128
br.cloop.dptk.few .l1ax
;; }
{ .mbb
cmp.le p_scr, p0 = 8, cnt // just a few bytes left ?
(p_scr) br.cond.dpnt.many .fraction_of_line // Branch no. 2
br.cond.dpnt.many .move_bytes_from_alignment // Branch no. 3
;; }
.align 32
.l1b: // ------------------ // L1B: store ahead into cache lines; fill later
{ .mmi
and tmp = -(LINE_SIZE), cnt // compute end of range
mov ptr9 = ptr1 // used for prefetching
and cnt = (LINE_SIZE-1), cnt // remainder
} { .mmi
mov loopcnt = PREF_AHEAD-1 // default prefetch loop
cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
;; }
{ .mmi
(p_scr) add loopcnt = -1, linecnt
add ptr2 = 16, ptr1 // start of stores (beyond prefetch stores)
add ptr1 = tmp, ptr1 // first address beyond total range
;; }
{ .mmi
add tmp = -1, linecnt // next loop count
movi0 ar.lc = loopcnt
;; }
.pref_l1b:
{ .mib
stf.spill [ptr9] = f0, 128 // Do stores one cache line apart
nop.i 0
br.cloop.dptk.few .pref_l1b
;; }
{ .mmi
add ptr0 = 16, ptr2 // Two stores in parallel
movi0 ar.lc = tmp
;; }
.l1bx:
{ .mmi
stf.spill [ptr2] = f0, 32
stf.spill [ptr0] = f0, 32
;; }
{ .mmi
stf.spill [ptr2] = f0, 32
stf.spill [ptr0] = f0, 32
;; }
{ .mmi
stf.spill [ptr2] = f0, 32
stf.spill [ptr0] = f0, 64
cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
;; }
{ .mmb
stf.spill [ptr2] = f0, 32
(p_scr) stf.spill [ptr9] = f0, 128
br.cloop.dptk.few .l1bx
;; }
{ .mib
cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
(p_scr) br.cond.dpnt.many .move_bytes_from_alignment
;; }
.fraction_of_line:
{ .mib
add ptr2 = 16, ptr1
shr.u loopcnt = cnt, 5 // loopcnt = cnt / 32
;; }
{ .mib
cmp.eq p_scr, p0 = loopcnt, r0
add loopcnt = -1, loopcnt
(p_scr) br.cond.dpnt.many .store_words
;; }
{ .mib
and cnt = 0x1f, cnt // compute the remaining cnt
movi0 ar.lc = loopcnt
;; }
.align 32
.l2: // ---------------------------- // L2A: store 32B in 2 cycles
{ .mmb
store [ptr1] = myval, 8
store [ptr2] = myval, 8
;; } { .mmb
store [ptr1] = myval, 24
store [ptr2] = myval, 24
br.cloop.dptk.many .l2
;; }
.store_words:
{ .mib
cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
(p_scr) br.cond.dpnt.many .move_bytes_from_alignment // Branch
;; }
{ .mmi
store [ptr1] = myval, 8 // store
cmp.le p_y, p_n = 16, cnt //
add cnt = -8, cnt // subtract
;; }
{ .mmi
(p_y) store [ptr1] = myval, 8 // store
(p_y) cmp.le.unc p_yy, p_nn = 16, cnt //
(p_y) add cnt = -8, cnt // subtract
;; }
{ .mmi // store
(p_yy) store [ptr1] = myval, 8 //
(p_yy) add cnt = -8, cnt // subtract
;; }
.move_bytes_from_alignment:
{ .mib
cmp.eq p_scr, p0 = cnt, r0
tbit.nz.unc p_y, p0 = cnt, 2 // should we terminate with a st4 ?
(p_scr) br.cond.dpnt.few .restore_and_exit
;; }
{ .mib
(p_y) st4 [ptr1] = value, 4
tbit.nz.unc p_yy, p0 = cnt, 1 // should we terminate with a st2 ?
;; }
{ .mib
(p_yy) st2 [ptr1] = value, 2
tbit.nz.unc p_y, p0 = cnt, 0
;; }
{ .mib
(p_y) st1 [ptr1] = value
;; }
.restore_and_exit: .restore_and_exit:
mov ar.lc = save_lc { .mib
mov ar.pfs = save_pfs nop.m 0
br.ret.sptk.many b0 movi0 ar.lc = save_lc
br.ret.sptk.many rp
;; }
.move_bytes_unaligned:
{ .mmi
.pred.rel "mutex",p_y, p_n
.pred.rel "mutex",p_yy, p_nn
(p_n) cmp.le p_yy, p_nn = 4, cnt
(p_y) cmp.le p_yy, p_nn = 5, cnt
(p_n) add ptr2 = 2, ptr1
} { .mmi
(p_y) add ptr2 = 3, ptr1
(p_y) st1 [ptr1] = value, 1 // fill 1 (odd-aligned) byte
(p_y) add cnt = -1, cnt // [15, 14 (or less) left]
;; }
{ .mmi
(p_yy) cmp.le.unc p_y, p0 = 8, cnt
add ptr3 = ptr1, cnt // prepare last store
movi0 ar.lc = save_lc
} { .mmi
(p_yy) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes
(p_yy) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes
(p_yy) add cnt = -4, cnt // [11, 10 (or less) left]
;; }
{ .mmi
(p_y) cmp.le.unc p_yy, p0 = 8, cnt
add ptr3 = -1, ptr3 // last store
tbit.nz p_scr, p0 = cnt, 1 // will there be a st2 at the end ?
} { .mmi
(p_y) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes
(p_y) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes
(p_y) add cnt = -4, cnt // [7, 6 (or less) left]
;; }
{ .mmi
(p_yy) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes
(p_yy) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes
// [3, 2 (or less) left]
tbit.nz p_y, p0 = cnt, 0 // will there be a st1 at the end ?
} { .mmi
(p_yy) add cnt = -4, cnt
;; }
{ .mmb
(p_scr) st2 [ptr1] = value // fill 2 (aligned) bytes
(p_y) st1 [ptr3] = value // fill last byte (using ptr3)
br.ret.sptk.many rp
;; }
END(memset) END(memset)

sysdeps/mach/configure

@@ -338,3 +338,33 @@ if test $libc_cv_mach_i386_ioports = yes; then
 EOF
 
 fi
+
+echo $ac_n "checking for i386_set_gdt in mach_i386.defs""... $ac_c" 1>&6
+echo "configure:344: checking for i386_set_gdt in mach_i386.defs" >&5
+if eval "test \"`echo '$''{'libc_cv_mach_i386_gdt'+set}'`\" = set"; then
+  echo $ac_n "(cached) $ac_c" 1>&6
+else
+  cat > conftest.$ac_ext <<EOF
+#line 349 "configure"
+#include "confdefs.h"
+#include <mach/i386/mach_i386.defs>
+EOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+  egrep "i386_set_gdt" >/dev/null 2>&1; then
+  rm -rf conftest*
+  libc_cv_mach_i386_gdt=yes
+else
+  rm -rf conftest*
+  libc_cv_mach_i386_gdt=no
+fi
+rm -f conftest*
+
+fi
+
+echo "$ac_t""$libc_cv_mach_i386_gdt" 1>&6
+if test $libc_cv_mach_i386_gdt = yes; then
+  cat >> confdefs.h <<\EOF
+#define HAVE_I386_SET_GDT 1
+EOF
+
+fi

sysdeps/mach/configure.in

@@ -89,3 +89,12 @@ AC_EGREP_HEADER(i386_io_perm_modify, mach/i386/mach_i386.defs,
 if test $libc_cv_mach_i386_ioports = yes; then
   AC_DEFINE([HAVE_I386_IO_PERM_MODIFY])
 fi
+
+AC_CACHE_CHECK(for i386_set_gdt in mach_i386.defs,
+	       libc_cv_mach_i386_gdt, [dnl
+AC_EGREP_HEADER(i386_set_gdt, mach/i386/mach_i386.defs,
+		libc_cv_mach_i386_gdt=yes,
+		libc_cv_mach_i386_gdt=no)])
+if test $libc_cv_mach_i386_gdt = yes; then
+  AC_DEFINE([HAVE_I386_SET_GDT])
+fi

sysdeps/mach/hurd/i386/init-first.c

@@ -336,7 +336,7 @@ _hurd_stack_setup (volatile int argc, ...)
   *--data = (&argc)[-1];
 
   asm volatile ("movl %0, %%esp\n" /* Switch to new outermost stack.  */
		"movl $0, %%ebp\n" /* Clear outermost frame pointer.  */
-		"jmp *%1" : : "r" (data), "r" (&doinit1));
+		"jmp *%1" : : "r" (data), "r" (&doinit1) : "sp", "bp");
   /* NOTREACHED */
 }