2004-03-07  Ulrich Drepper  <drepper@redhat.com>

	* sysdeps/powerpc/elf/rtld-global-offsets.sym: Adjust for moving
	_dl_hwcap into _rtld_global_ro.
	* sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S: Likewise.
	* sysdeps/powerpc/powerpc32/fpu/setjmp-common.S: Likewise.
	* sysdeps/powerpc/powerpc64/__longjmp-common.S: Likewise.
	* sysdeps/powerpc/powerpc64/setjmp-common.S: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/powerpc32/getcontext.S: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/powerpc32/setcontext.S: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/powerpc32/swapcontext.S: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/powerpc64/getcontext.S: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/powerpc64/setcontext.S: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/powerpc64/swapcontext.S: Likewise.
This commit is contained in:
Ulrich Drepper 2004-03-07 19:53:49 +00:00
parent 4166148912
commit ef690addb5
12 changed files with 199 additions and 184 deletions

View File

@ -1,3 +1,18 @@
2004-03-07 Ulrich Drepper <drepper@redhat.com>
* sysdeps/powerpc/elf/rtld-global-offsets.sym: Adjust for moving
_dl_hwcap into _rtld_global_ro.
* sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S: Likewise.
* sysdeps/powerpc/powerpc32/fpu/setjmp-common.S: Likewise.
* sysdeps/powerpc/powerpc64/__longjmp-common.S: Likewise.
* sysdeps/powerpc/powerpc64/setjmp-common.S: Likewise.
* sysdeps/unix/sysv/linux/powerpc/powerpc32/getcontext.S: Likewise.
* sysdeps/unix/sysv/linux/powerpc/powerpc32/setcontext.S: Likewise.
* sysdeps/unix/sysv/linux/powerpc/powerpc32/swapcontext.S: Likewise.
* sysdeps/unix/sysv/linux/powerpc/powerpc64/getcontext.S: Likewise.
* sysdeps/unix/sysv/linux/powerpc/powerpc64/setcontext.S: Likewise.
* sysdeps/unix/sysv/linux/powerpc/powerpc64/swapcontext.S: Likewise.
2004-03-07  Andreas Schwab  <schwab@suse.de>

	* elf/dl-sym.c: Include <dl-tls.h> only when USE_TLS.

View File

@ -2,6 +2,6 @@
#include <ldsodefs.h> #include <ldsodefs.h>
#define rtdl_global_offsetof(mem) offsetof (struct rtld_global, mem) #define rtdl_global_ro_offsetof(mem) offsetof (struct rtld_global_ro, mem)
RTLD_GLOBAL_DL_HWCAP_OFFSET rtdl_global_offsetof (_dl_hwcap) RTLD_GLOBAL_RO_DL_HWCAP_OFFSET rtdl_global_ro_offsetof (_dl_hwcap)

View File

@ -36,11 +36,11 @@ ENTRY (BP_SYM (__longjmp))
bl _GLOBAL_OFFSET_TABLE_@local-4 bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r5 mflr r5
# ifdef SHARED # ifdef SHARED
lwz r5,_rtld_global@got(r5) lwz r5,_rtld_global_ro@got(r5)
mtlr r6 mtlr r6
lwz r5,RTLD_GLOBAL_DL_HWCAP_OFFSET(r5) lwz r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
# else # else
lwz r5,_rtld_global@got(r5) lwz r5,_rtld_global_ro@got(r5)
mtlr r6 mtlr r6
lwz r5,0(r5) lwz r5,0(r5)
# endif # endif

View File

@ -79,11 +79,11 @@ ENTRY (BP_SYM (__sigsetjmp))
bl _GLOBAL_OFFSET_TABLE_@local-4 bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r5 mflr r5
#ifdef SHARED #ifdef SHARED
lwz r5,_rtld_global@got(r5) lwz r5,_rtld_global_ro@got(r5)
mtlr r6 mtlr r6
lwz r5,RTLD_GLOBAL_DL_HWCAP_OFFSET(r5) lwz r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
#else #else
lwz r5,_rtld_global@got(r5) lwz r5,_rtld_global_ro@got(r5)
mtlr r6 mtlr r6
lwz r5,0(r5) lwz r5,0(r5)
#endif #endif

View File

@ -32,11 +32,11 @@
#ifndef __NO_VMX__ #ifndef __NO_VMX__
.section ".toc","aw" .section ".toc","aw"
.LC__dl_hwcap: .LC__dl_hwcap:
#ifdef SHARED # ifdef SHARED
.tc _rtld_global[TC],_rtld_global .tc _rtld_global_ro[TC],_rtld_global_ro
#else # else
.tc _dl_hwcap[TC],_dl_hwcap .tc _dl_hwcap[TC],_dl_hwcap
#endif # endif
.section ".text" .section ".text"
#endif #endif
@ -44,12 +44,12 @@ ENTRY (BP_SYM (__longjmp))
CHECK_BOUNDS_BOTH_WIDE_LIT (r3, r8, r9, JB_SIZE) CHECK_BOUNDS_BOTH_WIDE_LIT (r3, r8, r9, JB_SIZE)
#ifndef __NO_VMX__ #ifndef __NO_VMX__
ld r5,.LC__dl_hwcap@toc(r2) ld r5,.LC__dl_hwcap@toc(r2)
#ifdef SHARED # ifdef SHARED
/* Load _rtld-global._dl_hwcap. */ /* Load _rtld-global._dl_hwcap. */
ld r5,RTLD_GLOBAL_DL_HWCAP_OFFSET(r5) ld r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
#else # else
ld r5,0(r5) /* Load extern _dl_hwcap. */ ld r5,0(r5) /* Load extern _dl_hwcap. */
#endif # endif
andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16) andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
beq no_vmx beq no_vmx
la r5,((JB_VRS)*8)(3) la r5,((JB_VRS)*8)(3)
@ -63,7 +63,7 @@ ENTRY (BP_SYM (__longjmp))
addi r5,r5,32 addi r5,r5,32
lvx v21,0,r6 lvx v21,0,r6
vperm v20,v1,v21,v0 vperm v20,v1,v21,v0
#define load_misaligned_vmx_lo_loaded(loadvr,lovr,shiftvr,loadgpr,addgpr) \ # define load_misaligned_vmx_lo_loaded(loadvr,lovr,shiftvr,loadgpr,addgpr) \
addi addgpr,addgpr,32; \ addi addgpr,addgpr,32; \
lvx lovr,0,loadgpr; \ lvx lovr,0,loadgpr; \
vperm loadvr,loadvr,lovr,shiftvr; vperm loadvr,loadvr,lovr,shiftvr;

View File

@ -31,11 +31,11 @@
#ifndef __NO_VMX__ #ifndef __NO_VMX__
.section ".toc","aw" .section ".toc","aw"
.LC__dl_hwcap: .LC__dl_hwcap:
#ifdef SHARED # ifdef SHARED
.tc _rtld_global[TC],_rtld_global .tc _rtld_global_ro[TC],_rtld_global_ro
#else # else
.tc _dl_hwcap[TC],_dl_hwcap .tc _dl_hwcap[TC],_dl_hwcap
#endif # endif
.section ".text" .section ".text"
#endif #endif
@ -85,12 +85,12 @@ ENTRY (BP_SYM (__sigsetjmp))
stfd fp31,((JB_FPRS+17)*8)(3) stfd fp31,((JB_FPRS+17)*8)(3)
#ifndef __NO_VMX__ #ifndef __NO_VMX__
ld r5,.LC__dl_hwcap@toc(r2) ld r5,.LC__dl_hwcap@toc(r2)
#ifdef SHARED # ifdef SHARED
/* Load _rtld-global._dl_hwcap. */ /* Load _rtld-global._dl_hwcap. */
ld r5,RTLD_GLOBAL_DL_HWCAP_OFFSET(r5) ld r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
#else # else
ld r5,0(r5) /* Load extern _dl_hwcap. */ ld r5,0(r5) /* Load extern _dl_hwcap. */
#endif # endif
andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16) andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
beq no_vmx beq no_vmx
la r5,((JB_VRS)*8)(3) la r5,((JB_VRS)*8)(3)
@ -114,7 +114,7 @@ ENTRY (BP_SYM (__sigsetjmp))
vsel v20,v20,v2,v3 vsel v20,v20,v2,v3
stvx v5,0,r5 stvx v5,0,r5
#define save_2vmx_partial(savevr,prev_savevr,hivr,shiftvr,maskvr,savegpr,addgpr) \ # define save_2vmx_partial(savevr,prev_savevr,hivr,shiftvr,maskvr,savegpr,addgpr) \
addi addgpr,addgpr,32; \ addi addgpr,addgpr,32; \
vperm savevr,savevr,savevr,shiftvr; \ vperm savevr,savevr,savevr,shiftvr; \
vsel hivr,prev_savevr,savevr,maskvr; \ vsel hivr,prev_savevr,savevr,maskvr; \

View File

@ -121,15 +121,15 @@ ENTRY(__getcontext)
mflr r8 mflr r8
bl _GLOBAL_OFFSET_TABLE_@local-4 bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r7 mflr r7
#ifdef SHARED # ifdef SHARED
lwz r7,_rtld_global_ro@got(r7) lwz r7,_rtld_global_ro@got(r7)
mtlr r8 mtlr r8
lwz r7,RTLD_GLOBAL_DL_HWCAP_OFFSET(r7) lwz r7,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r7)
#else # else
lwz r7,_dl_hwcap@got(r7) lwz r7,_dl_hwcap@got(r7)
mtlr r8 mtlr r8
lwz r7,0(r7) lwz r7,0(r7)
#endif # endif
#else #else
lis r7,_dl_hwcap@ha lis r7,_dl_hwcap@ha
lwz r7,_dl_hwcap@l(r7) lwz r7,_dl_hwcap@l(r7)

View File

@ -54,20 +54,20 @@ ENTRY(__setcontext)
bl JUMPTARGET(sigprocmask) bl JUMPTARGET(sigprocmask)
cmpwi r3,0 cmpwi r3,0
bne L(error_exit) bne L(error_exit)
#ifdef PIC #ifdef PIC
mflr r8 mflr r8
bl _GLOBAL_OFFSET_TABLE_@local-4 bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r7 mflr r7
#ifdef SHARED # ifdef SHARED
lwz r7,_rtld_global@got(r7) lwz r7,_rtld_global_ro@got(r7)
mtlr r8 mtlr r8
lwz r7,RTLD_GLOBAL_DL_HWCAP_OFFSET(r7) lwz r7,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r7)
#else # else
lwz r7,_dl_hwcap@got(r7) lwz r7,_dl_hwcap@got(r7)
mtlr r8 mtlr r8
lwz r7,0(r7) lwz r7,0(r7)
#endif # endif
#else #else
lis r7,_dl_hwcap@ha lis r7,_dl_hwcap@ha
lwz r7,_dl_hwcap@l(r7) lwz r7,_dl_hwcap@l(r7)
@ -75,100 +75,100 @@ ENTRY(__setcontext)
andis. r7,r7,(PPC_FEATURE_HAS_ALTIVEC >> 16) andis. r7,r7,(PPC_FEATURE_HAS_ALTIVEC >> 16)
la r10,(_UC_VREGS)(r31) la r10,(_UC_VREGS)(r31)
beq L(has_no_vec) beq L(has_no_vec)
lwz r0,(32*16)(r10) lwz r0,(32*16)(r10)
li r9,(32*16) li r9,(32*16)
cmpwi r0,0 cmpwi r0,0
mtspr VRSAVE,r0 mtspr VRSAVE,r0
beq L(has_no_vec) beq L(has_no_vec)
lvx v19,r9,r10 lvx v19,r9,r10
la r9,(16)(r10) la r9,(16)(r10)
lvx v0,0,r10 lvx v0,0,r10
lvx v1,0,r9 lvx v1,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
mtvscr v19 mtvscr v19
lvx v2,0,r10 lvx v2,0,r10
lvx v3,0,r9 lvx v3,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v4,0,r10 lvx v4,0,r10
lvx v5,0,r9 lvx v5,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v6,0,r10 lvx v6,0,r10
lvx v7,0,r9 lvx v7,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v8,0,r10 lvx v8,0,r10
lvx v9,0,r9 lvx v9,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v10,0,r10 lvx v10,0,r10
lvx v11,0,r9 lvx v11,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v12,0,r10 lvx v12,0,r10
lvx v13,0,r9 lvx v13,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v14,0,r10 lvx v14,0,r10
lvx v15,0,r9 lvx v15,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v16,0,r10 lvx v16,0,r10
lvx v17,0,r9 lvx v17,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v18,0,r10 lvx v18,0,r10
lvx v11,0,r9 lvx v11,0,r9
addi r19,r10,32 addi r19,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v20,0,r10 lvx v20,0,r10
lvx v21,0,r9 lvx v21,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v22,0,r10 lvx v22,0,r10
lvx v23,0,r9 lvx v23,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v24,0,r10 lvx v24,0,r10
lvx v25,0,r9 lvx v25,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v26,0,r10 lvx v26,0,r10
lvx v27,0,r9 lvx v27,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v28,0,r10 lvx v28,0,r10
lvx v29,0,r9 lvx v29,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v30,0,r10 lvx v30,0,r10
lvx v31,0,r9 lvx v31,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v10,0,r10 lvx v10,0,r10
lvx v11,0,r9 lvx v11,0,r9
L(has_no_vec): L(has_no_vec):
/* Restore the floating-point registers */ /* Restore the floating-point registers */
lfd fp31,_UC_FREGS+(32*8)(r31) lfd fp31,_UC_FREGS+(32*8)(r31)
@ -254,7 +254,7 @@ L(error_exit):
addi r1,r1,16 addi r1,r1,16
mtlr r0 mtlr r0
blr blr
L(do_sigret): L(do_sigret):
addi r1,r3,-0xd0 addi r1,r3,-0xd0
li r0,SYS_ify(rt_sigreturn) li r0,SYS_ify(rt_sigreturn)
@ -381,7 +381,7 @@ L(novec_error_exit):
addi r1,r1,16 addi r1,r1,16
mtlr r0 mtlr r0
blr blr
L(novec_do_sigret): L(novec_do_sigret):
addi r1,r3,-0xd0 addi r1,r3,-0xd0
li r0,SYS_ify(rt_sigreturn) li r0,SYS_ify(rt_sigreturn)

View File

@ -123,15 +123,15 @@ ENTRY(__swapcontext)
mflr r8 mflr r8
bl _GLOBAL_OFFSET_TABLE_@local-4 bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r7 mflr r7
#ifdef SHARED # ifdef SHARED
lwz r7,_rtld_global_ro@got(r7) lwz r7,_rtld_global_ro@got(r7)
mtlr r8 mtlr r8
lwz r7,RTLD_GLOBAL_DL_HWCAP_OFFSET(r7) lwz r7,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r7)
#else # else
lwz r7,_dl_hwcap@got(r7) lwz r7,_dl_hwcap@got(r7)
mtlr r8 mtlr r8
lwz r7,0(r7) lwz r7,0(r7)
#endif # endif
#else #else
lis r7,_dl_hwcap@ha lis r7,_dl_hwcap@ha
lwz r7,_dl_hwcap@l(r7) lwz r7,_dl_hwcap@l(r7)
@ -265,15 +265,15 @@ L(no_vec):
mflr r8 mflr r8
bl _GLOBAL_OFFSET_TABLE_@local-4 bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r7 mflr r7
#ifdef SHARED # ifdef SHARED
lwz r7,_rtld_global_ro@got(r7) lwz r7,_rtld_global_ro@got(r7)
mtlr r8 mtlr r8
lwz r7,RTLD_GLOBAL_DL_HWCAP_OFFSET(r7) lwz r7,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r7)
#else # else
lwz r7,_dl_hwcap@got(r7) lwz r7,_dl_hwcap@got(r7)
mtlr r8 mtlr r8
lwz r7,0(r7) lwz r7,0(r7)
#endif # endif
#else #else
lis r7,_dl_hwcap@ha lis r7,_dl_hwcap@ha
lwz r7,_dl_hwcap@l(r7) lwz r7,_dl_hwcap@l(r7)

View File

@ -268,12 +268,12 @@ ENTRY(__getcontext)
ld r5,.LC__dl_hwcap@toc(r2) ld r5,.LC__dl_hwcap@toc(r2)
li r10,0 li r10,0
#ifdef SHARED # ifdef SHARED
/* Load _rtld-global._dl_hwcap. */ /* Load _rtld-global._dl_hwcap. */
ld r5,RTLD_GLOBAL_DL_HWCAP_OFFSET(r5) ld r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
#else # else
ld r5,0(r5) /* Load extern _dl_hwcap. */ ld r5,0(r5) /* Load extern _dl_hwcap. */
#endif # endif
andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16) andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
beq L(has_no_vec) beq L(has_no_vec)

View File

@ -59,41 +59,41 @@ ENTRY(__novec_setcontext)
bne L(nv_error_exit) bne L(nv_error_exit)
lfd fp0,(SIGCONTEXT_FP_REGS+(32*8))(r31) lfd fp0,(SIGCONTEXT_FP_REGS+(32*8))(r31)
lfd fp31,(SIGCONTEXT_FP_REGS+(PT_R31*8))(r31) lfd fp31,(SIGCONTEXT_FP_REGS+(PT_R31*8))(r31)
lfd fp30,(SIGCONTEXT_FP_REGS+(PT_R30*8))(r31) lfd fp30,(SIGCONTEXT_FP_REGS+(PT_R30*8))(r31)
mtfsf 0xff,fp0 mtfsf 0xff,fp0
lfd fp29,(SIGCONTEXT_FP_REGS+(PT_R29*8))(r31) lfd fp29,(SIGCONTEXT_FP_REGS+(PT_R29*8))(r31)
lfd fp28,(SIGCONTEXT_FP_REGS+(PT_R28*8))(r31) lfd fp28,(SIGCONTEXT_FP_REGS+(PT_R28*8))(r31)
lfd fp27,(SIGCONTEXT_FP_REGS+(PT_R27*8))(r31) lfd fp27,(SIGCONTEXT_FP_REGS+(PT_R27*8))(r31)
lfd fp26,(SIGCONTEXT_FP_REGS+(PT_R26*8))(r31) lfd fp26,(SIGCONTEXT_FP_REGS+(PT_R26*8))(r31)
lfd fp25,(SIGCONTEXT_FP_REGS+(PT_R25*8))(r31) lfd fp25,(SIGCONTEXT_FP_REGS+(PT_R25*8))(r31)
lfd fp24,(SIGCONTEXT_FP_REGS+(PT_R24*8))(r31) lfd fp24,(SIGCONTEXT_FP_REGS+(PT_R24*8))(r31)
lfd fp23,(SIGCONTEXT_FP_REGS+(PT_R23*8))(r31) lfd fp23,(SIGCONTEXT_FP_REGS+(PT_R23*8))(r31)
lfd fp22,(SIGCONTEXT_FP_REGS+(PT_R22*8))(r31) lfd fp22,(SIGCONTEXT_FP_REGS+(PT_R22*8))(r31)
lfd fp21,(SIGCONTEXT_FP_REGS+(PT_R21*8))(r31) lfd fp21,(SIGCONTEXT_FP_REGS+(PT_R21*8))(r31)
lfd fp20,(SIGCONTEXT_FP_REGS+(PT_R20*8))(r31) lfd fp20,(SIGCONTEXT_FP_REGS+(PT_R20*8))(r31)
lfd fp19,(SIGCONTEXT_FP_REGS+(PT_R19*8))(r31) lfd fp19,(SIGCONTEXT_FP_REGS+(PT_R19*8))(r31)
lfd fp18,(SIGCONTEXT_FP_REGS+(PT_R18*8))(r31) lfd fp18,(SIGCONTEXT_FP_REGS+(PT_R18*8))(r31)
lfd fp17,(SIGCONTEXT_FP_REGS+(PT_R17*8))(r31) lfd fp17,(SIGCONTEXT_FP_REGS+(PT_R17*8))(r31)
lfd fp16,(SIGCONTEXT_FP_REGS+(PT_R16*8))(r31) lfd fp16,(SIGCONTEXT_FP_REGS+(PT_R16*8))(r31)
lfd fp15,(SIGCONTEXT_FP_REGS+(PT_R15*8))(r31) lfd fp15,(SIGCONTEXT_FP_REGS+(PT_R15*8))(r31)
lfd fp14,(SIGCONTEXT_FP_REGS+(PT_R14*8))(r31) lfd fp14,(SIGCONTEXT_FP_REGS+(PT_R14*8))(r31)
lfd fp13,(SIGCONTEXT_FP_REGS+(PT_R13*8))(r31) lfd fp13,(SIGCONTEXT_FP_REGS+(PT_R13*8))(r31)
lfd fp12,(SIGCONTEXT_FP_REGS+(PT_R12*8))(r31) lfd fp12,(SIGCONTEXT_FP_REGS+(PT_R12*8))(r31)
lfd fp11,(SIGCONTEXT_FP_REGS+(PT_R11*8))(r31) lfd fp11,(SIGCONTEXT_FP_REGS+(PT_R11*8))(r31)
lfd fp10,(SIGCONTEXT_FP_REGS+(PT_R10*8))(r31) lfd fp10,(SIGCONTEXT_FP_REGS+(PT_R10*8))(r31)
lfd fp9,(SIGCONTEXT_FP_REGS+(PT_R9*8))(r31) lfd fp9,(SIGCONTEXT_FP_REGS+(PT_R9*8))(r31)
lfd fp8,(SIGCONTEXT_FP_REGS+(PT_R8*8))(r31) lfd fp8,(SIGCONTEXT_FP_REGS+(PT_R8*8))(r31)
lfd fp7,(SIGCONTEXT_FP_REGS+(PT_R7*8))(r31) lfd fp7,(SIGCONTEXT_FP_REGS+(PT_R7*8))(r31)
lfd fp6,(SIGCONTEXT_FP_REGS+(PT_R6*8))(r31) lfd fp6,(SIGCONTEXT_FP_REGS+(PT_R6*8))(r31)
lfd fp5,(SIGCONTEXT_FP_REGS+(PT_R5*8))(r31) lfd fp5,(SIGCONTEXT_FP_REGS+(PT_R5*8))(r31)
lfd fp4,(SIGCONTEXT_FP_REGS+(PT_R4*8))(r31) lfd fp4,(SIGCONTEXT_FP_REGS+(PT_R4*8))(r31)
lfd fp3,(SIGCONTEXT_FP_REGS+(PT_R3*8))(r31) lfd fp3,(SIGCONTEXT_FP_REGS+(PT_R3*8))(r31)
lfd fp2,(SIGCONTEXT_FP_REGS+(PT_R2*8))(r31) lfd fp2,(SIGCONTEXT_FP_REGS+(PT_R2*8))(r31)
lfd fp1,(SIGCONTEXT_FP_REGS+(PT_R1*8))(r31) lfd fp1,(SIGCONTEXT_FP_REGS+(PT_R1*8))(r31)
lfd fp0,(SIGCONTEXT_FP_REGS+(PT_R0*8))(r31) lfd fp0,(SIGCONTEXT_FP_REGS+(PT_R0*8))(r31)
ld r0,(SIGCONTEXT_GP_REGS+(PT_LNK*8))(r31) ld r0,(SIGCONTEXT_GP_REGS+(PT_LNK*8))(r31)
ld r1,(SIGCONTEXT_GP_REGS+(PT_R1*8))(r31) ld r1,(SIGCONTEXT_GP_REGS+(PT_R1*8))(r31)
mtlr r0 mtlr r0
ld r2,(SIGCONTEXT_GP_REGS+(PT_R2*8))(r31) ld r2,(SIGCONTEXT_GP_REGS+(PT_R2*8))(r31)
@ -129,33 +129,33 @@ ENTRY(__novec_setcontext)
ld r28,(SIGCONTEXT_GP_REGS+(PT_R28*8))(r31) ld r28,(SIGCONTEXT_GP_REGS+(PT_R28*8))(r31)
ld r29,(SIGCONTEXT_GP_REGS+(PT_R29*8))(r31) ld r29,(SIGCONTEXT_GP_REGS+(PT_R29*8))(r31)
ld r30,(SIGCONTEXT_GP_REGS+(PT_R30*8))(r31) ld r30,(SIGCONTEXT_GP_REGS+(PT_R30*8))(r31)
/* Now we branch to the "Next Instruction Pointer" from the saved /* Now we branch to the "Next Instruction Pointer" from the saved
context. With the powerpc64 instruction set there is no good way to context. With the powerpc64 instruction set there is no good way to
do this (from user state) without clobbering either the LR or CTR. do this (from user state) without clobbering either the LR or CTR.
The makecontext and swapcontext functions depend on the callers The makecontext and swapcontext functions depend on the callers
LR being preserved so we use the CTR. */ LR being preserved so we use the CTR. */
ld r0,(SIGCONTEXT_GP_REGS+(PT_NIP*8))(r31) ld r0,(SIGCONTEXT_GP_REGS+(PT_NIP*8))(r31)
mtctr r0 mtctr r0
ld r0,(SIGCONTEXT_GP_REGS+(PT_R0*8))(r31) ld r0,(SIGCONTEXT_GP_REGS+(PT_R0*8))(r31)
ld r31,(SIGCONTEXT_GP_REGS+(PT_R31*8))(r31) ld r31,(SIGCONTEXT_GP_REGS+(PT_R31*8))(r31)
bctr bctr
L(nv_error_exit): L(nv_error_exit):
ld r0,128+FRAME_LR_SAVE(r1) ld r0,128+FRAME_LR_SAVE(r1)
addi r1,r1,128 addi r1,r1,128
mtlr r0 mtlr r0
ld r31,-8(r1) ld r31,-8(r1)
blr blr
/* At this point we assume that the ucontext was created by a /* At this point we assume that the ucontext was created by a
rt_signal and we should use rt_sigreturn to restore the original rt_signal and we should use rt_sigreturn to restore the original
state. As of the 2.4.21 kernel the ucontext is the first thing state. As of the 2.4.21 kernel the ucontext is the first thing
(offset 0) in the rt_signal frame and rt_sigreturn expects the (offset 0) in the rt_signal frame and rt_sigreturn expects the
ucontext address in R1. Normally the rt-signal trampoline handles ucontext address in R1. Normally the rt-signal trampoline handles
this by popping dummy frame before the rt_signal syscall. In our this by popping dummy frame before the rt_signal syscall. In our
case the stack may not be in its original (signal handler return with case the stack may not be in its original (signal handler return with
R1 pointing at the dummy frame) state. We do have the ucontext R1 pointing at the dummy frame) state. We do have the ucontext
address in R3, so simply copy R3 to R1 before the syscall. */ address in R3, so simply copy R3 to R1 before the syscall. */
L(nv_do_sigret): L(nv_do_sigret):
mr r1,r3, mr r1,r3,
@ -171,7 +171,7 @@ L(nv_do_sigret):
bl JUMPTARGET(__syscall_error) bl JUMPTARGET(__syscall_error)
nop nop
li r3,-1 li r3,-1
ld r0,128+FRAME_LR_SAVE(r1) ld r0,128+FRAME_LR_SAVE(r1)
addi r1,r1,128 addi r1,r1,128
mtlr r0 mtlr r0
blr blr
@ -186,7 +186,7 @@ compat_symbol (libc, __novec_setcontext, setcontext, GLIBC_2_3)
.section ".toc","aw" .section ".toc","aw"
.LC__dl_hwcap: .LC__dl_hwcap:
#ifdef SHARED #ifdef SHARED
.tc _rtld_global[TC],_rtld_global .tc _rtld_global_ro[TC],_rtld_global_ro
#else #else
.tc _dl_hwcap[TC],_dl_hwcap .tc _dl_hwcap[TC],_dl_hwcap
#endif #endif
@ -221,153 +221,153 @@ ENTRY(__setcontext)
nop nop
cmpdi r3,0 cmpdi r3,0
bne L(error_exit) bne L(error_exit)
ld r5,.LC__dl_hwcap@toc(r2) ld r5,.LC__dl_hwcap@toc(r2)
ld r10,(SIGCONTEXT_V_REGS_PTR)(r31) ld r10,(SIGCONTEXT_V_REGS_PTR)(r31)
#ifdef SHARED # ifdef SHARED
/* Load _rtld-global._dl_hwcap. */ /* Load _rtld-global._dl_hwcap. */
ld r5,RTLD_GLOBAL_DL_HWCAP_OFFSET(r5) ld r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
#else # else
ld r5,0(r5) /* Load extern _dl_hwcap. */ ld r5,0(r5) /* Load extern _dl_hwcap. */
#endif # endif
andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16) andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
beq L(has_no_vec) beq L(has_no_vec)
cmpdi r10,0 cmpdi r10,0
beq L(has_no_vec) beq L(has_no_vec)
lwz r0,(33*16)(r10) lwz r0,(33*16)(r10)
li r9,(16*32) li r9,(16*32)
mtspr VRSAVE,r0 mtspr VRSAVE,r0
cmpwi r0,0 cmpwi r0,0
beq L(has_no_vec) beq L(has_no_vec)
lvx v19,r9,r10 lvx v19,r9,r10
la r9,(16)(r10) la r9,(16)(r10)
lvx v0,0,r10 lvx v0,0,r10
lvx v1,0,r9 lvx v1,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
mtvscr v19 mtvscr v19
lvx v2,0,r10 lvx v2,0,r10
lvx v3,0,r9 lvx v3,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v4,0,r10 lvx v4,0,r10
lvx v5,0,r9 lvx v5,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v6,0,r10 lvx v6,0,r10
lvx v7,0,r9 lvx v7,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v8,0,r10 lvx v8,0,r10
lvx v9,0,r9 lvx v9,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v10,0,r10 lvx v10,0,r10
lvx v11,0,r9 lvx v11,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v12,0,r10 lvx v12,0,r10
lvx v13,0,r9 lvx v13,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v14,0,r10 lvx v14,0,r10
lvx v15,0,r9 lvx v15,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v16,0,r10 lvx v16,0,r10
lvx v17,0,r9 lvx v17,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v18,0,r10 lvx v18,0,r10
lvx v11,0,r9 lvx v11,0,r9
addi r19,r10,32 addi r19,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v20,0,r10 lvx v20,0,r10
lvx v21,0,r9 lvx v21,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v22,0,r10 lvx v22,0,r10
lvx v23,0,r9 lvx v23,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v24,0,r10 lvx v24,0,r10
lvx v25,0,r9 lvx v25,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v26,0,r10 lvx v26,0,r10
lvx v27,0,r9 lvx v27,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v28,0,r10 lvx v28,0,r10
lvx v29,0,r9 lvx v29,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v30,0,r10 lvx v30,0,r10
lvx v31,0,r9 lvx v31,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
lvx v10,0,r10 lvx v10,0,r10
lvx v11,0,r9 lvx v11,0,r9
addi r10,r10,32 addi r10,r10,32
addi r9,r9,32 addi r9,r9,32
L(has_no_vec): L(has_no_vec):
lfd fp0,(SIGCONTEXT_FP_REGS+(32*8))(r31) lfd fp0,(SIGCONTEXT_FP_REGS+(32*8))(r31)
lfd fp31,(SIGCONTEXT_FP_REGS+(PT_R31*8))(r31) lfd fp31,(SIGCONTEXT_FP_REGS+(PT_R31*8))(r31)
lfd fp30,(SIGCONTEXT_FP_REGS+(PT_R30*8))(r31) lfd fp30,(SIGCONTEXT_FP_REGS+(PT_R30*8))(r31)
mtfsf 0xff,fp0 mtfsf 0xff,fp0
lfd fp29,(SIGCONTEXT_FP_REGS+(PT_R29*8))(r31) lfd fp29,(SIGCONTEXT_FP_REGS+(PT_R29*8))(r31)
lfd fp28,(SIGCONTEXT_FP_REGS+(PT_R28*8))(r31) lfd fp28,(SIGCONTEXT_FP_REGS+(PT_R28*8))(r31)
lfd fp27,(SIGCONTEXT_FP_REGS+(PT_R27*8))(r31) lfd fp27,(SIGCONTEXT_FP_REGS+(PT_R27*8))(r31)
lfd fp26,(SIGCONTEXT_FP_REGS+(PT_R26*8))(r31) lfd fp26,(SIGCONTEXT_FP_REGS+(PT_R26*8))(r31)
lfd fp25,(SIGCONTEXT_FP_REGS+(PT_R25*8))(r31) lfd fp25,(SIGCONTEXT_FP_REGS+(PT_R25*8))(r31)
lfd fp24,(SIGCONTEXT_FP_REGS+(PT_R24*8))(r31) lfd fp24,(SIGCONTEXT_FP_REGS+(PT_R24*8))(r31)
lfd fp23,(SIGCONTEXT_FP_REGS+(PT_R23*8))(r31) lfd fp23,(SIGCONTEXT_FP_REGS+(PT_R23*8))(r31)
lfd fp22,(SIGCONTEXT_FP_REGS+(PT_R22*8))(r31) lfd fp22,(SIGCONTEXT_FP_REGS+(PT_R22*8))(r31)
lfd fp21,(SIGCONTEXT_FP_REGS+(PT_R21*8))(r31) lfd fp21,(SIGCONTEXT_FP_REGS+(PT_R21*8))(r31)
lfd fp20,(SIGCONTEXT_FP_REGS+(PT_R20*8))(r31) lfd fp20,(SIGCONTEXT_FP_REGS+(PT_R20*8))(r31)
lfd fp19,(SIGCONTEXT_FP_REGS+(PT_R19*8))(r31) lfd fp19,(SIGCONTEXT_FP_REGS+(PT_R19*8))(r31)
lfd fp18,(SIGCONTEXT_FP_REGS+(PT_R18*8))(r31) lfd fp18,(SIGCONTEXT_FP_REGS+(PT_R18*8))(r31)
lfd fp17,(SIGCONTEXT_FP_REGS+(PT_R17*8))(r31) lfd fp17,(SIGCONTEXT_FP_REGS+(PT_R17*8))(r31)
lfd fp16,(SIGCONTEXT_FP_REGS+(PT_R16*8))(r31) lfd fp16,(SIGCONTEXT_FP_REGS+(PT_R16*8))(r31)
lfd fp15,(SIGCONTEXT_FP_REGS+(PT_R15*8))(r31) lfd fp15,(SIGCONTEXT_FP_REGS+(PT_R15*8))(r31)
lfd fp14,(SIGCONTEXT_FP_REGS+(PT_R14*8))(r31) lfd fp14,(SIGCONTEXT_FP_REGS+(PT_R14*8))(r31)
lfd fp13,(SIGCONTEXT_FP_REGS+(PT_R13*8))(r31) lfd fp13,(SIGCONTEXT_FP_REGS+(PT_R13*8))(r31)
lfd fp12,(SIGCONTEXT_FP_REGS+(PT_R12*8))(r31) lfd fp12,(SIGCONTEXT_FP_REGS+(PT_R12*8))(r31)
lfd fp11,(SIGCONTEXT_FP_REGS+(PT_R11*8))(r31) lfd fp11,(SIGCONTEXT_FP_REGS+(PT_R11*8))(r31)
lfd fp10,(SIGCONTEXT_FP_REGS+(PT_R10*8))(r31) lfd fp10,(SIGCONTEXT_FP_REGS+(PT_R10*8))(r31)
lfd fp9,(SIGCONTEXT_FP_REGS+(PT_R9*8))(r31) lfd fp9,(SIGCONTEXT_FP_REGS+(PT_R9*8))(r31)
lfd fp8,(SIGCONTEXT_FP_REGS+(PT_R8*8))(r31) lfd fp8,(SIGCONTEXT_FP_REGS+(PT_R8*8))(r31)
lfd fp7,(SIGCONTEXT_FP_REGS+(PT_R7*8))(r31) lfd fp7,(SIGCONTEXT_FP_REGS+(PT_R7*8))(r31)
lfd fp6,(SIGCONTEXT_FP_REGS+(PT_R6*8))(r31) lfd fp6,(SIGCONTEXT_FP_REGS+(PT_R6*8))(r31)
lfd fp5,(SIGCONTEXT_FP_REGS+(PT_R5*8))(r31) lfd fp5,(SIGCONTEXT_FP_REGS+(PT_R5*8))(r31)
lfd fp4,(SIGCONTEXT_FP_REGS+(PT_R4*8))(r31) lfd fp4,(SIGCONTEXT_FP_REGS+(PT_R4*8))(r31)
lfd fp3,(SIGCONTEXT_FP_REGS+(PT_R3*8))(r31) lfd fp3,(SIGCONTEXT_FP_REGS+(PT_R3*8))(r31)
lfd fp2,(SIGCONTEXT_FP_REGS+(PT_R2*8))(r31) lfd fp2,(SIGCONTEXT_FP_REGS+(PT_R2*8))(r31)
lfd fp1,(SIGCONTEXT_FP_REGS+(PT_R1*8))(r31) lfd fp1,(SIGCONTEXT_FP_REGS+(PT_R1*8))(r31)
lfd fp0,(SIGCONTEXT_FP_REGS+(PT_R0*8))(r31) lfd fp0,(SIGCONTEXT_FP_REGS+(PT_R0*8))(r31)
ld r0,(SIGCONTEXT_GP_REGS+(PT_LNK*8))(r31) ld r0,(SIGCONTEXT_GP_REGS+(PT_LNK*8))(r31)
ld r1,(SIGCONTEXT_GP_REGS+(PT_R1*8))(r31) ld r1,(SIGCONTEXT_GP_REGS+(PT_R1*8))(r31)
mtlr r0 mtlr r0
ld r2,(SIGCONTEXT_GP_REGS+(PT_R2*8))(r31) ld r2,(SIGCONTEXT_GP_REGS+(PT_R2*8))(r31)
@ -403,33 +403,33 @@ L(has_no_vec):
ld r28,(SIGCONTEXT_GP_REGS+(PT_R28*8))(r31) ld r28,(SIGCONTEXT_GP_REGS+(PT_R28*8))(r31)
ld r29,(SIGCONTEXT_GP_REGS+(PT_R29*8))(r31) ld r29,(SIGCONTEXT_GP_REGS+(PT_R29*8))(r31)
ld r30,(SIGCONTEXT_GP_REGS+(PT_R30*8))(r31) ld r30,(SIGCONTEXT_GP_REGS+(PT_R30*8))(r31)
/* Now we branch to the "Next Instruction Pointer" from the saved /* Now we branch to the "Next Instruction Pointer" from the saved
context. With the powerpc64 instruction set there is no good way to context. With the powerpc64 instruction set there is no good way to
do this (from user state) without clobbering either the LR or CTR. do this (from user state) without clobbering either the LR or CTR.
The makecontext and swapcontext functions depend on the callers The makecontext and swapcontext functions depend on the callers
LR being preserved so we use the CTR. */ LR being preserved so we use the CTR. */
ld r0,(SIGCONTEXT_GP_REGS+(PT_NIP*8))(r31) ld r0,(SIGCONTEXT_GP_REGS+(PT_NIP*8))(r31)
mtctr r0 mtctr r0
ld r0,(SIGCONTEXT_GP_REGS+(PT_R0*8))(r31) ld r0,(SIGCONTEXT_GP_REGS+(PT_R0*8))(r31)
ld r31,(SIGCONTEXT_GP_REGS+(PT_R31*8))(r31) ld r31,(SIGCONTEXT_GP_REGS+(PT_R31*8))(r31)
bctr bctr
L(error_exit): L(error_exit):
ld r0,128+FRAME_LR_SAVE(r1) ld r0,128+FRAME_LR_SAVE(r1)
addi r1,r1,128 addi r1,r1,128
mtlr r0 mtlr r0
ld r31,-8(r1) ld r31,-8(r1)
blr blr
/* At this point we assume that the ucontext was created by a /* At this point we assume that the ucontext was created by a
rt_signal and we should use rt_sigreturn to restore the original rt_signal and we should use rt_sigreturn to restore the original
state. As of the 2.4.21 kernel the ucontext is the first thing state. As of the 2.4.21 kernel the ucontext is the first thing
(offset 0) in the rt_signal frame and rt_sigreturn expects the (offset 0) in the rt_signal frame and rt_sigreturn expects the
ucontext address in R1. Normally the rt-signal trampoline handles ucontext address in R1. Normally the rt-signal trampoline handles
this by popping dummy frame before the rt_signal syscall. In our this by popping dummy frame before the rt_signal syscall. In our
case the stack may not be in its original (signal handler return with case the stack may not be in its original (signal handler return with
R1 pointing at the dummy frame) state. We do have the ucontext R1 pointing at the dummy frame) state. We do have the ucontext
address in R3, so simply copy R3 to R1 before the syscall. */ address in R3, so simply copy R3 to R1 before the syscall. */
L(do_sigret): L(do_sigret):
mr r1,r3, mr r1,r3,
@ -445,7 +445,7 @@ L(do_sigret):
bl JUMPTARGET(__syscall_error) bl JUMPTARGET(__syscall_error)
nop nop
li r3,-1 li r3,-1
ld r0,128+FRAME_LR_SAVE(r1) ld r0,128+FRAME_LR_SAVE(r1)
addi r1,r1,128 addi r1,r1,128
mtlr r0 mtlr r0
blr blr

View File

@ -393,7 +393,7 @@ ENTRY(__swapcontext)
li r10,0 li r10,0
#ifdef SHARED #ifdef SHARED
/* Load _rtld-global._dl_hwcap. */ /* Load _rtld-global._dl_hwcap. */
ld r8,RTLD_GLOBAL_DL_HWCAP_OFFSET(r8) ld r8,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r8)
#else #else
ld r8,0(r8) /* Load extern _dl_hwcap. */ ld r8,0(r8) /* Load extern _dl_hwcap. */
#endif #endif
@ -527,12 +527,12 @@ L(has_no_vec):
ld r8,.LC__dl_hwcap@toc(r2) ld r8,.LC__dl_hwcap@toc(r2)
ld r10,(SIGCONTEXT_V_REGS_PTR)(r31) ld r10,(SIGCONTEXT_V_REGS_PTR)(r31)
#ifdef SHARED # ifdef SHARED
/* Load _rtld-global._dl_hwcap. */ /* Load _rtld-global._dl_hwcap. */
ld r8,RTLD_GLOBAL_DL_HWCAP_OFFSET(r8) ld r8,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r8)
#else # else
ld r8,0(r8) /* Load extern _dl_hwcap. */ ld r8,0(r8) /* Load extern _dl_hwcap. */
#endif # endif
andis. r8,r8,(PPC_FEATURE_HAS_ALTIVEC >> 16) andis. r8,r8,(PPC_FEATURE_HAS_ALTIVEC >> 16)
beq L(has_no_vec2) beq L(has_no_vec2)