/* Thread-local storage handling in the ELF dynamic linker.
   AArch64 version.
   Copyright (C) 2011-2016 Free Software Foundation, Inc.

   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <tls.h>
#include "tlsdesc.h"

#define NSAVEDQREGPAIRS 16

#define SAVE_Q_REGISTERS \
	stp q0, q1, [sp, #-32*NSAVEDQREGPAIRS]!; \
	cfi_adjust_cfa_offset (32*NSAVEDQREGPAIRS); \
	stp q2, q3, [sp, #32*1]; \
	stp q4, q5, [sp, #32*2]; \
	stp q6, q7, [sp, #32*3]; \
	stp q8, q9, [sp, #32*4]; \
	stp q10, q11, [sp, #32*5]; \
	stp q12, q13, [sp, #32*6]; \
	stp q14, q15, [sp, #32*7]; \
	stp q16, q17, [sp, #32*8]; \
	stp q18, q19, [sp, #32*9]; \
	stp q20, q21, [sp, #32*10]; \
	stp q22, q23, [sp, #32*11]; \
	stp q24, q25, [sp, #32*12]; \
	stp q26, q27, [sp, #32*13]; \
	stp q28, q29, [sp, #32*14]; \
	stp q30, q31, [sp, #32*15];

#define RESTORE_Q_REGISTERS \
	ldp q2, q3, [sp, #32*1]; \
	ldp q4, q5, [sp, #32*2]; \
	ldp q6, q7, [sp, #32*3]; \
	ldp q8, q9, [sp, #32*4]; \
	ldp q10, q11, [sp, #32*5]; \
	ldp q12, q13, [sp, #32*6]; \
	ldp q14, q15, [sp, #32*7]; \
	ldp q16, q17, [sp, #32*8]; \
	ldp q18, q19, [sp, #32*9]; \
	ldp q20, q21, [sp, #32*10]; \
	ldp q22, q23, [sp, #32*11]; \
	ldp q24, q25, [sp, #32*12]; \
	ldp q26, q27, [sp, #32*13]; \
	ldp q28, q29, [sp, #32*14]; \
	ldp q30, q31, [sp, #32*15]; \
	ldp q0, q1, [sp], #32*NSAVEDQREGPAIRS; \
	cfi_adjust_cfa_offset (-32*NSAVEDQREGPAIRS);
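
/* A note on the macros above: the slow paths below call into C
   (__tls_get_addr and the lazy resolution fixups), and under the
   standard calling convention such a callee only preserves the low
   64 bits of v8-v15, whereas the TLSDESC entry points in this file
   are expected to hand back every register except x0 unchanged.
   That is why all 32 SIMD registers are spilled around those calls.  */
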
.text

/* Compute the thread pointer offset for symbols in the static
   TLS block.  The offset is the same for all threads.
   Prototype:
   _dl_tlsdesc_return (tlsdesc *);  */
	.hidden _dl_tlsdesc_return
	.global _dl_tlsdesc_return
	.type _dl_tlsdesc_return,%function
	cfi_startproc
	.align 2
_dl_tlsdesc_return:
	ldr x0, [x0, #8]
	RET
	cfi_endproc
	.size _dl_tlsdesc_return, .-_dl_tlsdesc_return
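
/* For reference, a simplified sketch of the compiler-generated TLSDESC
   call site (not emitted by this file) is roughly

	ldr x1, [x0]	// load td->entry
	blr x1		// call it; the TP offset comes back in x0

   so each entry function receives the address of the descriptor in x0,
   returns the offset of the symbol from the thread pointer in x0, and
   preserves the remaining registers.  _dl_tlsdesc_return above simply
   returns td->arg, which for static TLS already holds that offset.  */
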
/* Same as _dl_tlsdesc_return but with synchronization for
   lazy relocation.
   Prototype:
   _dl_tlsdesc_return_lazy (tlsdesc *);  */
	.hidden _dl_tlsdesc_return_lazy
	.global _dl_tlsdesc_return_lazy
	.type _dl_tlsdesc_return_lazy,%function
	cfi_startproc
	.align 2
_dl_tlsdesc_return_lazy:
	/* The ldar here happens after the load from [x0] at the call site
	   (that is generated by the compiler as part of the TLS access ABI),
	   so it reads the same value (this function is the final value of
	   td->entry) and thus it synchronizes with the release store to
	   td->entry in _dl_tlsdesc_resolve_rela_fixup ensuring that the load
	   from [x0,#8] here happens after the initialization of td->arg.  */
	ldar xzr, [x0]
	ldr x0, [x0, #8]
	RET
	cfi_endproc
	.size _dl_tlsdesc_return_lazy, .-_dl_tlsdesc_return_lazy

/* Handler for undefined weak TLS symbols.
   Prototype:
   _dl_tlsdesc_undefweak (tlsdesc *);

   The second word of the descriptor contains the addend.
   Return the addend minus the thread pointer.  This ensures
   that when the caller adds on the thread pointer it gets back
   the addend.  */
	.hidden _dl_tlsdesc_undefweak
	.global _dl_tlsdesc_undefweak
	.type _dl_tlsdesc_undefweak,%function
	cfi_startproc
	.align 2
_dl_tlsdesc_undefweak:
	str x1, [sp, #-16]!
	cfi_adjust_cfa_offset (16)
	/* The ldar here happens after the load from [x0] at the call site
	   (that is generated by the compiler as part of the TLS access ABI),
	   so it reads the same value (this function is the final value of
	   td->entry) and thus it synchronizes with the release store to
	   td->entry in _dl_tlsdesc_resolve_rela_fixup ensuring that the load
	   from [x0,#8] here happens after the initialization of td->arg.  */
	ldar xzr, [x0]
	ldr x0, [x0, #8]
	mrs x1, tpidr_el0
	sub x0, x0, x1
	ldr x1, [sp], #16
	cfi_adjust_cfa_offset (-16)
	RET
	cfi_endproc
	.size _dl_tlsdesc_undefweak, .-_dl_tlsdesc_undefweak

#ifdef SHARED
/* Handler for dynamic TLS symbols.
   Prototype:
   _dl_tlsdesc_dynamic (tlsdesc *);

   The second word of the descriptor points to a
   tlsdesc_dynamic_arg structure.

   Returns the offset between the thread pointer and the
   object referenced by the argument.

   ptrdiff_t
   __attribute__ ((__regparm__ (1)))
   _dl_tlsdesc_dynamic (struct tlsdesc *tdp)
   {
     struct tlsdesc_dynamic_arg *td = tdp->arg;
     dtv_t *dtv = *(dtv_t **)((char *)__thread_pointer + DTV_OFFSET);

     if (__builtin_expect (td->gen_count <= dtv[0].counter
			   && (dtv[td->tlsinfo.ti_module].pointer.val
			       != TLS_DTV_UNALLOCATED),
			   1))
       return dtv[td->tlsinfo.ti_module].pointer.val
	      + td->tlsinfo.ti_offset
	      - __thread_pointer;

     return ___tls_get_addr (&td->tlsinfo) - __thread_pointer;
   }  */
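
/* Layout assumed by the fast path below (a sketch matching the C
   outline above): tdp->arg is the second word of the descriptor;
   within tlsdesc_dynamic_arg, ti_module is at offset 0, ti_offset at
   offset 8 and gen_count at offset 16; dtv entries are 16 bytes, so
   dtv[ti_module] is indexed with a shift of 4.  */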
	.hidden _dl_tlsdesc_dynamic
	.global _dl_tlsdesc_dynamic
	.type _dl_tlsdesc_dynamic,%function
	cfi_startproc
	.align 2
_dl_tlsdesc_dynamic:
# define NSAVEXREGPAIRS 2
	stp x29, x30, [sp,#-(32+16*NSAVEXREGPAIRS)]!
	cfi_adjust_cfa_offset (32+16*NSAVEXREGPAIRS)
	mov x29, sp

	/* Save just enough registers to support the fast path; if we fall
	   into the slow path we will save additional registers there.  */
	stp x1, x2, [sp, #32+16*0]
	stp x3, x4, [sp, #32+16*1]

	mrs x4, tpidr_el0
	/* The ldar here happens after the load from [x0] at the call site
	   (that is generated by the compiler as part of the TLS access ABI),
	   so it reads the same value (this function is the final value of
	   td->entry) and thus it synchronizes with the release store to
	   td->entry in _dl_tlsdesc_resolve_rela_fixup ensuring that the load
	   from [x0,#8] here happens after the initialization of td->arg.  */
	ldar xzr, [x0]
	ldr x1, [x0,#8]		/* x1 = td (the tlsdesc_dynamic_arg).  */
	ldr x0, [x4]		/* x0 = dtv, read from the TCB.  */
	ldr x3, [x1,#16]	/* x3 = td->gen_count.  */
	ldr x2, [x0]		/* x2 = dtv[0].counter.  */
	cmp x3, x2
	b.hi 2f			/* Slow path if the DTV is out of date.  */
	ldr x2, [x1]		/* x2 = td->tlsinfo.ti_module.  */
	add x0, x0, x2, lsl #4	/* x0 = &dtv[ti_module].  */
	ldr x0, [x0]		/* x0 = dtv[ti_module].pointer.val.  */
	cmn x0, #0x1
	b.eq 2f			/* Slow path if TLS_DTV_UNALLOCATED.  */
	ldr x1, [x1,#8]		/* x1 = td->tlsinfo.ti_offset.  */
	add x0, x0, x1
	sub x0, x0, x4		/* Return the offset from the thread pointer.  */
1:
	ldp x1, x2, [sp, #32+16*0]
	ldp x3, x4, [sp, #32+16*1]

	ldp x29, x30, [sp], #(32+16*NSAVEXREGPAIRS)
	cfi_adjust_cfa_offset (-32-16*NSAVEXREGPAIRS)
# undef NSAVEXREGPAIRS
	RET
2:
	/* This is the slow path.  We need to call __tls_get_addr() which
	   means we need to save and restore all the registers that the
	   callee may clobber.  */

	/* Save the remaining registers that we must treat as caller save.  */
# define NSAVEXREGPAIRS 7
	stp x5, x6, [sp, #-16*NSAVEXREGPAIRS]!
	cfi_adjust_cfa_offset (16*NSAVEXREGPAIRS)
	stp x7, x8, [sp, #16*1]
	stp x9, x10, [sp, #16*2]
	stp x11, x12, [sp, #16*3]
	stp x13, x14, [sp, #16*4]
	stp x15, x16, [sp, #16*5]
	stp x17, x18, [sp, #16*6]

	SAVE_Q_REGISTERS

	mov x0, x1		/* Pass &td->tlsinfo (== td) to __tls_get_addr.  */
	bl __tls_get_addr

	mrs x1, tpidr_el0
	sub x0, x0, x1		/* Convert the address to a TP offset.  */

	RESTORE_Q_REGISTERS

	ldp x7, x8, [sp, #16*1]
	ldp x9, x10, [sp, #16*2]
	ldp x11, x12, [sp, #16*3]
	ldp x13, x14, [sp, #16*4]
	ldp x15, x16, [sp, #16*5]
	ldp x17, x18, [sp, #16*6]
	ldp x5, x6, [sp], #16*NSAVEXREGPAIRS
	cfi_adjust_cfa_offset (-16*NSAVEXREGPAIRS)
	b 1b
	cfi_endproc
	.size _dl_tlsdesc_dynamic, .-_dl_tlsdesc_dynamic
# undef NSAVEXREGPAIRS
#endif

/* This function is a wrapper for a lazy resolver for TLS_DESC
   RELA relocations.
   When the actual resolver returns, it will have adjusted the
   TLS descriptor such that we can tail-call it for it to return
   the TP offset of the symbol.  */
	.hidden _dl_tlsdesc_resolve_rela
	.global _dl_tlsdesc_resolve_rela
	.type _dl_tlsdesc_resolve_rela,%function
	cfi_startproc
	.align 2
_dl_tlsdesc_resolve_rela:
#define NSAVEXREGPAIRS 9
	stp x29, x30, [sp, #-(32+16*NSAVEXREGPAIRS)]!
	cfi_adjust_cfa_offset (32+16*NSAVEXREGPAIRS)
	mov x29, sp
	stp x1, x4, [sp, #32+16*0]
	stp x5, x6, [sp, #32+16*1]
	stp x7, x8, [sp, #32+16*2]
	stp x9, x10, [sp, #32+16*3]
	stp x11, x12, [sp, #32+16*4]
	stp x13, x14, [sp, #32+16*5]
	stp x15, x16, [sp, #32+16*6]
	stp x17, x18, [sp, #32+16*7]
	str x0, [sp, #32+16*8]

	SAVE_Q_REGISTERS

	ldr x1, [x3, #8]
	bl _dl_tlsdesc_resolve_rela_fixup

	RESTORE_Q_REGISTERS

	ldr x0, [sp, #32+16*8]
	ldr x1, [x0]		/* Reload the now-final td->entry ...  */
	blr x1			/* ... and call it to get the TP offset in x0.  */

	ldp x1, x4, [sp, #32+16*0]
	ldp x5, x6, [sp, #32+16*1]
	ldp x7, x8, [sp, #32+16*2]
	ldp x9, x10, [sp, #32+16*3]
	ldp x11, x12, [sp, #32+16*4]
	ldp x13, x14, [sp, #32+16*5]
	ldp x15, x16, [sp, #32+16*6]
	ldp x17, x18, [sp, #32+16*7]
	ldp x29, x30, [sp], #(32+16*NSAVEXREGPAIRS)
	cfi_adjust_cfa_offset (-32-16*NSAVEXREGPAIRS)
	ldp x2, x3, [sp], #16
	cfi_adjust_cfa_offset (-16)
	RET
#undef NSAVEXREGPAIRS
	cfi_endproc
	.size _dl_tlsdesc_resolve_rela, .-_dl_tlsdesc_resolve_rela

/* This function is a placeholder for lazy resolving of TLS
   relocations.  Once some thread starts resolving a TLS
   relocation, it sets up the TLS descriptor to use this
   resolver, such that other threads that would attempt to
   resolve it concurrently may skip the call to the original lazy
   resolver and go straight to a condition wait.

   When the actual resolver returns, it will have adjusted the
   TLS descriptor such that we can tail-call it for it to return
   the TP offset of the symbol.  */
	.hidden _dl_tlsdesc_resolve_hold
	.global _dl_tlsdesc_resolve_hold
	.type _dl_tlsdesc_resolve_hold,%function
	cfi_startproc
	.align 2
_dl_tlsdesc_resolve_hold:
#define NSAVEXREGPAIRS 10
1:
	stp x29, x30, [sp, #-(32+16*NSAVEXREGPAIRS)]!
	cfi_adjust_cfa_offset (32+16*NSAVEXREGPAIRS)
	mov x29, sp
	stp x1, x2, [sp, #32+16*0]
	stp x3, x4, [sp, #32+16*1]
	stp x5, x6, [sp, #32+16*2]
	stp x7, x8, [sp, #32+16*3]
	stp x9, x10, [sp, #32+16*4]
	stp x11, x12, [sp, #32+16*5]
	stp x13, x14, [sp, #32+16*6]
	stp x15, x16, [sp, #32+16*7]
	stp x17, x18, [sp, #32+16*8]
	str x0, [sp, #32+16*9]

	SAVE_Q_REGISTERS

	adr x1, 1b
	bl _dl_tlsdesc_resolve_hold_fixup

	RESTORE_Q_REGISTERS

	ldr x0, [sp, #32+16*9]
	ldr x1, [x0]
	blr x1

	ldp x1, x2, [sp, #32+16*0]
	ldp x3, x4, [sp, #32+16*1]
	ldp x5, x6, [sp, #32+16*2]
	ldp x7, x8, [sp, #32+16*3]
	ldp x9, x10, [sp, #32+16*4]
	ldp x11, x12, [sp, #32+16*5]
	ldp x13, x14, [sp, #32+16*6]
	ldp x15, x16, [sp, #32+16*7]
	ldp x17, x18, [sp, #32+16*8]
	ldp x29, x30, [sp], #(32+16*NSAVEXREGPAIRS)
	cfi_adjust_cfa_offset (-32-16*NSAVEXREGPAIRS)
	RET
	cfi_endproc
	.size _dl_tlsdesc_resolve_hold, .-_dl_tlsdesc_resolve_hold
#undef NSAVEXREGPAIRS