2021-04-21 17:49:51 +00:00
|
|
|
/* Completion of TCB initialization after TLS_INIT_TP. NPTL version.
|
2022-01-01 18:54:23 +00:00
|
|
|
Copyright (C) 2020-2022 Free Software Foundation, Inc.
|
2021-04-21 17:49:51 +00:00
|
|
|
This file is part of the GNU C Library.
|
|
|
|
|
|
|
|
The GNU C Library is free software; you can redistribute it and/or
|
|
|
|
modify it under the terms of the GNU Lesser General Public
|
|
|
|
License as published by the Free Software Foundation; either
|
|
|
|
version 2.1 of the License, or (at your option) any later version.
|
|
|
|
|
|
|
|
The GNU C Library is distributed in the hope that it will be useful,
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
Lesser General Public License for more details.
|
|
|
|
|
|
|
|
You should have received a copy of the GNU Lesser General Public
|
|
|
|
License along with the GNU C Library; if not, see
|
|
|
|
<https://www.gnu.org/licenses/>. */
|
|
|
|
|
2021-04-21 17:49:51 +00:00
|
|
|
#include <kernel-features.h>
|
2021-04-21 17:49:51 +00:00
|
|
|
#include <ldsodefs.h>
|
|
|
|
#include <list.h>
|
2021-06-22 07:50:27 +00:00
|
|
|
#include <pthreadP.h>
|
2021-04-21 17:49:51 +00:00
|
|
|
#include <tls.h>
|
2021-12-09 08:49:32 +00:00
|
|
|
#include <rseq-internal.h>
|
2021-12-09 08:49:32 +00:00
|
|
|
#include <thread_pointer.h>
|
2021-04-21 17:49:51 +00:00
|
|
|
|
2021-12-09 08:49:32 +00:00
|
|
|
#define TUNABLE_NAMESPACE pthread
|
|
|
|
#include <dl-tunables.h>
|
|
|
|
|
2021-04-21 17:49:51 +00:00
|
|
|
#ifndef __ASSUME_SET_ROBUST_LIST
/* True once a set_robust_list system call has succeeded (set in
   __tls_init_tp below).  Only needed when kernel support for the
   system call cannot be assumed at compile time.  */
bool __nptl_set_robust_list_avail;
rtld_hidden_data_def (__nptl_set_robust_list_avail)
#endif
|
|
|
|
|
2021-07-09 18:09:14 +00:00
|
|
|
bool __nptl_initial_report_events;
|
2021-05-17 07:59:14 +00:00
|
|
|
rtld_hidden_def (__nptl_initial_report_events)
|
|
|
|
|
2021-05-10 08:31:41 +00:00
|
|
|
#ifdef SHARED
|
|
|
|
/* Dummy implementation. See __rtld_mutex_init. */
|
|
|
|
static int
|
|
|
|
rtld_mutex_dummy (pthread_mutex_t *lock)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2021-12-09 08:49:32 +00:00
|
|
|
const unsigned int __rseq_flags;
|
|
|
|
const unsigned int __rseq_size attribute_relro;
|
2022-02-02 21:37:20 +00:00
|
|
|
const ptrdiff_t __rseq_offset attribute_relro;
|
2021-12-09 08:49:32 +00:00
|
|
|
|
2021-04-21 17:49:51 +00:00
|
|
|
void
|
2021-05-10 08:31:41 +00:00
|
|
|
__tls_pre_init_tp (void)
|
2021-04-21 17:49:51 +00:00
|
|
|
{
|
2021-05-10 08:31:41 +00:00
|
|
|
/* The list data structures are not consistent until
|
|
|
|
initialized. */
|
2021-04-21 17:49:51 +00:00
|
|
|
INIT_LIST_HEAD (&GL (dl_stack_used));
|
|
|
|
INIT_LIST_HEAD (&GL (dl_stack_user));
|
2021-05-10 08:31:41 +00:00
|
|
|
INIT_LIST_HEAD (&GL (dl_stack_cache));
|
2021-05-10 08:31:41 +00:00
|
|
|
|
|
|
|
#ifdef SHARED
|
|
|
|
___rtld_mutex_lock = rtld_mutex_dummy;
|
|
|
|
___rtld_mutex_unlock = rtld_mutex_dummy;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Finish initializing the initial thread's TCB after TLS_INIT_TP has
   set up the thread pointer.  */
void
__tls_init_tp (void)
{
  struct pthread *pd = THREAD_SELF;

  /* Set up thread stack list management.  */
  list_add (&pd->list, &GL (dl_stack_user));

  /* Early initialization of the TCB.  The set_tid_address system
     call also returns the caller's TID, which seeds pd->tid.  */
  pd->tid = INTERNAL_SYSCALL_CALL (set_tid_address, &pd->tid);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);

  /* Before initializing GL (dl_stack_user), the debugger could not
     find us and had to set __nptl_initial_report_events.  Propagate
     its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

  /* Initialize the robust mutex data.  */
  {
#if __PTHREAD_MUTEX_HAVE_PREV
    pd->robust_prev = &pd->robust_head;
#endif
    pd->robust_head.list = &pd->robust_head;
    /* Tell the kernel where the lock word lives relative to the list
       node embedded in each robust mutex.  */
    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                    - offsetof (pthread_mutex_t,
                                                __data.__list.__next));
    int res = INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
                                     sizeof (struct robust_list_head));
    if (!INTERNAL_SYSCALL_ERROR_P (res))
      {
#ifndef __ASSUME_SET_ROBUST_LIST
        /* Record that the kernel supports set_robust_list.  */
        __nptl_set_robust_list_avail = true;
#endif
      }
  }

  /* Register the rseq area, honoring the glibc.pthread.rseq tunable
     when tunables are available.  */
  {
    bool do_rseq = true;
#if HAVE_TUNABLES
    do_rseq = TUNABLE_GET (rseq, int, NULL);
#endif
    if (rseq_register_current_thread (pd, do_rseq))
      {
        /* We need a writable view of the variables.  They are in
           .data.relro and are not yet write-protected.  */
        extern unsigned int size __asm__ ("__rseq_size");
        size = sizeof (pd->rseq_area);
      }

#ifdef RSEQ_SIG
    /* This should be a compile-time constant, but the current
       infrastructure makes it difficult to determine its value.  Not
       all targets support __thread_pointer, so set __rseq_offset only
       if the rseq registration may have happened because RSEQ_SIG is
       defined.  */
    extern ptrdiff_t offset __asm__ ("__rseq_offset");
    offset = (char *) &pd->rseq_area - (char *) __thread_pointer ();
#endif
  }

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);
}
|