mirror of
https://sourceware.org/git/glibc.git
synced 2024-12-11 05:40:06 +00:00
404656009b
Some Linux interfaces never restart after being interrupted by a signal handler, regardless of the use of SA_RESTART [1]. This means that for pthread cancellation, if the target thread disables cancellation with pthread_setcancelstate and calls such interfaces (like poll or select), it should not see spurious EINTR failures due to the internal SIGCANCEL. However, recent changes made pthread_cancel always send the internal signal, regardless of the target thread's cancellation state or type. To fix it, the previous semantics are restored, where the cancel signal is only sent if the target thread has cancellation enabled in asynchronous mode. The cancel state and cancel type are moved back to cancelhandling, and atomic operations are used to synchronize between threads. The patch essentially reverts the following commits: 8c1c0aae20
nptl: Move cancel type out of cancelhandling2b51742531
nptl: Move cancel state out of cancelhandling26cfbb7162
nptl: Remove CANCELING_BITMASK. However, I changed the atomic operations to follow the internal C11 semantics and removed the macro usage, which simplifies the resulting code a bit (and removes another usage of the old atomic macros). Checked on x86_64-linux-gnu, i686-linux-gnu, aarch64-linux-gnu, and powerpc64-linux-gnu. [1] https://man7.org/linux/man-pages/man7/signal.7.html Reviewed-by: Florian Weimer <fweimer@redhat.com> Tested-by: Aurelien Jarno <aurelien@aurel32.net>
132 lines
4.2 KiB
C
132 lines
4.2 KiB
C
/* Completion of TCB initialization after TLS_INIT_TP.  NPTL version.
   Copyright (C) 2020-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <kernel-features.h>
#include <ldsodefs.h>
#include <list.h>
#include <pthreadP.h>
#include <tls.h>
#include <rseq-internal.h>
#include <thread_pointer.h>

#define TUNABLE_NAMESPACE pthread
#include <dl-tunables.h>

|
|
#ifndef __ASSUME_SET_ROBUST_LIST
|
|
bool __nptl_set_robust_list_avail;
|
|
rtld_hidden_data_def (__nptl_set_robust_list_avail)
|
|
#endif
|
|
|
|
bool __nptl_initial_report_events;
|
|
rtld_hidden_def (__nptl_initial_report_events)
|
|
|
|
#ifdef SHARED
|
|
/* Dummy implementation. See __rtld_mutex_init. */
|
|
static int
|
|
rtld_mutex_dummy (pthread_mutex_t *lock)
|
|
{
|
|
return 0;
|
|
}
|
|
#endif
|
|
|
|
const unsigned int __rseq_flags;
|
|
const unsigned int __rseq_size attribute_relro;
|
|
const ptrdiff_t __rseq_offset attribute_relro;
|
|
|
|
void
|
|
__tls_pre_init_tp (void)
|
|
{
|
|
/* The list data structures are not consistent until
|
|
initialized. */
|
|
INIT_LIST_HEAD (&GL (dl_stack_used));
|
|
INIT_LIST_HEAD (&GL (dl_stack_user));
|
|
INIT_LIST_HEAD (&GL (dl_stack_cache));
|
|
|
|
#ifdef SHARED
|
|
___rtld_mutex_lock = rtld_mutex_dummy;
|
|
___rtld_mutex_unlock = rtld_mutex_dummy;
|
|
#endif
|
|
}
|
|
|
|
void
|
|
__tls_init_tp (void)
|
|
{
|
|
struct pthread *pd = THREAD_SELF;
|
|
|
|
/* Set up thread stack list management. */
|
|
list_add (&pd->list, &GL (dl_stack_user));
|
|
|
|
/* Early initialization of the TCB. */
|
|
pd->tid = INTERNAL_SYSCALL_CALL (set_tid_address, &pd->tid);
|
|
THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
|
|
THREAD_SETMEM (pd, user_stack, true);
|
|
|
|
/* Before initializing GL (dl_stack_user), the debugger could not
|
|
find us and had to set __nptl_initial_report_events. Propagate
|
|
its setting. */
|
|
THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);
|
|
|
|
/* Initialize the robust mutex data. */
|
|
{
|
|
#if __PTHREAD_MUTEX_HAVE_PREV
|
|
pd->robust_prev = &pd->robust_head;
|
|
#endif
|
|
pd->robust_head.list = &pd->robust_head;
|
|
pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
|
|
- offsetof (pthread_mutex_t,
|
|
__data.__list.__next));
|
|
int res = INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
|
|
sizeof (struct robust_list_head));
|
|
if (!INTERNAL_SYSCALL_ERROR_P (res))
|
|
{
|
|
#ifndef __ASSUME_SET_ROBUST_LIST
|
|
__nptl_set_robust_list_avail = true;
|
|
#endif
|
|
}
|
|
}
|
|
|
|
{
|
|
bool do_rseq = true;
|
|
#if HAVE_TUNABLES
|
|
do_rseq = TUNABLE_GET (rseq, int, NULL);
|
|
#endif
|
|
if (rseq_register_current_thread (pd, do_rseq))
|
|
{
|
|
/* We need a writable view of the variables. They are in
|
|
.data.relro and are not yet write-protected. */
|
|
extern unsigned int size __asm__ ("__rseq_size");
|
|
size = sizeof (pd->rseq_area);
|
|
}
|
|
|
|
#ifdef RSEQ_SIG
|
|
/* This should be a compile-time constant, but the current
|
|
infrastructure makes it difficult to determine its value. Not
|
|
all targets support __thread_pointer, so set __rseq_offset only
|
|
if thre rseq registration may have happened because RSEQ_SIG is
|
|
defined. */
|
|
extern ptrdiff_t offset __asm__ ("__rseq_offset");
|
|
offset = (char *) &pd->rseq_area - (char *) __thread_pointer ();
|
|
#endif
|
|
}
|
|
|
|
/* Set initial thread's stack block from 0 up to __libc_stack_end.
|
|
It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
|
|
purposes this is good enough. */
|
|
THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);
|
|
}
|