mirror of https://sourceware.org/git/glibc.git (synced 2024-12-01 17:30:07 +00:00)
ce7eb0e903
This patch wraps all uses of *_{enable,disable}_asynccancel and *_CANCEL_{ASYNC,RESET} either in already provided macros (lll_futex_timed_wait_cancel) or in new ones created where the functionality was not provided (SYSCALL_CANCEL_NCS, lll_futex_wait_cancel, and lll_futex_timed_wait_cancel).

Also, for some generic implementations, the direct calls of the macros are removed since the underlying symbols are supposed to provide cancellation support.

This is a preliminary patch intended to simplify the work required for the BZ#12683 fix.  It is a refactoring change; no semantic changes are expected.

Checked on x86_64-linux-gnu and i686-linux-gnu.

	* nptl/pthread_join_common.c (__pthread_timedjoin_ex): Use
	lll_wait_tid with timeout.
	* nptl/sem_wait.c (__old_sem_wait): Use lll_futex_wait_cancel.
	* sysdeps/nptl/aio_misc.h (AIO_MISC_WAIT): Use
	futex_reltimed_wait_cancelable for cancellable mode.
	* sysdeps/nptl/gai_misc.h (GAI_MISC_WAIT): Likewise.
	* sysdeps/posix/open64.c (__libc_open64): Do not call cancellation
	macros.
	* sysdeps/posix/sigwait.c (__sigwait): Likewise.
	* sysdeps/posix/waitid.c (__waitid): Likewise.
	* sysdeps/unix/sysdep.h (__SYSCALL_CANCEL_CALL, SYSCALL_CANCEL_NCS):
	New macros.
	* sysdeps/nptl/lowlevellock.h (lll_wait_tid): Add timeout argument.
	(lll_timedwait_tid): Remove macro.
	* sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_wait_tid):
	Likewise.
	(lll_timedwait_tid): Likewise.
	* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (lll_wait_tid):
	Likewise.
	(lll_timedwait_tid): Likewise.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_wait_tid):
	Likewise.
	(lll_timedwait_tid): Likewise.
	* sysdeps/unix/sysv/linux/clock_nanosleep.c (__clock_nanosleep): Use
	INTERNAL_SYSCALL_CANCEL.
	* sysdeps/unix/sysv/linux/futex-internal.h
	(futex_reltimed_wait_cancelable): Use LIBC_CANCEL_{ASYNC,RESET}
	instead of __pthread_{enable,disable}_asynccancel.
	* sysdeps/unix/sysv/linux/lowlevellock-futex.h
	(lll_futex_wait_cancel): New macro.
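The wrappers named above all follow the same async-cancel bracket: switch the calling thread to asynchronous cancellation, issue the blocking futex operation or syscall, then restore the previous cancellation type.  The sketch below shows only the shape of that pattern, using the public pthread_setcanceltype interface rather than the internal LIBC_CANCEL_{ASYNC,RESET} macros; the function read_cancellable and the choice of read as the blocking call are illustrative, not part of the patch.

#include <pthread.h>
#include <unistd.h>

/* Illustrative sketch only: bracket a blocking call with asynchronous
   cancellation and restore the previous cancellation type afterwards.
   The internal glibc wrappers (e.g. lll_futex_wait_cancel) apply the
   same idea around the futex syscall.  */
static ssize_t
read_cancellable (int fd, void *buf, size_t len)
{
  int oldtype;
  pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
  ssize_t ret = read (fd, buf, len);
  pthread_setcanceltype (oldtype, &oldtype);
  return ret;
}

Restoring the saved cancellation type is what keeps the asynchronous window confined to the blocking call itself, which is exactly the property the new macros are meant to centralize.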
109 lines
3.5 KiB
C
/* Common definition for pthread_{timed,try}join{_np}.
   Copyright (C) 2017-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include "pthreadP.h"
#include <atomic.h>
#include <stap-probe.h>

static void
cleanup (void *arg)
{
  /* If we already changed the waiter ID, reset it.  The call cannot
     fail for any reason but the thread not having done that yet so
     there is no reason for a loop.  */
  struct pthread *self = THREAD_SELF;
  atomic_compare_exchange_weak_acquire (&arg, &self, NULL);
}

int
__pthread_timedjoin_ex (pthread_t threadid, void **thread_return,
                        const struct timespec *abstime, bool block)
{
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (INVALID_NOT_TERMINATED_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Is the thread joinable?.  */
  if (IS_DETACHED (pd))
    /* We cannot wait for the thread.  */
    return EINVAL;

  struct pthread *self = THREAD_SELF;
  int result = 0;

  LIBC_PROBE (pthread_join, 1, threadid);

  if ((pd == self
       || (self->joinid == pd
           && (pd->cancelhandling
               & (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
                  | TERMINATED_BITMASK)) == 0))
      && !CANCEL_ENABLED_AND_CANCELED (self->cancelhandling))
    /* This is a deadlock situation.  The threads are waiting for each
       other to finish.  Note that this is a "may" error.  To be 100%
       sure we catch this error we would have to lock the data
       structures but it is not necessary.  In the unlikely case that
       two threads are really caught in this situation they will
       deadlock.  It is the programmer's problem to figure this
       out.  */
    return EDEADLK;

  /* Wait for the thread to finish.  If it is already locked something
     is wrong.  There can only be one waiter.  */
  else if (__glibc_unlikely (atomic_compare_exchange_weak_acquire (&pd->joinid,
                                                                   &self,
                                                                   NULL)))
    /* There is already somebody waiting for the thread.  */
    return EINVAL;

  if (block)
    {
      /* During the wait we change to asynchronous cancellation.  If we
         are cancelled the thread we are waiting for must be marked as
         un-wait-ed for again.  */
      pthread_cleanup_push (cleanup, &pd->joinid);

      result = lll_wait_tid (pd->tid, abstime);

      pthread_cleanup_pop (0);
    }

  if (__glibc_likely (result == 0))
    {
      /* We mark the thread as terminated and as joined.  */
      pd->tid = -1;

      /* Store the return value if the caller is interested.  */
      if (thread_return != NULL)
        *thread_return = pd->result;

      /* Free the TCB.  */
      __free_tcb (pd);
    }
  else
    pd->joinid = NULL;

  LIBC_PROBE (pthread_join_ret, 3, threadid, result, pd->result);

  return result;
}
hidden_def (__pthread_timedjoin_ex)
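As a usage note, the timed path of __pthread_timedjoin_ex backs the GNU extension pthread_timedjoin_np, which takes an absolute CLOCK_REALTIME deadline.  The following is a small, self-contained caller-side example; the worker function and the roughly two-second deadline are arbitrary choices for illustration.

#define _GNU_SOURCE
#include <pthread.h>
#include <errno.h>
#include <stdio.h>
#include <time.h>

static void *
worker (void *arg)
{
  /* Placeholder work.  */
  return NULL;
}

int
main (void)
{
  pthread_t t;
  if (pthread_create (&t, NULL, worker, NULL) != 0)
    return 1;

  /* pthread_timedjoin_np expects an absolute CLOCK_REALTIME deadline.  */
  struct timespec abstime;
  clock_gettime (CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 2;

  int err = pthread_timedjoin_np (t, NULL, &abstime);
  if (err == ETIMEDOUT)
    fprintf (stderr, "thread did not exit before the deadline\n");
  else if (err != 0)
    fprintf (stderr, "pthread_timedjoin_np: %d\n", err);

  return 0;
}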