nptl: Cleanup cancellation macros

This patch wraps all uses of *_{enable,disable}_asynccancel and
*_CANCEL_{ASYNC,RESET} in either already provided macros
(lll_futex_timed_wait_cancel) or in new ones where the functionality
was not yet provided (SYSCALL_CANCEL_NCS, lll_futex_wait_cancel,
and lll_futex_timed_wait_cancel).

Also, for some generic implementations the direct calls to these
macros are removed, since the underlying symbols are supposed to
provide cancellation support.

This is a preliminary patch intended to simplify the work required
for the BZ#12683 fix.  It is a refactoring only; no semantic changes
are expected.

Checked on x86_64-linux-gnu and i686-linux-gnu.
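
To make the shape of the change concrete, here is a minimal before/after
sketch of the pattern being consolidated, based on the open64.c hunk below
(illustrative only, not a verbatim quote of any single file):

  /* Before: each caller opened an async-cancellation window by hand.  */
  int oldtype = LIBC_CANCEL_ASYNC ();
  int result = __libc_open (file, oflag | O_LARGEFILE, mode);
  LIBC_CANCEL_RESET (oldtype);
  return result;

  /* After: the caller is a plain call; the cancellation handling lives in
     the called symbol or in a dedicated *_cancel wrapper macro.  */
  return __libc_open (file, oflag | O_LARGEFILE, mode);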

	* nptl/pthread_join_common.c (__pthread_timedjoin_ex): Use
	lll_wait_tid with timeout.
	* nptl/sem_wait.c (__old_sem_wait): Use lll_futex_wait_cancel.
	* sysdeps/nptl/aio_misc.h (AIO_MISC_WAIT): Use
	futex_reltimed_wait_cancelable for cancellable mode.
	* sysdeps/nptl/gai_misc.h (GAI_MISC_WAIT): Likewise.
	* sysdeps/posix/open64.c (__libc_open64): Do not call cancellation
	macros.
	* sysdeps/posix/sigwait.c (__sigwait): Likewise.
	* sysdeps/posix/waitid.c (__waitid): Likewise.
	* sysdeps/unix/sysdep.h (__SYSCALL_CANCEL_CALL,
	SYSCALL_CANCEL_NCS): New macros.
	* sysdeps/nptl/lowlevellock.h (lll_wait_tid): Add timeout argument.
	(lll_timedwait_tid): Remove macro.
	* sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_wait_tid):
	Likewise.
	(lll_timedwait_tid): Likewise.
	* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (lll_wait_tid):
	Likewise.
	(lll_timedwait_tid): Likewise.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_wait_tid):
	Likewise.
	(lll_timedwait_tid): Likewise.
	* sysdeps/unix/sysv/linux/clock_nanosleep.c (__clock_nanosleep):
	Use INTERNAL_SYSCALL_CANCEL.
	* sysdeps/unix/sysv/linux/futex-internal.h
	(futex_reltimed_wait_cancelable): Use LIBC_CANCEL_{ASYNC,RESET}
	instead of __pthread_{enable,disable}_asynccancel.
	* sysdeps/unix/sysv/linux/lowlevellock-futex.h
	(lll_futex_wait_cancel): New macro.
Adhemerval Zanella 2018-05-10 17:24:56 -03:00
parent 0b13e25581
commit ce7eb0e903
15 changed files with 146 additions and 164 deletions

ChangeLog

@@ -1,5 +1,36 @@
2019-01-03 Adhemerval Zanella <adhemerval.zanella@linaro.org>
* nptl/pthread_join_common.c (__pthread_timedjoin_ex): Use
lll_wait_tid with timeout.
* nptl/sem_wait.c (__old_sem_wait): Use lll_futex_wait_cancel.
* sysdeps/nptl/aio_misc.h (AIO_MISC_WAIT): Use
futex_reltimed_wait_cancelable for cancellable mode.
* sysdeps/nptl/gai_misc.h (GAI_MISC_WAIT): Likewise.
* sysdeps/posix/open64.c (__libc_open64): Do not call cancellation
macros.
* sysdeps/posix/sigwait.c (__sigwait): Likewise.
* sysdeps/posix/waitid.c (__waitid): Likewise.
* sysdeps/unix/sysdep.h (__SYSCALL_CANCEL_CALL,
SYSCALL_CANCEL_NCS): New macros.
* sysdeps/nptl/lowlevellock.h (lll_wait_tid): Add timeout argument.
(lll_timedwait_tid): Remove macro.
* sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_wait_tid):
Likewise.
(lll_timedwait_tid): Likewise.
* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (lll_wait_tid):
Likewise.
(lll_timedwait_tid): Likewise.
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_wait_tid):
Likewise.
(lll_timedwait_tid): Likewise.
* sysdeps/unix/sysv/linux/clock_nanosleep.c (__clock_nanosleep):
Use INTERNAL_SYSCALL_CANCEL.
* sysdeps/unix/sysv/linux/futex-internal.h
(futex_reltimed_wait_cancelable): Use LIBC_CANCEL_{ASYNC,RESET}
instead of __pthread_{enable,disable}_asynccancel.
* sysdeps/unix/sysv/linux/lowlevellock-futex.h
(lll_futex_wait_cancel): New macro.
* sysdeps/i386/nptl/tls.h (THREAD_ATOMIC_CMPXCHG_VAL,
THREAD_ATOMIC_AND, THREAD_ATOMIC_BIT_SET): Remove macros.

nptl/pthread_join_common.c

@@ -81,14 +81,7 @@ __pthread_timedjoin_ex (pthread_t threadid, void **thread_return,
un-wait-ed for again. */
pthread_cleanup_push (cleanup, &pd->joinid);
int oldtype = CANCEL_ASYNC ();
if (abstime != NULL)
result = lll_timedwait_tid (pd->tid, abstime);
else
lll_wait_tid (pd->tid);
CANCEL_RESET (oldtype);
result = lll_wait_tid (pd->tid, abstime);
pthread_cleanup_pop (0);
}

nptl/sem_wait.c

@@ -56,14 +56,8 @@ __old_sem_wait (sem_t *sem)
if (atomic_decrement_if_positive (futex) > 0)
return 0;
/* Enable asynchronous cancellation. Required by the standard. */
int oldtype = __pthread_enable_asynccancel ();
/* Always assume the semaphore is shared. */
err = lll_futex_wait (futex, 0, LLL_SHARED);
/* Disable asynchronous cancellation. */
__pthread_disable_asynccancel (oldtype);
err = lll_futex_wait_cancel (futex, 0, LLL_SHARED);
}
while (err == 0 || err == -EWOULDBLOCK);

sysdeps/nptl/aio_misc.h

@@ -41,15 +41,15 @@
{ \
pthread_mutex_unlock (&__aio_requests_mutex); \
\
int oldtype; \
if (cancel) \
oldtype = LIBC_CANCEL_ASYNC (); \
\
int status; \
do \
{ \
status = futex_reltimed_wait ((unsigned int *) futexaddr, oldval, \
timeout, FUTEX_PRIVATE); \
if (cancel) \
status = futex_reltimed_wait_cancelable ( \
(unsigned int *) futexaddr, oldval, timeout, FUTEX_PRIVATE); \
else \
status = futex_reltimed_wait ((unsigned int *) futexaddr, \
oldval, timeout, FUTEX_PRIVATE); \
if (status != EAGAIN) \
break; \
\
@@ -57,9 +57,6 @@
} \
while (oldval != 0); \
\
if (cancel) \
LIBC_CANCEL_RESET (oldtype); \
\
if (status == EINTR) \
result = EINTR; \
else if (status == ETIMEDOUT) \

sysdeps/nptl/gai_misc.h

@@ -42,15 +42,15 @@
{ \
pthread_mutex_unlock (&__gai_requests_mutex); \
\
int oldtype; \
if (cancel) \
oldtype = LIBC_CANCEL_ASYNC (); \
\
int status; \
do \
{ \
status = futex_reltimed_wait ((unsigned int *) futexaddr, oldval, \
timeout, FUTEX_PRIVATE); \
if (cancel) \
status = futex_reltimed_wait_cancelable ( \
(unsigned int *) futexaddr, oldval, timeout, FUTEX_PRIVATE); \
else \
status = futex_reltimed_wait ((unsigned int *) futexaddr, \
oldval, timeout, FUTEX_PRIVATE); \
if (status != EAGAIN) \
break; \
\
@@ -58,9 +58,6 @@
} \
while (oldval != 0); \
\
if (cancel) \
LIBC_CANCEL_RESET (oldtype); \
\
if (status == EINTR) \
result = EINTR; \
else if (status == ETIMEDOUT) \

sysdeps/nptl/lowlevellock.h

@@ -175,33 +175,29 @@ extern int __lll_timedlock_wait (int *futex, const struct timespec *,
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
extern int __lll_timedwait_tid (int *, const struct timespec *)
attribute_hidden;
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
wake-up when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero by the kernel
afterwards. The kernel up to version 3.16.3 does not use the private futex
operations for futex wake-up when the clone terminates. */
#define lll_wait_tid(tid) \
do { \
__typeof (tid) __tid; \
/* We need acquire MO here so that we synchronize \
with the kernel's store to 0 when the clone \
terminates. (see above) */ \
while ((__tid = atomic_load_acquire (&(tid))) != 0) \
lll_futex_wait (&(tid), __tid, LLL_SHARED); \
} while (0)
extern int __lll_timedwait_tid (int *, const struct timespec *)
attribute_hidden;
/* As lll_wait_tid, but with a timeout. If the timeout occurs then return
ETIMEDOUT. If ABSTIME is invalid, return EINVAL. */
#define lll_timedwait_tid(tid, abstime) \
({ \
int __res = 0; \
if ((tid) != 0) \
__res = __lll_timedwait_tid (&(tid), (abstime)); \
__res; \
operations for futex wake-up when the clone terminates.
If ABSTIME is not NULL, it is used as a timeout for the futex call.  If the
timeout occurs then return ETIMEDOUT; if ABSTIME is invalid, return EINVAL.
The futex operations are issued with the cancellable versions. */
#define lll_wait_tid(tid, abstime) \
({ \
int __res = 0; \
__typeof (tid) __tid; \
if (abstime != NULL) \
__res = __lll_timedwait_tid (&(tid), (abstime)); \
else \
/* We need acquire MO here so that we synchronize with the \
kernel's store to 0 when the clone terminates. (see above) */ \
while ((__tid = atomic_load_acquire (&(tid))) != 0) \
lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
__res; \
})
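
With the timeout folded into lll_wait_tid, a caller such as
__pthread_timedjoin_ex (see the pthread_join_common.c hunk above) no longer
needs two code paths.  A minimal usage sketch, assuming a pd->tid field and
an abstime argument as in that file:

  /* abstime == NULL waits indefinitely; otherwise __lll_timedwait_tid is
     used and ETIMEDOUT or EINVAL comes back through the return value.  */
  int result = lll_wait_tid (pd->tid, abstime);
  if (result == ETIMEDOUT)
    /* The thread did not terminate before the absolute deadline.  */
    return result;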

sysdeps/posix/open64.c

@@ -34,16 +34,8 @@ __libc_open64 (const char *file, int oflag, ...)
va_end (arg);
}
if (SINGLE_THREAD_P)
return __libc_open (file, oflag | O_LARGEFILE, mode);
int oldtype = LIBC_CANCEL_ASYNC ();
int result = __libc_open (file, oflag | O_LARGEFILE, mode);
LIBC_CANCEL_RESET (oldtype);
return result;
/* __libc_open should be a cancellation point. */
return __libc_open (file, oflag | O_LARGEFILE, mode);
}
weak_alias (__libc_open64, __open64)
libc_hidden_weak (__open64)

sysdeps/posix/sigwait.c

@@ -85,16 +85,8 @@ do_sigwait (const sigset_t *set, int *sig)
int
__sigwait (const sigset_t *set, int *sig)
{
if (SINGLE_THREAD_P)
return do_sigwait (set, sig);
int oldtype = LIBC_CANCEL_ASYNC ();
int result = do_sigwait (set, sig);
LIBC_CANCEL_RESET (oldtype);
return result;
/* __sigsuspend should be a cancellation point. */
return do_sigwait (set, sig);
}
libc_hidden_def (__sigwait)
weak_alias (__sigwait, sigwait)

sysdeps/posix/waitid.c

@@ -151,16 +151,8 @@ OUR_WAITID (idtype_t idtype, id_t id, siginfo_t *infop, int options)
int
__waitid (idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
if (SINGLE_THREAD_P)
return do_waitid (idtype, id, infop, options);
int oldtype = LIBC_CANCEL_ASYNC ();
int result = do_waitid (idtype, id, infop, options);
LIBC_CANCEL_RESET (oldtype);
return result;
/* __waitpid should be a cancellation point. */
return do_waitid (idtype, id, infop, options);
}
weak_alias (__waitid, waitid)
strong_alias (__waitid, __libc_waitid)

sysdeps/unix/sysv/linux/clock_nanosleep.c

@@ -28,26 +28,16 @@ int
__clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
struct timespec *rem)
{
INTERNAL_SYSCALL_DECL (err);
int r;
if (clock_id == CLOCK_THREAD_CPUTIME_ID)
return EINVAL;
if (clock_id == CLOCK_PROCESS_CPUTIME_ID)
clock_id = MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED);
if (SINGLE_THREAD_P)
r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req, rem);
else
{
int oldstate = LIBC_CANCEL_ASYNC ();
r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req,
rem);
LIBC_CANCEL_RESET (oldstate);
}
/* If the call is interrupted by a signal handler or encounters an error,
it returns a positive value similar to errno. */
INTERNAL_SYSCALL_DECL (err);
int r = INTERNAL_SYSCALL_CANCEL (clock_nanosleep, err, clock_id, flags,
req, rem);
return (INTERNAL_SYSCALL_ERROR_P (r, err)
? INTERNAL_SYSCALL_ERRNO (r, err) : 0);
}
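
The deleted lines above are effectively what INTERNAL_SYSCALL_CANCEL now has
to encapsulate.  As a rough illustration of the dispatch being hidden (a
sketch of the old open-coded logic, not the macro's actual definition):

  /* Single-threaded processes need no cancellation window and issue the
     raw syscall directly; multi-threaded ones bracket it with the
     async-cancel enable/reset pair, exactly as the removed code did.  */
  if (SINGLE_THREAD_P)
    r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req, rem);
  else
    {
      int oldstate = LIBC_CANCEL_ASYNC ();
      r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req,
                            rem);
      LIBC_CANCEL_RESET (oldstate);
    }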

sysdeps/unix/sysv/linux/futex-internal.h

@@ -138,9 +138,9 @@ futex_reltimed_wait_cancelable (unsigned int *futex_word,
const struct timespec *reltime, int private)
{
int oldtype;
oldtype = __pthread_enable_asynccancel ();
oldtype = LIBC_CANCEL_ASYNC ();
int err = lll_futex_timed_wait (futex_word, expected, reltime, private);
__pthread_disable_asynccancel (oldtype);
LIBC_CANCEL_RESET (oldtype);
switch (err)
{
case 0:

sysdeps/unix/sysv/linux/i386/lowlevellock.h

@@ -221,32 +221,30 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
#define lll_islocked(futex) \
(futex != LLL_LOCK_INITIALIZER)
extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
__attribute__ ((regparm (2))) attribute_hidden;
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
wake-up when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero by the kernel
afterwards. The kernel up to version 3.16.3 does not use the private futex
operations for futex wake-up when the clone terminates. */
#define lll_wait_tid(tid) \
do { \
__typeof (tid) __tid; \
while ((__tid = (tid)) != 0) \
lll_futex_wait (&(tid), __tid, LLL_SHARED);\
} while (0)
extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
__attribute__ ((regparm (2))) attribute_hidden;
/* As lll_wait_tid, but with a timeout. If the timeout occurs then return
ETIMEDOUT. If ABSTIME is invalid, return EINVAL.
XXX Note that this differs from the generic version in that we do the
error checking here and not in __lll_timedwait_tid. */
#define lll_timedwait_tid(tid, abstime) \
({ \
int __result = 0; \
if ((tid) != 0) \
__result = __lll_timedwait_tid (&(tid), (abstime)); \
__result; })
operations for futex wake-up when the clone terminates.
If ABSTIME is not NULL, it is used as a timeout for the futex call.  If the
timeout occurs then return ETIMEDOUT; if ABSTIME is invalid, return EINVAL.
The futex operations are issued with the cancellable versions. */
#define lll_wait_tid(tid, abstime) \
({ \
int __res = 0; \
__typeof (tid) __tid; \
if (abstime != NULL) \
__res = __lll_timedwait_tid (&(tid), (abstime)); \
else \
/* We need acquire MO here so that we synchronize with the \
kernel's store to 0 when the clone terminates. (see above) */ \
while ((__tid = atomic_load_acquire (&(tid))) != 0) \
lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
__res; \
})
extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
attribute_hidden;

sysdeps/unix/sysv/linux/lowlevellock-futex.h

@@ -125,6 +125,17 @@
private), \
nr_wake, nr_move, mutex, val)
/* Cancellable futex macros. */
#define lll_futex_wait_cancel(futexp, val, private) \
({ \
int __oldtype = CANCEL_ASYNC (); \
long int __err = lll_futex_wait (futexp, val, LLL_SHARED); \
CANCEL_RESET (__oldtype); \
__err; \
})
#endif /* !__ASSEMBLER__ */
#endif /* lowlevellock-futex.h */
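
The commit message also relies on an already provided
lll_futex_timed_wait_cancel; its definition is not part of this hunk, but it
presumably mirrors the macro above using lll_futex_timed_wait, roughly (an
assumed sketch, not the header's actual contents):

  /* Assumed shape only: cancellable counterpart of lll_futex_timed_wait.  */
  #define lll_futex_timed_wait_cancel(futexp, val, timeout, private)   \
    ({                                                                 \
      int __oldtype = CANCEL_ASYNC ();                                 \
      long int __err = lll_futex_timed_wait (futexp, val, timeout,     \
                                             private);                 \
      CANCEL_RESET (__oldtype);                                        \
      __err;                                                           \
    })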

sysdeps/unix/sysv/linux/sparc/lowlevellock.h

@@ -108,28 +108,29 @@ __lll_timedlock (int *futex, const struct timespec *abstime, int private)
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
afterwards. */
#define lll_wait_tid(tid) \
do \
{ \
__typeof (tid) __tid; \
while ((__tid = (tid)) != 0) \
lll_futex_wait (&(tid), __tid, LLL_SHARED); \
} \
while (0)
extern int __lll_timedwait_tid (int *, const struct timespec *)
attribute_hidden;
#define lll_timedwait_tid(tid, abstime) \
({ \
int __res = 0; \
if ((tid) != 0) \
__res = __lll_timedwait_tid (&(tid), (abstime)); \
__res; \
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
wake-up when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero by the kernel
afterwards. The kernel up to version 3.16.3 does not use the private futex
operations for futex wake-up when the clone terminates.
If ABSTIME is not NULL, it is used as a timeout for the futex call.  If the
timeout occurs then return ETIMEDOUT; if ABSTIME is invalid, return EINVAL.
The futex operations are issued with the cancellable versions. */
#define lll_wait_tid(tid, abstime) \
({ \
int __res = 0; \
__typeof (tid) __tid; \
if (abstime != NULL) \
__res = __lll_timedwait_tid (&(tid), (abstime)); \
else \
/* We need acquire MO here so that we synchronize with the \
kernel's store to 0 when the clone terminates. (see above) */ \
while ((__tid = atomic_load_acquire (&(tid))) != 0) \
lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
__res; \
})
#endif /* lowlevellock.h */

sysdeps/unix/sysv/linux/x86_64/lowlevellock.h

@@ -224,32 +224,30 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
#define lll_islocked(futex) \
(futex != LLL_LOCK_INITIALIZER)
extern int __lll_timedwait_tid (int *, const struct timespec *)
attribute_hidden;
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
wake-up when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero by the kernel
afterwards. The kernel up to version 3.16.3 does not use the private futex
operations for futex wake-up when the clone terminates. */
#define lll_wait_tid(tid) \
do { \
__typeof (tid) __tid; \
while ((__tid = (tid)) != 0) \
lll_futex_wait (&(tid), __tid, LLL_SHARED);\
} while (0)
extern int __lll_timedwait_tid (int *, const struct timespec *)
attribute_hidden;
/* As lll_wait_tid, but with a timeout. If the timeout occurs then return
ETIMEDOUT. If ABSTIME is invalid, return EINVAL.
XXX Note that this differs from the generic version in that we do the
error checking here and not in __lll_timedwait_tid. */
#define lll_timedwait_tid(tid, abstime) \
({ \
int __result = 0; \
if ((tid) != 0) \
__result = __lll_timedwait_tid (&(tid), (abstime)); \
__result; })
operations for futex wake-up when the clone terminates.
If ABSTIME is not NULL, it is used as a timeout for the futex call.  If the
timeout occurs then return ETIMEDOUT; if ABSTIME is invalid, return EINVAL.
The futex operations are issued with the cancellable versions. */
#define lll_wait_tid(tid, abstime) \
({ \
int __res = 0; \
__typeof (tid) __tid; \
if (abstime != NULL) \
__res = __lll_timedwait_tid (&(tid), (abstime)); \
else \
/* We need acquire MO here so that we synchronize with the \
kernel's store to 0 when the clone terminates. (see above) */ \
while ((__tid = atomic_load_acquire (&(tid))) != 0) \
lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
__res; \
})
extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
attribute_hidden;