robust mutexes: Fix broken x86 assembly by removing it

lll_robust_unlock on i386 and x86_64 first sets the futex word to
FUTEX_WAITERS|0 before calling __lll_unlock_wake, which will set the
futex word to 0.  If the thread is killed between these steps, then the
futex word will be FUTEX_WAITERS|0, and the kernel (at least current
upstream) will not set it to FUTEX_OWNER_DIED|FUTEX_WAITERS because 0 is
not equal to the TID of the crashed thread.
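
Rendered as a minimal C sketch (the real code was inline asm calling
__lll_unlock_wake; the function and names here are illustrative, and C11
atomics are used only for brevity):

  #include <stdatomic.h>

  #define FUTEX_WAITERS 0x80000000u

  static void
  broken_robust_unlock (_Atomic unsigned int *futex_word)
  {
    /* Step 1: atomically drop our TID but keep FUTEX_WAITERS; the word
       is now FUTEX_WAITERS|0 and no longer identifies the owner.  */
    if (atomic_fetch_and (futex_word, FUTEX_WAITERS) & FUTEX_WAITERS)
      {
        /* If this thread is killed here, the kernel's robust-list
           cleanup compares the word's TID bits (now 0) with the dead
           thread's TID, finds no match, and never sets FUTEX_OWNER_DIED,
           so waiters block forever.  */
        atomic_store (futex_word, 0);  /* Step 2 (__lll_unlock_wake).  */
        /* ...followed by a FUTEX_WAKE syscall (omitted).  */
      }
  }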

The lll_robust_lock assembly code on i386 and x86_64 is not prepared to
deal with this case because the fastpath only tries to CAS 0 to TID and
not FUTEX_WAITERS|0 to TID; the slowpath simply waits until it can CAS 0
to TID or the futex word has the FUTEX_OWNER_DIED bit set.
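
The lock side can be sketched the same way (again illustrative C, not
the removed asm; futex_wait stands in for the FUTEX_WAIT syscall):

  #include <stdatomic.h>

  #define FUTEX_WAITERS    0x80000000u
  #define FUTEX_OWNER_DIED 0x40000000u

  static void
  futex_wait (_Atomic unsigned int *word, unsigned int val)
  {
    /* Stand-in for syscall (SYS_futex, word, FUTEX_WAIT, val, ...),
       which blocks while *word == val.  */
  }

  static unsigned int
  robust_lock_sketch (_Atomic unsigned int *futex_word, unsigned int tid)
  {
    unsigned int expected = 0;
    /* Fastpath: only 0 -> TID is attempted, so FUTEX_WAITERS|0 falls
       through to the slowpath.  */
    if (atomic_compare_exchange_strong (futex_word, &expected, tid))
      return 0;
    for (;;)
      {
        unsigned int old = atomic_load (futex_word);
        if (old & FUTEX_OWNER_DIED)
          return old;  /* Caller recovers the dead owner's mutex.  */
        atomic_fetch_or (futex_word, FUTEX_WAITERS);
        /* With *futex_word == FUTEX_WAITERS|0 this sleeps forever: the
           word never becomes 0 again and the kernel never sets
           FUTEX_OWNER_DIED (see above).  */
        futex_wait (futex_word, old | FUTEX_WAITERS);
        expected = 0;
        if (atomic_compare_exchange_strong (futex_word, &expected,
                                            tid | FUTEX_WAITERS))
          return 0;
      }
  }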

This issue is fixed by removing the custom x86 assembly code and using
the generic C code instead.  However, instead of adding more duplicate
code to the custom x86 lowlevellock.h, the code of the lll_robust* functions
is inlined into the single call sites that exist for each of these functions
in the pthread_mutex_* functions.  The robust mutex paths in the latter
have been slightly reorganized to make them simpler.

This patch is meant to be easy to backport, so C11-style atomics are not
used.
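
For reference, the generic unlock that replaces the two-step sequence
(inlined into __pthread_mutex_unlock_full in the diff below) releases
the lock with a single release-MO exchange, so the futex word holds the
owner's TID right up until it becomes 0 and the kernel can always
recognize a dead owner:

  private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
  if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0)
                         & FUTEX_WAITERS) != 0))
    lll_futex_wake (&mutex->__data.__lock, 1, private);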

	[BZ #20985]
	* nptl/Makefile: Adapt.
	* nptl/pthread_mutex_cond_lock.c (LLL_ROBUST_MUTEX_LOCK): Remove.
	(LLL_ROBUST_MUTEX_LOCK_MODIFIER): New.
	* nptl/pthread_mutex_lock.c (LLL_ROBUST_MUTEX_LOCK): Remove.
	(LLL_ROBUST_MUTEX_LOCK_MODIFIER): New.
	(__pthread_mutex_lock_full): Inline lll_robust* functions and adapt.
	* nptl/pthread_mutex_timedlock.c (pthread_mutex_timedlock): Inline
	lll_robust* functions and adapt.
	* nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Likewise.
	* sysdeps/nptl/lowlevellock.h (__lll_robust_lock_wait,
	__lll_robust_lock, lll_robust_cond_lock, __lll_robust_timedlock_wait,
	__lll_robust_timedlock, __lll_robust_unlock): Remove.
	* sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_robust_lock,
	lll_robust_cond_lock, lll_robust_timedlock, lll_robust_unlock): Remove.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_robust_lock,
	lll_robust_cond_lock, lll_robust_timedlock, lll_robust_unlock): Remove.
	* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (__lll_robust_lock_wait,
	__lll_robust_lock, lll_robust_cond_lock, __lll_robust_timedlock_wait,
	__lll_robust_timedlock, __lll_robust_unlock): Remove.
	* nptl/lowlevelrobustlock.c: Remove file.
	* nptl/lowlevelrobustlock.sym: Likewise.
	* sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: Likewise.
Author: Torvald Riegel
Date:   2016-12-22 10:20:43 +01:00
Commit: 65810f0ef0
Parent: f32941d80c
14 changed files with 185 additions and 977 deletions

ChangeLog

@@ -1,3 +1,30 @@
2017-01-13 Torvald Riegel <triegel@redhat.com>
[BZ #20985]
* nptl/Makefile: Adapt.
* nptl/pthread_mutex_cond_lock.c (LLL_ROBUST_MUTEX_LOCK): Remove.
(LLL_ROBUST_MUTEX_LOCK_MODIFIER): New.
* nptl/pthread_mutex_lock.c (LLL_ROBUST_MUTEX_LOCK): Remove.
(LLL_ROBUST_MUTEX_LOCK_MODIFIER): New.
(__pthread_mutex_lock_full): Inline lll_robust* functions and adapt.
* nptl/pthread_mutex_timedlock.c (pthread_mutex_timedlock): Inline
lll_robust* functions and adapt.
* nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Likewise.
* sysdeps/nptl/lowlevellock.h (__lll_robust_lock_wait,
__lll_robust_lock, lll_robust_cond_lock, __lll_robust_timedlock_wait,
__lll_robust_timedlock, __lll_robust_unlock): Remove.
* sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_robust_lock,
lll_robust_cond_lock, lll_robust_timedlock, lll_robust_unlock): Remove.
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_robust_lock,
lll_robust_cond_lock, lll_robust_timedlock, lll_robust_unlock): Remove.
* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (__lll_robust_lock_wait,
__lll_robust_lock, lll_robust_cond_lock, __lll_robust_timedlock_wait,
__lll_robust_timedlock, __lll_robust_unlock): Remove.
* nptl/lowlevelrobustlock.c: Remove file.
* nptl/lowlevelrobustlock.sym: Likewise.
* sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S: Likewise.
* sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: Likewise.
2017-01-13 Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
* sysdeps/powerpc/fpu/libm-test-ulps: Regenerated.

nptl/Makefile

@@ -117,7 +117,7 @@ libpthread-routines = nptl-init vars events version pt-interp \
cleanup_defer_compat unwind \
pt-longjmp pt-cleanup\
cancellation \
lowlevellock lowlevelrobustlock \
lowlevellock \
lll_timedlock_wait lll_timedwait_tid \
pt-fork pt-vfork \
$(pthread-compat-wrappers) \
@@ -309,7 +309,7 @@ tests-nolibpthread = tst-unload
gen-as-const-headers = pthread-errnos.sym \
unwindbuf.sym \
lowlevelrobustlock.sym pthread-pi-defines.sym
pthread-pi-defines.sym
gen-py-const-headers := nptl_lock_constants.pysym
pretty-printers := nptl-printers.py

nptl/lowlevelrobustlock.c (deleted)

@@ -1,136 +0,0 @@
/* Copyright (C) 2006-2017 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <errno.h>
#include <sysdep.h>
#include <lowlevellock.h>
#include <sys/time.h>
#include <pthreadP.h>
#include <kernel-features.h>
int
__lll_robust_lock_wait (int *futex, int private)
{
int oldval = *futex;
int tid = THREAD_GETMEM (THREAD_SELF, tid);
/* If the futex changed meanwhile try locking again. */
if (oldval == 0)
goto try;
do
{
/* If the owner died, return the present value of the futex. */
if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
return oldval;
/* Try to put the lock into state 'acquired, possibly with waiters'. */
int newval = oldval | FUTEX_WAITERS;
if (oldval != newval
&& atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
continue;
/* If *futex == 2, wait until woken. */
lll_futex_wait (futex, newval, private);
try:
;
}
while ((oldval = atomic_compare_and_exchange_val_acq (futex,
tid | FUTEX_WAITERS,
0)) != 0);
return 0;
}
int
__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime,
int private)
{
/* Reject invalid timeouts. */
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
return EINVAL;
int tid = THREAD_GETMEM (THREAD_SELF, tid);
int oldval = *futex;
/* If the futex changed meanwhile, try locking again. */
if (oldval == 0)
goto try;
/* Work around the fact that the kernel rejects negative timeout values
despite them being valid. */
if (__glibc_unlikely (abstime->tv_sec < 0))
return ETIMEDOUT;
do
{
#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \
|| !defined lll_futex_timed_wait_bitset)
struct timeval tv;
struct timespec rt;
/* Get the current time. */
(void) __gettimeofday (&tv, NULL);
/* Compute relative timeout. */
rt.tv_sec = abstime->tv_sec - tv.tv_sec;
rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
if (rt.tv_nsec < 0)
{
rt.tv_nsec += 1000000000;
--rt.tv_sec;
}
/* Already timed out? */
if (rt.tv_sec < 0)
return ETIMEDOUT;
#endif
/* If the owner died, return the present value of the futex. */
if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
return oldval;
/* Try to put the lock into state 'acquired, possibly with waiters'. */
int newval = oldval | FUTEX_WAITERS;
if (oldval != newval
&& atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
continue;
/* If *futex == 2, wait until woken or timeout. */
#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \
|| !defined lll_futex_timed_wait_bitset)
lll_futex_timed_wait (futex, newval, &rt, private);
#else
int err = lll_futex_timed_wait_bitset (futex, newval, abstime,
FUTEX_CLOCK_REALTIME, private);
/* The futex call timed out. */
if (err == -ETIMEDOUT)
return -err;
#endif
try:
;
}
while ((oldval = atomic_compare_and_exchange_val_acq (futex,
tid | FUTEX_WAITERS,
0)) != 0);
return 0;
}

nptl/lowlevelrobustlock.sym (deleted)

@@ -1,6 +0,0 @@
#include <stddef.h>
#include <pthreadP.h>
--
TID offsetof (struct pthread, tid)

nptl/pthread_mutex_cond_lock.c

@@ -11,9 +11,9 @@
lll_cond_trylock ((mutex)->__data.__lock)
#define LLL_MUTEX_TRYLOCK_ELISION(mutex) LLL_MUTEX_TRYLOCK(mutex)
#define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
lll_robust_cond_lock ((mutex)->__data.__lock, id, \
PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
/* We need to assume that there are other threads blocked on the futex.
See __pthread_mutex_lock_full for further details. */
#define LLL_ROBUST_MUTEX_LOCK_MODIFIER FUTEX_WAITERS
#define __pthread_mutex_lock internal_function __pthread_mutex_cond_lock
#define __pthread_mutex_lock_full __pthread_mutex_cond_lock_full
#define NO_INCR

nptl/pthread_mutex_lock.c

@@ -36,14 +36,14 @@
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif
/* Some of the following definitions differ when pthread_mutex_cond_lock.c
includes this file. */
#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
lll_robust_lock ((mutex)->__data.__lock, id, \
PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
PTHREAD_MUTEX_PSHARED (mutex))
@@ -185,11 +185,21 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
/* This is set to FUTEX_WAITERS iff we might have shared the
FUTEX_WAITERS flag with other threads, and therefore need to keep it
set to avoid lost wake-ups. We have the same requirement in the
simple mutex algorithm. */
unsigned int assume_other_futex_waiters = 0;
do
simple mutex algorithm.
We start with value zero for a normal mutex, and FUTEX_WAITERS if we
are building the special case mutexes for use from within condition
variables. */
unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
while (1)
{
again:
/* Try to acquire the lock through a CAS from 0 (not acquired) to
our TID | assume_other_futex_waiters. */
if (__glibc_likely ((oldval == 0)
&& (atomic_compare_and_exchange_bool_acq
(&mutex->__data.__lock,
id | assume_other_futex_waiters, 0) == 0)))
break;
if ((oldval & FUTEX_OWNER_DIED) != 0)
{
/* The previous owner died. Try locking the mutex. */
@@ -209,7 +219,7 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
if (newval != oldval)
{
oldval = newval;
goto again;
continue;
}
/* We got the mutex. */
@@ -260,24 +270,47 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
}
}
oldval = LLL_ROBUST_MUTEX_LOCK (mutex,
id | assume_other_futex_waiters);
/* See above. We set FUTEX_WAITERS and might have shared this flag
with other threads; thus, we need to preserve it. */
assume_other_futex_waiters = FUTEX_WAITERS;
if (__builtin_expect (mutex->__data.__owner
== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
/* We cannot acquire the mutex nor has its owner died. Thus, try
to block using futexes. Set FUTEX_WAITERS if necessary so that
other threads are aware that there are potentially threads
blocked on the futex. Restart if oldval changed in the
meantime. */
if ((oldval & FUTEX_WAITERS) == 0)
{
/* This mutex is now not recoverable. */
mutex->__data.__count = 0;
lll_unlock (mutex->__data.__lock,
PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
oldval | FUTEX_WAITERS,
oldval)
!= 0)
{
oldval = mutex->__data.__lock;
continue;
}
oldval |= FUTEX_WAITERS;
}
/* It is now possible that we share the FUTEX_WAITERS flag with
another thread; therefore, update assume_other_futex_waiters so
that we do not forget about this when handling other cases
above and thus do not cause lost wake-ups. */
assume_other_futex_waiters |= FUTEX_WAITERS;
/* Block using the futex and reload current lock value. */
lll_futex_wait (&mutex->__data.__lock, oldval,
PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
oldval = mutex->__data.__lock;
}
/* We have acquired the mutex; check if it is still consistent. */
if (__builtin_expect (mutex->__data.__owner
== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
{
/* This mutex is now not recoverable. */
mutex->__data.__count = 0;
int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
lll_unlock (mutex->__data.__lock, private);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
}
while ((oldval & FUTEX_OWNER_DIED) != 0);
mutex->__data.__count = 1;
ENQUEUE_MUTEX (mutex);

nptl/pthread_mutex_timedlock.c

@@ -147,9 +147,16 @@ pthread_mutex_timedlock (pthread_mutex_t *mutex,
set to avoid lost wake-ups. We have the same requirement in the
simple mutex algorithm. */
unsigned int assume_other_futex_waiters = 0;
do
while (1)
{
again:
/* Try to acquire the lock through a CAS from 0 (not acquired) to
our TID | assume_other_futex_waiters. */
if (__glibc_likely ((oldval == 0)
&& (atomic_compare_and_exchange_bool_acq
(&mutex->__data.__lock,
id | assume_other_futex_waiters, 0) == 0)))
break;
if ((oldval & FUTEX_OWNER_DIED) != 0)
{
/* The previous owner died. Try locking the mutex. */
@@ -162,7 +169,7 @@ pthread_mutex_timedlock (pthread_mutex_t *mutex,
if (newval != oldval)
{
oldval = newval;
goto again;
continue;
}
/* We got the mutex. */
@@ -209,30 +216,87 @@ pthread_mutex_timedlock (pthread_mutex_t *mutex,
}
}
result = lll_robust_timedlock (mutex->__data.__lock, abstime,
id | assume_other_futex_waiters,
PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
/* See above. We set FUTEX_WAITERS and might have shared this flag
with other threads; thus, we need to preserve it. */
assume_other_futex_waiters = FUTEX_WAITERS;
/* We are about to block; check whether the timeout is invalid. */
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
return EINVAL;
/* Work around the fact that the kernel rejects negative timeout
values despite them being valid. */
if (__glibc_unlikely (abstime->tv_sec < 0))
return ETIMEDOUT;
#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \
|| !defined lll_futex_timed_wait_bitset)
struct timeval tv;
struct timespec rt;
if (__builtin_expect (mutex->__data.__owner
== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
/* Get the current time. */
(void) __gettimeofday (&tv, NULL);
/* Compute relative timeout. */
rt.tv_sec = abstime->tv_sec - tv.tv_sec;
rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
if (rt.tv_nsec < 0)
{
/* This mutex is now not recoverable. */
mutex->__data.__count = 0;
lll_unlock (mutex->__data.__lock,
PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
rt.tv_nsec += 1000000000;
--rt.tv_sec;
}
if (result == ETIMEDOUT || result == EINVAL)
goto out;
/* Already timed out? */
if (rt.tv_sec < 0)
return ETIMEDOUT;
#endif
oldval = result;
/* We cannot acquire the mutex nor has its owner died. Thus, try
to block using futexes. Set FUTEX_WAITERS if necessary so that
other threads are aware that there are potentially threads
blocked on the futex. Restart if oldval changed in the
meantime. */
if ((oldval & FUTEX_WAITERS) == 0)
{
if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
oldval | FUTEX_WAITERS,
oldval)
!= 0)
{
oldval = mutex->__data.__lock;
continue;
}
oldval |= FUTEX_WAITERS;
}
/* It is now possible that we share the FUTEX_WAITERS flag with
another thread; therefore, update assume_other_futex_waiters so
that we do not forget about this when handling other cases
above and thus do not cause lost wake-ups. */
assume_other_futex_waiters |= FUTEX_WAITERS;
/* Block using the futex. */
#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \
|| !defined lll_futex_timed_wait_bitset)
lll_futex_timed_wait (&mutex->__data.__lock, oldval,
&rt, PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
#else
int err = lll_futex_timed_wait_bitset (&mutex->__data.__lock,
oldval, abstime, FUTEX_CLOCK_REALTIME,
PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
/* The futex call timed out. */
if (err == -ETIMEDOUT)
return -err;
#endif
/* Reload current lock value. */
oldval = mutex->__data.__lock;
}
/* We have acquired the mutex; check if it is still consistent. */
if (__builtin_expect (mutex->__data.__owner
== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
{
/* This mutex is now not recoverable. */
mutex->__data.__count = 0;
int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
lll_unlock (mutex->__data.__lock, private);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
}
while ((oldval & FUTEX_OWNER_DIED) != 0);
mutex->__data.__count = 1;
ENQUEUE_MUTEX (mutex);

nptl/pthread_mutex_unlock.c

@@ -96,6 +96,7 @@ internal_function
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
int newowner = 0;
int private;
switch (PTHREAD_MUTEX_TYPE (mutex))
{
@@ -149,9 +150,14 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
/* One less user. */
--mutex->__data.__nusers;
/* Unlock. */
lll_robust_unlock (mutex->__data.__lock,
PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
/* Unlock by setting the lock to 0 (not acquired); if the lock had
FUTEX_WAITERS set previously, then wake any waiters.
The unlock operation must be the last access to the mutex to not
violate the mutex destruction requirements (see __lll_unlock). */
private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0)
& FUTEX_WAITERS) != 0))
lll_futex_wake (&mutex->__data.__lock, 1, private);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
break;
@@ -234,9 +240,9 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
to not violate the mutex destruction requirements (see
lll_unlock). */
int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
int private = (robust
? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
: PTHREAD_MUTEX_PSHARED (mutex));
private = (robust
? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
: PTHREAD_MUTEX_PSHARED (mutex));
/* Unlock the mutex using a CAS unless there are futex waiters or our
TID is not the value of __lock anymore, in which case we let the
kernel take care of the situation. Use release MO in the CAS to

sysdeps/nptl/lowlevellock.h

@@ -74,7 +74,6 @@
extern void __lll_lock_wait_private (int *futex) attribute_hidden;
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
/* This is an expression rather than a statement even though its value is
void, so that it can be used in a comma expression or as an expression
@@ -103,28 +102,6 @@ extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
__lll_lock (&(futex), private)
/* If FUTEX is 0 (not acquired), set to ID (acquired with no waiters) and
return 0. Otherwise, ensure that it is set to FUTEX | FUTEX_WAITERS
(acquired, possibly with waiters) and block until we acquire the lock.
FUTEX will now be ID | FUTEX_WAITERS and we return 0.
If the previous owner of the lock dies before we acquire the lock then FUTEX
will be the value of id as set by the previous owner, with FUTEX_OWNER_DIED
set (FUTEX_WAITERS may or may not be set). We return this value to indicate
that the lock is not acquired. */
#define __lll_robust_lock(futex, id, private) \
({ \
int *__futex = (futex); \
int __val = 0; \
\
if (__glibc_unlikely \
(atomic_compare_and_exchange_bool_acq (__futex, id, 0))) \
__val = __lll_robust_lock_wait (__futex, private); \
__val; \
})
#define lll_robust_lock(futex, id, private) \
__lll_robust_lock (&(futex), id, private)
/* This is an expression rather than a statement even though its value is
void, so that it can be used in a comma expression or as an expression
that's cast to void. */
@@ -142,16 +119,8 @@ extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
/* As __lll_robust_lock, but set to ID | FUTEX_WAITERS (acquired, possibly with
waiters) if FUTEX is 0. */
#define lll_robust_cond_lock(futex, id, private) \
__lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
extern int __lll_timedlock_wait (int *futex, const struct timespec *,
int private) attribute_hidden;
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
int private) attribute_hidden;
/* As __lll_lock, but with a timeout. If the timeout occurs then return
@@ -170,22 +139,6 @@ extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
__lll_timedlock (&(futex), abstime, private)
/* As __lll_robust_lock, but with a timeout. If the timeout occurs then return
ETIMEDOUT. If ABSTIME is invalid, return EINVAL. */
#define __lll_robust_timedlock(futex, abstime, id, private) \
({ \
int *__futex = (futex); \
int __val = 0; \
\
if (__glibc_unlikely \
(atomic_compare_and_exchange_bool_acq (__futex, id, 0))) \
__val = __lll_robust_timedlock_wait (__futex, abstime, private); \
__val; \
})
#define lll_robust_timedlock(futex, abstime, id, private) \
__lll_robust_timedlock (&(futex), abstime, id, private)
/* This is an expression rather than a statement even though its value is
void, so that it can be used in a comma expression or as an expression
that's cast to void. */
@@ -211,27 +164,6 @@ extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
__lll_unlock (&(futex), private)
/* This is an expression rather than a statement even though its value is
void, so that it can be used in a comma expression or as an expression
that's cast to void. */
/* Unconditionally set FUTEX to 0 (not acquired), releasing the lock. If FUTEX
had FUTEX_WAITERS set then wake any waiters. The waiter that acquires the
lock will set FUTEX_WAITERS.
Evaluate PRIVATE before releasing the lock so that we do not violate the
mutex destruction requirements (see __lll_unlock). */
#define __lll_robust_unlock(futex, private) \
((void) \
({ \
int *__futex = (futex); \
int __private = (private); \
int __oldval = atomic_exchange_rel (__futex, 0); \
if (__glibc_unlikely (__oldval & FUTEX_WAITERS)) \
lll_futex_wake (__futex, 1, __private); \
}))
#define lll_robust_unlock(futex, private) \
__lll_robust_unlock (&(futex), private)
#define lll_islocked(futex) \
((futex) != LLL_LOCK_INITIALIZER)

sysdeps/unix/sysv/linux/i386/lowlevellock.h

@@ -132,20 +132,6 @@
} \
})
#define lll_robust_lock(futex, id, private) \
({ int result, ignore1, ignore2; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
"jz 18f\n\t" \
"1:\tleal %2, %%edx\n" \
"0:\tmovl %7, %%ecx\n" \
"2:\tcall __lll_robust_lock_wait\n" \
"18:" \
: "=a" (result), "=c" (ignore1), "=m" (futex), \
"=&d" (ignore2) \
: "0" (0), "1" (id), "m" (futex), "g" ((int) (private))\
: "memory"); \
result; })
/* Special version of lll_lock which causes the unlock function to
always wakeup waiters. */
@@ -165,22 +151,6 @@
})
#define lll_robust_cond_lock(futex, id, private) \
({ int result, ignore1, ignore2; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
"jz 18f\n\t" \
"1:\tleal %2, %%edx\n" \
"0:\tmovl %7, %%ecx\n" \
"2:\tcall __lll_robust_lock_wait\n" \
"18:" \
: "=a" (result), "=c" (ignore1), "=m" (futex), \
"=&d" (ignore2) \
: "0" (0), "1" (id | FUTEX_WAITERS), "m" (futex), \
"g" ((int) (private)) \
: "memory"); \
result; })
#define lll_timedlock(futex, timeout, private) \
({ int result, ignore1, ignore2, ignore3; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
@@ -203,21 +173,6 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
#define lll_timedlock_elision(futex, adapt_count, timeout, private) \
__lll_timedlock_elision(&(futex), &(adapt_count), timeout, private)
#define lll_robust_timedlock(futex, timeout, id, private) \
({ int result, ignore1, ignore2, ignore3; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
"jz 18f\n\t" \
"1:\tleal %3, %%ecx\n" \
"0:\tmovl %8, %%edx\n" \
"2:\tcall __lll_robust_timedlock_wait\n" \
"18:" \
: "=a" (result), "=c" (ignore1), "=&d" (ignore2), \
"=m" (futex), "=S" (ignore3) \
: "0" (0), "1" (id), "m" (futex), "m" (timeout), \
"4" ((int) (private)) \
: "memory"); \
result; })
#if !IS_IN (libc) || defined UP
# define __lll_unlock_asm LOCK_INSTR "subl $1, %0\n\t"
#else
@@ -255,21 +210,6 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
} \
})
#define lll_robust_unlock(futex, private) \
(void) \
({ int ignore, ignore2; \
__asm __volatile (LOCK_INSTR "andl %3, %0\n\t" \
"je 18f\n\t" \
"1:\tleal %0, %%eax\n" \
"0:\tmovl %5, %%ecx\n" \
"2:\tcall __lll_unlock_wake\n" \
"18:" \
: "=m" (futex), "=&a" (ignore), "=&c" (ignore2) \
: "i" (FUTEX_WAITERS), "m" (futex), \
"g" ((int) (private)) \
: "memory"); \
})
#define lll_islocked(futex) \
(futex != LLL_LOCK_INITIALIZER)

sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S (deleted)

@@ -1,232 +0,0 @@
/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include <pthread-errnos.h>
#include <lowlevellock.h>
#include <lowlevelrobustlock.h>
#include <kernel-features.h>
.text
#define FUTEX_WAITERS 0x80000000
#define FUTEX_OWNER_DIED 0x40000000
#ifdef __ASSUME_PRIVATE_FUTEX
# define LOAD_FUTEX_WAIT(reg) \
xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
#else
# if FUTEX_WAIT == 0
# define LOAD_FUTEX_WAIT(reg) \
xorl $FUTEX_PRIVATE_FLAG, reg ; \
andl %gs:PRIVATE_FUTEX, reg
# else
# define LOAD_FUTEX_WAIT(reg) \
xorl $FUTEX_PRIVATE_FLAG, reg ; \
andl %gs:PRIVATE_FUTEX, reg ; \
orl $FUTEX_WAIT, reg
# endif
#endif
.globl __lll_robust_lock_wait
.type __lll_robust_lock_wait,@function
.hidden __lll_robust_lock_wait
.align 16
__lll_robust_lock_wait:
cfi_startproc
pushl %edx
cfi_adjust_cfa_offset(4)
pushl %ebx
cfi_adjust_cfa_offset(4)
pushl %esi
cfi_adjust_cfa_offset(4)
cfi_offset(%edx, -8)
cfi_offset(%ebx, -12)
cfi_offset(%esi, -16)
movl %edx, %ebx
xorl %esi, %esi /* No timeout. */
LOAD_FUTEX_WAIT (%ecx)
4: movl %eax, %edx
orl $FUTEX_WAITERS, %edx
testl $FUTEX_OWNER_DIED, %eax
jnz 3f
cmpl %edx, %eax /* NB: %edx == 2 */
je 1f
LOCK
cmpxchgl %edx, (%ebx)
jnz 2f
1: movl $SYS_futex, %eax
ENTER_KERNEL
movl (%ebx), %eax
2: test %eax, %eax
jne 4b
movl %gs:TID, %edx
orl $FUTEX_WAITERS, %edx
LOCK
cmpxchgl %edx, (%ebx)
jnz 4b
/* NB: %eax == 0 */
3: popl %esi
cfi_adjust_cfa_offset(-4)
cfi_restore(%esi)
popl %ebx
cfi_adjust_cfa_offset(-4)
cfi_restore(%ebx)
popl %edx
cfi_adjust_cfa_offset(-4)
cfi_restore(%edx)
ret
cfi_endproc
.size __lll_robust_lock_wait,.-__lll_robust_lock_wait
.globl __lll_robust_timedlock_wait
.type __lll_robust_timedlock_wait,@function
.hidden __lll_robust_timedlock_wait
.align 16
__lll_robust_timedlock_wait:
cfi_startproc
/* Check for a valid timeout value. */
cmpl $1000000000, 4(%edx)
jae 3f
pushl %edi
cfi_adjust_cfa_offset(4)
pushl %esi
cfi_adjust_cfa_offset(4)
pushl %ebx
cfi_adjust_cfa_offset(4)
pushl %ebp
cfi_adjust_cfa_offset(4)
cfi_offset(%edi, -8)
cfi_offset(%esi, -12)
cfi_offset(%ebx, -16)
cfi_offset(%ebp, -20)
/* Stack frame for the timespec and timeval structs. */
subl $12, %esp
cfi_adjust_cfa_offset(12)
movl %ecx, %ebp
movl %edx, %edi
1: movl %eax, 8(%esp)
/* Get current time. */
movl %esp, %ebx
xorl %ecx, %ecx
movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
movl 4(%esp), %eax
movl $1000, %edx
mul %edx /* Milli seconds to nano seconds. */
movl (%edi), %ecx
movl 4(%edi), %edx
subl (%esp), %ecx
subl %eax, %edx
jns 4f
addl $1000000000, %edx
subl $1, %ecx
4: testl %ecx, %ecx
js 8f /* Time is already up. */
/* Store relative timeout. */
movl %ecx, (%esp)
movl %edx, 4(%esp)
movl %ebp, %ebx
movl 8(%esp), %edx
movl %edx, %eax
orl $FUTEX_WAITERS, %edx
testl $FUTEX_OWNER_DIED, %eax
jnz 6f
cmpl %eax, %edx
je 2f
LOCK
cmpxchgl %edx, (%ebx)
movl $0, %ecx /* Must use mov to avoid changing cc. */
jnz 5f
2:
/* Futex call. */
movl %esp, %esi
movl 20(%esp), %ecx
LOAD_FUTEX_WAIT (%ecx)
movl $SYS_futex, %eax
ENTER_KERNEL
movl %eax, %ecx
movl (%ebx), %eax
5: testl %eax, %eax
jne 7f
movl %gs:TID, %edx
orl $FUTEX_WAITERS, %edx
LOCK
cmpxchgl %edx, (%ebx)
jnz 7f
6: addl $12, %esp
cfi_adjust_cfa_offset(-12)
popl %ebp
cfi_adjust_cfa_offset(-4)
cfi_restore(%ebp)
popl %ebx
cfi_adjust_cfa_offset(-4)
cfi_restore(%ebx)
popl %esi
cfi_adjust_cfa_offset(-4)
cfi_restore(%esi)
popl %edi
cfi_adjust_cfa_offset(-4)
cfi_restore(%edi)
ret
3: movl $EINVAL, %eax
ret
cfi_adjust_cfa_offset(28)
cfi_offset(%edi, -8)
cfi_offset(%esi, -12)
cfi_offset(%ebx, -16)
cfi_offset(%ebp, -20)
/* Check whether the time expired. */
7: cmpl $-ETIMEDOUT, %ecx
jne 1b
8: movl $ETIMEDOUT, %eax
jmp 6b
cfi_endproc
.size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait

sysdeps/unix/sysv/linux/sparc/lowlevellock.h

@@ -46,7 +46,6 @@ __lll_cond_trylock (int *futex)
extern void __lll_lock_wait_private (int *futex) attribute_hidden;
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
static inline void
__attribute__ ((always_inline))
@@ -64,18 +63,6 @@ __lll_lock (int *futex, int private)
}
#define lll_lock(futex, private) __lll_lock (&(futex), private)
static inline int
__attribute__ ((always_inline))
__lll_robust_lock (int *futex, int id, int private)
{
int result = 0;
if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
result = __lll_robust_lock_wait (futex, private);
return result;
}
#define lll_robust_lock(futex, id, private) \
__lll_robust_lock (&(futex), id, private)
static inline void
__attribute__ ((always_inline))
__lll_cond_lock (int *futex, int private)
@@ -87,14 +74,9 @@ __lll_cond_lock (int *futex, int private)
}
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
#define lll_robust_cond_lock(futex, id, private) \
__lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
extern int __lll_timedlock_wait (int *futex, const struct timespec *,
int private) attribute_hidden;
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
int private) attribute_hidden;
static inline int
__attribute__ ((always_inline))
@@ -110,19 +92,6 @@ __lll_timedlock (int *futex, const struct timespec *abstime, int private)
#define lll_timedlock(futex, abstime, private) \
__lll_timedlock (&(futex), abstime, private)
static inline int
__attribute__ ((always_inline))
__lll_robust_timedlock (int *futex, const struct timespec *abstime,
int id, int private)
{
int result = 0;
if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
result = __lll_robust_timedlock_wait (futex, abstime, private);
return result;
}
#define lll_robust_timedlock(futex, abstime, id, private) \
__lll_robust_timedlock (&(futex), abstime, id, private)
#define lll_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
@@ -132,15 +101,6 @@ __lll_robust_timedlock (int *futex, const struct timespec *abstime,
lll_futex_wake (__futex, 1, __private); \
}))
#define lll_robust_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
int __private = (private); \
int __val = atomic_exchange_rel (__futex, 0); \
if (__glibc_unlikely (__val & FUTEX_WAITERS)) \
lll_futex_wake (__futex, 1, __private); \
}))
#define lll_islocked(futex) \
(futex != 0)

sysdeps/unix/sysv/linux/x86_64/lowlevellock.h

@@ -136,23 +136,6 @@
: "cx", "r11", "cc", "memory"); \
}) \
#define lll_robust_lock(futex, id, private) \
({ int result, ignore1, ignore2; \
__asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
"jz 24f\n" \
"1:\tlea %2, %%" RDI_LP "\n" \
"2:\tsub $128, %%" RSP_LP "\n" \
".cfi_adjust_cfa_offset 128\n" \
"3:\tcallq __lll_robust_lock_wait\n" \
"4:\tadd $128, %%" RSP_LP "\n" \
".cfi_adjust_cfa_offset -128\n" \
"24:" \
: "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
"=a" (result) \
: "1" (id), "m" (futex), "3" (0), "0" (private) \
: "cx", "r11", "cc", "memory"); \
result; })
#define lll_cond_lock(futex, private) \
(void) \
({ int ignore1, ignore2, ignore3; \
@@ -171,24 +154,6 @@
: "cx", "r11", "cc", "memory"); \
})
#define lll_robust_cond_lock(futex, id, private) \
({ int result, ignore1, ignore2; \
__asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
"jz 24f\n" \
"1:\tlea %2, %%" RDI_LP "\n" \
"2:\tsub $128, %%" RSP_LP "\n" \
".cfi_adjust_cfa_offset 128\n" \
"3:\tcallq __lll_robust_lock_wait\n" \
"4:\tadd $128, %%" RSP_LP "\n" \
".cfi_adjust_cfa_offset -128\n" \
"24:" \
: "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
"=a" (result) \
: "1" (id | FUTEX_WAITERS), "m" (futex), "3" (0), \
"0" (private) \
: "cx", "r11", "cc", "memory"); \
result; })
#define lll_timedlock(futex, timeout, private) \
({ int result, ignore1, ignore2, ignore3; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
@@ -215,25 +180,6 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
#define lll_timedlock_elision(futex, adapt_count, timeout, private) \
__lll_timedlock_elision(&(futex), &(adapt_count), timeout, private)
#define lll_robust_timedlock(futex, timeout, id, private) \
({ int result, ignore1, ignore2, ignore3; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
"jz 24f\n\t" \
"1:\tlea %4, %%" RDI_LP "\n" \
"0:\tmov %8, %%" RDX_LP "\n" \
"2:\tsub $128, %%" RSP_LP "\n" \
".cfi_adjust_cfa_offset 128\n" \
"3:\tcallq __lll_robust_timedlock_wait\n" \
"4:\tadd $128, %%" RSP_LP "\n" \
".cfi_adjust_cfa_offset -128\n" \
"24:" \
: "=a" (result), "=D" (ignore1), "=S" (ignore2), \
"=&d" (ignore3), "=m" (futex) \
: "0" (0), "1" (id), "m" (futex), "m" (timeout), \
"2" (private) \
: "memory", "cx", "cc", "r10", "r11"); \
result; })
#if !IS_IN (libc) || defined UP
# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t" \
"je 24f\n\t"
@@ -276,26 +222,6 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
: "ax", "cx", "r11", "cc", "memory"); \
})
#define lll_robust_unlock(futex, private) \
do \
{ \
int ignore; \
__asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \
"je 24f\n\t" \
"1:\tlea %0, %%" RDI_LP "\n" \
"2:\tsub $128, %%" RSP_LP "\n" \
".cfi_adjust_cfa_offset 128\n" \
"3:\tcallq __lll_unlock_wake\n" \
"4:\tadd $128, %%" RSP_LP "\n" \
".cfi_adjust_cfa_offset -128\n" \
"24:" \
: "=m" (futex), "=&D" (ignore) \
: "i" (FUTEX_WAITERS), "m" (futex), \
"S" (private) \
: "ax", "cx", "r11", "cc", "memory"); \
} \
while (0)
#define lll_islocked(futex) \
(futex != LLL_LOCK_INITIALIZER)

sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S (deleted)

@@ -1,306 +0,0 @@
/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
#include <pthread-errnos.h>
#include <lowlevellock.h>
#include <lowlevelrobustlock.h>
#include <kernel-features.h>
.text
#define FUTEX_WAITERS 0x80000000
#define FUTEX_OWNER_DIED 0x40000000
#ifdef __ASSUME_PRIVATE_FUTEX
# define LOAD_FUTEX_WAIT(reg) \
xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
# define LOAD_FUTEX_WAIT_ABS(reg) \
xorl $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
#else
# if FUTEX_WAIT == 0
# define LOAD_FUTEX_WAIT(reg) \
xorl $FUTEX_PRIVATE_FLAG, reg ; \
andl %fs:PRIVATE_FUTEX, reg
# else
# define LOAD_FUTEX_WAIT(reg) \
xorl $FUTEX_PRIVATE_FLAG, reg ; \
andl %fs:PRIVATE_FUTEX, reg ; \
orl $FUTEX_WAIT, reg
# endif
# define LOAD_FUTEX_WAIT_ABS(reg) \
xorl $FUTEX_PRIVATE_FLAG, reg ; \
andl %fs:PRIVATE_FUTEX, reg ; \
orl $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
#endif
.globl __lll_robust_lock_wait
.type __lll_robust_lock_wait,@function
.hidden __lll_robust_lock_wait
.align 16
__lll_robust_lock_wait:
cfi_startproc
pushq %r10
cfi_adjust_cfa_offset(8)
pushq %rdx
cfi_adjust_cfa_offset(8)
cfi_offset(%r10, -16)
cfi_offset(%rdx, -24)
xorq %r10, %r10 /* No timeout. */
LOAD_FUTEX_WAIT (%esi)
4: movl %eax, %edx
orl $FUTEX_WAITERS, %edx
testl $FUTEX_OWNER_DIED, %eax
jnz 3f
cmpl %edx, %eax
je 1f
LOCK
cmpxchgl %edx, (%rdi)
jnz 2f
1: movl $SYS_futex, %eax
syscall
movl (%rdi), %eax
2: testl %eax, %eax
jne 4b
movl %fs:TID, %edx
orl $FUTEX_WAITERS, %edx
LOCK
cmpxchgl %edx, (%rdi)
jnz 4b
/* NB: %rax == 0 */
3: popq %rdx
cfi_adjust_cfa_offset(-8)
cfi_restore(%rdx)
popq %r10
cfi_adjust_cfa_offset(-8)
cfi_restore(%r10)
retq
cfi_endproc
.size __lll_robust_lock_wait,.-__lll_robust_lock_wait
.globl __lll_robust_timedlock_wait
.type __lll_robust_timedlock_wait,@function
.hidden __lll_robust_timedlock_wait
.align 16
__lll_robust_timedlock_wait:
cfi_startproc
# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
# ifdef PIC
cmpl $0, __have_futex_clock_realtime(%rip)
# else
cmpl $0, __have_futex_clock_realtime
# endif
je .Lreltmo
# endif
cmpq $0, (%rdx)
js 7f
pushq %r9
cfi_adjust_cfa_offset(8)
cfi_rel_offset(%r9, 0)
movq %rdx, %r10
movl $0xffffffff, %r9d
LOAD_FUTEX_WAIT_ABS (%esi)
1: testl $FUTEX_OWNER_DIED, %eax
jnz 3f
movl %eax, %edx
orl $FUTEX_WAITERS, %edx
cmpl %eax, %edx
je 5f
LOCK
cmpxchgl %edx, (%rdi)
movq $0, %rcx /* Must use mov to avoid changing cc. */
jnz 6f
5: movl $SYS_futex, %eax
syscall
movl %eax, %ecx
movl (%rdi), %eax
6: testl %eax, %eax
jne 2f
movl %fs:TID, %edx
orl $FUTEX_WAITERS, %edx
LOCK
cmpxchgl %edx, (%rdi)
jnz 2f
3: popq %r9
cfi_adjust_cfa_offset(-8)
cfi_restore(%r9)
retq
cfi_adjust_cfa_offset(8)
cfi_rel_offset(%r9, 0)
/* Check whether the time expired. */
2: cmpl $-ETIMEDOUT, %ecx
je 4f
cmpl $-EINVAL, %ecx
jne 1b
4: movl %ecx, %eax
negl %eax
jmp 3b
cfi_adjust_cfa_offset(-8)
cfi_restore(%r9)
7: movl $ETIMEDOUT, %eax
retq
# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
.Lreltmo:
/* Check for a valid timeout value. */
cmpq $1000000000, 8(%rdx)
jae 3f
pushq %r8
cfi_adjust_cfa_offset(8)
pushq %r9
cfi_adjust_cfa_offset(8)
pushq %r12
cfi_adjust_cfa_offset(8)
pushq %r13
cfi_adjust_cfa_offset(8)
cfi_offset(%r8, -16)
cfi_offset(%r9, -24)
cfi_offset(%r12, -32)
cfi_offset(%r13, -40)
pushq %rsi
cfi_adjust_cfa_offset(8)
/* Stack frame for the timespec and timeval structs. */
subq $32, %rsp
cfi_adjust_cfa_offset(32)
movq %rdi, %r12
movq %rdx, %r13
1: movq %rax, 16(%rsp)
/* Get current time. */
movq %rsp, %rdi
xorl %esi, %esi
/* This call works because we directly jump to a system call entry
which preserves all the registers. */
call JUMPTARGET(__gettimeofday)
/* Compute relative timeout. */
movq 8(%rsp), %rax
movl $1000, %edi
mul %rdi /* Milli seconds to nano seconds. */
movq (%r13), %rdi
movq 8(%r13), %rsi
subq (%rsp), %rdi
subq %rax, %rsi
jns 4f
addq $1000000000, %rsi
decq %rdi
4: testq %rdi, %rdi
js 8f /* Time is already up. */
/* Futex call. */
movq %rdi, (%rsp) /* Store relative timeout. */
movq %rsi, 8(%rsp)
movq 16(%rsp), %rdx
movl %edx, %eax
orl $FUTEX_WAITERS, %edx
testl $FUTEX_OWNER_DIED, %eax
jnz 6f
cmpl %eax, %edx
je 2f
LOCK
cmpxchgl %edx, (%r12)
movq $0, %rcx /* Must use mov to avoid changing cc. */
jnz 5f
2: movq %rsp, %r10
movl 32(%rsp), %esi
LOAD_FUTEX_WAIT (%esi)
movq %r12, %rdi
movl $SYS_futex, %eax
syscall
movq %rax, %rcx
movl (%r12), %eax
5: testl %eax, %eax
jne 7f
movl %fs:TID, %edx
orl $FUTEX_WAITERS, %edx
LOCK
cmpxchgl %edx, (%r12)
jnz 7f
6: addq $40, %rsp
cfi_adjust_cfa_offset(-40)
popq %r13
cfi_adjust_cfa_offset(-8)
cfi_restore(%r13)
popq %r12
cfi_adjust_cfa_offset(-8)
cfi_restore(%r12)
popq %r9
cfi_adjust_cfa_offset(-8)
cfi_restore(%r9)
popq %r8
cfi_adjust_cfa_offset(-8)
cfi_restore(%r8)
retq
3: movl $EINVAL, %eax
retq
cfi_adjust_cfa_offset(72)
cfi_offset(%r8, -16)
cfi_offset(%r9, -24)
cfi_offset(%r12, -32)
cfi_offset(%r13, -40)
/* Check whether the time expired. */
7: cmpl $-ETIMEDOUT, %ecx
jne 1b
8: movl $ETIMEDOUT, %eax
jmp 6b
#endif
cfi_endproc
.size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait