hurd: Add __pthread_spin_wait and use it
900778283a ("htl: make pthread_spin_lock really spin") made
pthread_spin_lock really spin rather than block, but the current users of
__pthread_spin_lock assumed that it blocks, i.e. they used it as a
lightweight mutex fitting in just one int.
__pthread_spin_wait brings that support back.
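A minimal sketch, for illustration only and not part of the commit: the struct
and function names below are hypothetical, and the declarations of
__pthread_spinlock_t, __pthread_spin_wait and __pthread_spin_unlock are assumed
to come from the internal htl headers touched here. It shows the intended usage
pattern: an internal caller that treats the one-int lock word as a lightweight
mutex calls __pthread_spin_wait, which waits until the holder releases the
lock, instead of the now purely spinning public pthread_spin_lock.

/* Hypothetical internal caller; assumes the internal htl declarations of
   __pthread_spinlock_t, __pthread_spin_wait and __pthread_spin_unlock
   (see the bits/spin-lock-inline.h hunk below) are in scope.  */
struct example_counter
{
  __pthread_spinlock_t __lock;	/* the whole "mutex" is this one int */
  int __value;
};

static int
example_increment (struct example_counter *c)
{
  /* Wait until the lock word is free, as the old __pthread_spin_lock
     behaviour allowed, then run a short critical section.  */
  __pthread_spin_wait (&c->__lock);
  int v = ++c->__value;
  __pthread_spin_unlock (&c->__lock);
  return v;
}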
commit 8ba6ad703c
parent cd7965bd97
@@ -24,7 +24,7 @@
 int
 pthread_barrier_wait (pthread_barrier_t *barrier)
 {
-  __pthread_spin_lock (&barrier->__lock);
+  __pthread_spin_wait (&barrier->__lock);
   if (--barrier->__pending == 0)
     {
       barrier->__pending = barrier->__count;
@@ -26,7 +26,7 @@ __pthread_cond_broadcast (pthread_cond_t *cond)
 {
   struct __pthread *wakeup;
 
-  __pthread_spin_lock (&cond->__lock);
+  __pthread_spin_wait (&cond->__lock);
   while ((wakeup = cond->__queue))
     {
       __pthread_dequeue (wakeup);
@@ -34,7 +34,7 @@ __pthread_cond_broadcast (pthread_cond_t *cond)
       /* Wake it up without spin held, so it may have a chance to really
          preempt us */
       __pthread_wakeup (wakeup);
-      __pthread_spin_lock (&cond->__lock);
+      __pthread_spin_wait (&cond->__lock);
     }
   __pthread_spin_unlock (&cond->__lock);
 
@@ -24,7 +24,7 @@ __pthread_cond_destroy (pthread_cond_t *cond)
 {
   int ret = 0;
 
-  __pthread_spin_lock (&cond->__lock);
+  __pthread_spin_wait (&cond->__lock);
   if (cond->__queue)
     ret = EBUSY;
   __pthread_spin_unlock (&cond->__lock);
@@ -27,7 +27,7 @@ __pthread_cond_signal (pthread_cond_t *cond)
 {
   struct __pthread *wakeup;
 
-  __pthread_spin_lock (&cond->__lock);
+  __pthread_spin_wait (&cond->__lock);
   wakeup = cond->__queue;
   if (wakeup != NULL)
     __pthread_dequeue (wakeup);
@@ -50,7 +50,7 @@ cancel_hook (void *arg)
   pthread_cond_t *cond = ctx->cond;
   int unblock;
 
-  __pthread_spin_lock (&cond->__lock);
+  __pthread_spin_wait (&cond->__lock);
   /* The thread only needs to be awaken if it's blocking or about to block.
      If it was already unblocked, it's not queued any more. */
   unblock = wakeup->prevp != NULL;
@@ -112,7 +112,7 @@ __pthread_cond_timedwait_internal (pthread_cond_t *cond,
      the cancellation hook to simplify the cancellation procedure, i.e.
      if the thread is queued, it can be cancelled, otherwise it is
      already unblocked, progressing on the return path. */
-  __pthread_spin_lock (&cond->__lock);
+  __pthread_spin_wait (&cond->__lock);
   __pthread_enqueue (&cond->__queue, self);
   if (cond->__attr != NULL)
     clock_id = cond->__attr->__clock;
@@ -135,7 +135,7 @@ __pthread_cond_timedwait_internal (pthread_cond_t *cond,
       __pthread_block (self);
     }
 
-  __pthread_spin_lock (&cond->__lock);
+  __pthread_spin_wait (&cond->__lock);
   if (self->prevp == NULL)
     {
       /* Another thread removed us from the list of waiters, which means a
@@ -36,7 +36,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
   atomic_full_barrier ();
   if (once_control->__run == 0)
     {
-      __pthread_spin_lock (&once_control->__lock);
+      __pthread_spin_wait (&once_control->__lock);
 
       if (once_control->__run == 0)
         {
@@ -33,7 +33,7 @@ __pthread_rwlock_timedrdlock_internal (struct __pthread_rwlock *rwlock,
   int drain;
   struct __pthread *self;
 
-  __pthread_spin_lock (&rwlock->__lock);
+  __pthread_spin_wait (&rwlock->__lock);
   if (__pthread_spin_trylock (&rwlock->__held) == 0)
     /* Successfully acquired the lock. */
     {
@@ -79,7 +79,7 @@ __pthread_rwlock_timedrdlock_internal (struct __pthread_rwlock *rwlock,
       __pthread_block (self);
     }
 
-  __pthread_spin_lock (&rwlock->__lock);
+  __pthread_spin_wait (&rwlock->__lock);
   if (self->prevp == NULL)
     /* Another thread removed us from the queue, which means a wakeup message
        has been sent. It was either consumed while we were blocking, or
@@ -33,7 +33,7 @@ __pthread_rwlock_timedwrlock_internal (struct __pthread_rwlock *rwlock,
   int drain;
   struct __pthread *self;
 
-  __pthread_spin_lock (&rwlock->__lock);
+  __pthread_spin_wait (&rwlock->__lock);
   if (__pthread_spin_trylock (&rwlock->__held) == 0)
     /* Successfully acquired the lock. */
     {
@@ -65,7 +65,7 @@ __pthread_rwlock_timedwrlock_internal (struct __pthread_rwlock *rwlock,
       __pthread_block (self);
     }
 
-  __pthread_spin_lock (&rwlock->__lock);
+  __pthread_spin_wait (&rwlock->__lock);
   if (self->prevp == NULL)
     /* Another thread removed us from the queue, which means a wakeup message
        has been sent. It was either consumed while we were blocking, or
@@ -25,7 +25,7 @@
 int
 pthread_rwlock_tryrdlock (struct __pthread_rwlock *rwlock)
 {
-  __pthread_spin_lock (&rwlock->__lock);
+  __pthread_spin_wait (&rwlock->__lock);
   if (__pthread_spin_trylock (&rwlock->__held) == 0)
     /* Successfully acquired the lock. */
     {
@@ -25,7 +25,7 @@
 int
 pthread_rwlock_trywrlock (struct __pthread_rwlock *rwlock)
 {
-  __pthread_spin_lock (&rwlock->__lock);
+  __pthread_spin_wait (&rwlock->__lock);
   if (__pthread_spin_trylock (&rwlock->__held) == 0)
     /* Successfully acquired the lock. */
     {
@@ -28,7 +28,7 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
 {
   struct __pthread *wakeup;
 
-  __pthread_spin_lock (&rwlock->__lock);
+  __pthread_spin_wait (&rwlock->__lock);
 
   assert (__pthread_spin_trylock (&rwlock->__held) == EBUSY);
 
@@ -22,7 +22,7 @@
 int
 __sem_getvalue (sem_t *restrict sem, int *restrict value)
 {
-  __pthread_spin_lock (&sem->__lock);
+  __pthread_spin_wait (&sem->__lock);
   *value = sem->__value;
   __pthread_spin_unlock (&sem->__lock);
 
@@ -26,7 +26,7 @@ __sem_post (sem_t *sem)
 {
   struct __pthread *wakeup;
 
-  __pthread_spin_lock (&sem->__lock);
+  __pthread_spin_wait (&sem->__lock);
   if (sem->__value > 0)
     /* Do a quick up. */
     {
@@ -32,7 +32,7 @@ __sem_timedwait_internal (sem_t *restrict sem,
   struct __pthread *self;
   clockid_t clock_id = CLOCK_REALTIME;
 
-  __pthread_spin_lock (&sem->__lock);
+  __pthread_spin_wait (&sem->__lock);
   if (sem->__value > 0)
     /* Successful down. */
     {
@@ -59,7 +59,7 @@ __sem_timedwait_internal (sem_t *restrict sem,
   else
     err = __pthread_block_intr (self);
 
-  __pthread_spin_lock (&sem->__lock);
+  __pthread_spin_wait (&sem->__lock);
   if (self->prevp == NULL)
     /* Another thread removed us from the queue, which means a wakeup message
        has been sent. It was either consumed while we were blocking, or
@@ -24,7 +24,7 @@
 int
 __sem_trywait (sem_t *sem)
 {
-  __pthread_spin_lock (&sem->__lock);
+  __pthread_spin_wait (&sem->__lock);
   if (sem->__value > 0)
     /* Successful down. */
     {
@@ -71,6 +71,15 @@ __pthread_spin_lock (__pthread_spinlock_t *__lock)
   return 0;
 }
 
+__PT_SPIN_INLINE int __pthread_spin_wait (__pthread_spinlock_t *__lock);
+
+__PT_SPIN_INLINE int
+__pthread_spin_wait (__pthread_spinlock_t *__lock)
+{
+  __spin_lock ((__spin_lock_t *) __lock);
+  return 0;
+}
+
 __PT_SPIN_INLINE int __pthread_spin_unlock (__pthread_spinlock_t *__lock);
 
 __PT_SPIN_INLINE int
@@ -56,7 +56,7 @@ __pthread_hurd_cond_timedwait_internal (pthread_cond_t *cond,
 {
   int unblock;
 
-  __pthread_spin_lock (&cond->__lock);
+  __pthread_spin_wait (&cond->__lock);
   /* The thread only needs to be awaken if it's blocking or about to block.
      If it was already unblocked, it's not queued any more. */
   unblock = self->prevp != NULL;
@@ -81,7 +81,7 @@ __pthread_hurd_cond_timedwait_internal (pthread_cond_t *cond,
      the condition variable's lock. */
 
   __spin_lock (&ss->lock);
-  __pthread_spin_lock (&cond->__lock);
+  __pthread_spin_wait (&cond->__lock);
   cancel = ss->cancel;
   if (cancel)
     /* We were cancelled before doing anything. Don't block at all. */
@@ -123,7 +123,7 @@ __pthread_hurd_cond_timedwait_internal (pthread_cond_t *cond,
   /* As it was done when enqueueing, prevent hurd_thread_cancel from
      suspending us while the condition lock is held. */
   __spin_lock (&ss->lock);
-  __pthread_spin_lock (&cond->__lock);
+  __pthread_spin_wait (&cond->__lock);
   if (self->prevp == NULL)
     /* Another thread removed us from the list of waiters, which means
        a wakeup message has been sent. It was either consumed while