htl: Make pthread_cond_destroy wait for threads to be woken
This allows reusing the storage after calling pthread_cond_destroy.

	* sysdeps/htl/bits/types/struct___pthread_cond.h (__pthread_cond):
	Replace unused struct __pthread_condimpl *__impl field with
	unsigned int __wrefs.
	(__PTHREAD_COND_INITIALIZER): Update accordingly.
	* sysdeps/htl/pt-cond-timedwait.c (__pthread_cond_timedwait_internal):
	Register as waiter in __wrefs field.  On unregistering, wake any
	pending pthread_cond_destroy.
	* sysdeps/htl/pt-cond-destroy.c (__pthread_cond_destroy): Register
	wake request in __wrefs.
	* nptl/Makefile (tests): Move tst-cond20 tst-cond21 to...
	* sysdeps/pthread/Makefile (tests): ... here.
	* nptl/tst-cond20.c, nptl/tst-cond21.c: Move to...
	* sysdeps/pthread/tst-cond20.c, sysdeps/pthread/tst-cond21.c: ... here.
commit 8081702460
parent a3e589d1f6
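
In outline: __wrefs packs a wake-request flag (bit 0) and a count of waiters (bits 1 and up, i.e. in units of 2) into a single word. Below is a standalone C11 model of that protocol, for illustration only; sched_yield stands in for the Mach gsync_wait/gsync_wake blocking primitives, and all model_* names are invented for this sketch, not glibc identifiers.

/* Standalone model of the __wrefs protocol (illustrative, not glibc code).
   Bit 0: wake request from pthread_cond_destroy; bits 1..: waiter count.  */
#include <sched.h>
#include <stdatomic.h>

struct model_cond
{
  atomic_uint wrefs;
};

/* Waiter: register before blocking.  Relaxed MO suffices; the
   synchronization happens on the decrement below.  */
static void
model_waiter_register (struct model_cond *c)
{
  atomic_fetch_add_explicit (&c->wrefs, 2, memory_order_relaxed);
}

/* Waiter: unregister after waking.  A prior value of 3 means we were
   the last waiter (2) and destruction is pending (1); the real code
   then calls __gsync_wake on the destroyer.  */
static int
model_waiter_unregister (struct model_cond *c)
{
  return atomic_fetch_sub_explicit (&c->wrefs, 2, memory_order_release) == 3;
}

/* Destroyer: raise the flag, then wait until the waiter count drains.
   The real code blocks in gsync_wait instead of yielding.  */
static void
model_destroy (struct model_cond *c)
{
  unsigned int wrefs
    = atomic_fetch_or_explicit (&c->wrefs, 1, memory_order_acquire);
  while (wrefs >> 1 != 0)
    {
      sched_yield ();
      wrefs = atomic_load_explicit (&c->wrefs, memory_order_acquire);
    }
  /* The memory the condvar occupies can now be reused.  */
}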
--- a/nptl/Makefile
+++ b/nptl/Makefile
@@ -263,7 +263,7 @@ tests = tst-attr2 tst-attr3 tst-default-attr \
 	tst-mutexpi1 tst-mutexpi2 tst-mutexpi3 tst-mutexpi4 \
 	tst-mutexpi5 tst-mutexpi5a tst-mutexpi6 tst-mutexpi7 tst-mutexpi7a \
 	tst-mutexpi9 \
-	tst-cond20 tst-cond21 tst-cond22 tst-cond26 \
+	tst-cond22 tst-cond26 \
 	tst-robustpi1 tst-robustpi2 tst-robustpi3 tst-robustpi4 tst-robustpi5 \
 	tst-robustpi6 tst-robustpi7 tst-robustpi9 \
 	tst-rwlock2 tst-rwlock2a tst-rwlock2b tst-rwlock3 \
--- a/sysdeps/htl/bits/types/struct___pthread_cond.h
+++ b/sysdeps/htl/bits/types/struct___pthread_cond.h
@@ -27,12 +27,12 @@ struct __pthread_cond
   __pthread_spinlock_t __lock;
   struct __pthread *__queue;
   struct __pthread_condattr *__attr;
-  struct __pthread_condimpl *__impl;
+  unsigned int __wrefs;
   void *__data;
 };
 
 /* Initializer for a condition variable.  */
 #define __PTHREAD_COND_INITIALIZER \
-  { __PTHREAD_SPIN_LOCK_INITIALIZER, NULL, NULL, NULL, NULL }
+  { __PTHREAD_SPIN_LOCK_INITIALIZER, NULL, NULL, 0, NULL }
 
 #endif /* bits/types/struct___pthread_cond.h */
--- a/sysdeps/htl/pt-cond-destroy.c
+++ b/sysdeps/htl/pt-cond-destroy.c
@@ -22,14 +22,26 @@
 int
 __pthread_cond_destroy (pthread_cond_t *cond)
 {
-  int ret = 0;
+  /* Set the wake request flag.  */
+  unsigned int wrefs = atomic_fetch_or_acquire (&cond->__wrefs, 1);
 
   __pthread_spin_wait (&cond->__lock);
   if (cond->__queue)
-    ret = EBUSY;
+    {
+      __pthread_spin_unlock (&cond->__lock);
+      return EBUSY;
+    }
   __pthread_spin_unlock (&cond->__lock);
 
-  return ret;
+  while (wrefs >> 1 != 0)
+    {
+      gsync_wait (__mach_task_self (), (vm_offset_t) &cond->__wrefs, wrefs,
+		  0, 0, 0);
+      wrefs = atomic_load_acquire (&cond->__wrefs);
+    }
+  /* The memory the condvar occupies can now be reused.  */
+
+  return 0;
 }
 
 weak_alias (__pthread_cond_destroy, pthread_cond_destroy);
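
The user-visible effect, sketched as a hypothetical program (not one of the moved tests): POSIX allows a condition variable to be destroyed as soon as all threads blocked on it have been awakened, so the following is valid, and with this change pthread_cond_destroy no longer returns while woken waiters may still be touching the condvar's storage.

#include <pthread.h>
#include <stdlib.h>

struct shared
{
  pthread_mutex_t lock;
  pthread_cond_t cond;
  int done;
};

static void *
waiter (void *arg)
{
  struct shared *s = arg;
  pthread_mutex_lock (&s->lock);
  while (!s->done)
    pthread_cond_wait (&s->cond, &s->lock);
  pthread_mutex_unlock (&s->lock);
  return NULL;
}

int
main (void)
{
  struct shared *s = malloc (sizeof *s);
  pthread_t t;

  pthread_mutex_init (&s->lock, NULL);
  pthread_cond_init (&s->cond, NULL);
  s->done = 0;
  pthread_create (&t, NULL, waiter, s);

  pthread_mutex_lock (&s->lock);
  s->done = 1;
  pthread_cond_broadcast (&s->cond);
  pthread_mutex_unlock (&s->lock);

  /* All waiters have been awakened, so destroying here is legal even
     though the waiter may not have returned from pthread_cond_wait
     yet; destroy now waits until the storage is safe to reuse.  */
  pthread_cond_destroy (&s->cond);

  pthread_join (t, NULL);
  pthread_mutex_destroy (&s->lock);
  free (s);
  return 0;
}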
--- a/sysdeps/htl/pt-cond-timedwait.c
+++ b/sysdeps/htl/pt-cond-timedwait.c
@@ -144,6 +144,10 @@ __pthread_cond_timedwait_internal (pthread_cond_t *cond,
   /* Release MUTEX before blocking.  */
   __pthread_mutex_unlock (mutex);
 
+  /* Increase the waiter reference count.  Relaxed MO is sufficient because
+     we only need to synchronize when decrementing the reference count.  */
+  atomic_fetch_add_relaxed (&cond->__wrefs, 2);
+
   /* Block the thread.  */
   if (abstime != NULL)
     err = __pthread_timedblock (self, abstime, clock_id);
@@ -178,6 +182,13 @@ __pthread_cond_timedwait_internal (pthread_cond_t *cond,
     }
   __pthread_spin_unlock (&cond->__lock);
 
+  /* If destruction is pending (i.e., the wake-request flag is nonzero) and we
+     are the last waiter (prior value of __wrefs was 1 << 1), then wake any
+     threads waiting in pthread_cond_destroy.  Release MO to synchronize with
+     these threads.  Don't bother clearing the wake-up request flag.  */
+  if ((atomic_fetch_add_release (&cond->__wrefs, -2)) == 3)
+    __gsync_wake (__mach_task_self (), (vm_offset_t) &cond->__wrefs, 0, 0);
+
   if (drain)
     __pthread_block (self);
 
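
To spell out the == 3 test above: with one waiter registered (2) and destruction pending (1), the prior value returned by the fetch-add is 3, and afterwards wrefs >> 1 is 0, so the destroyer's drain loop exits. A throwaway check of that arithmetic (not glibc code):

#include <assert.h>

int
main (void)
{
  unsigned int wrefs = 0;
  wrefs += 2;                  /* waiter registers: wrefs == 2 */
  wrefs |= 1;                  /* destroy sets the wake flag: wrefs == 3 */
  unsigned int prior = wrefs;  /* value the fetch-add returns */
  wrefs -= 2;                  /* waiter unregisters: wrefs == 1 */
  assert (prior == 3);         /* last waiter + pending destroy: wake */
  assert (wrefs >> 1 == 0);    /* destroyer's drain loop exits */
  return 0;
}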
--- a/sysdeps/mach/hurd/htl/pt-hurd-cond-timedwait.c
+++ b/sysdeps/mach/hurd/htl/pt-hurd-cond-timedwait.c
@@ -111,6 +111,10 @@ __pthread_hurd_cond_timedwait_internal (pthread_cond_t *cond,
   /* Release MUTEX before blocking.  */
   __pthread_mutex_unlock (mutex);
 
+  /* Increase the waiter reference count.  Relaxed MO is sufficient because
+     we only need to synchronize when decrementing the reference count.  */
+  atomic_fetch_add_relaxed (&cond->__wrefs, 2);
+
   /* Block the thread.  */
   if (abstime != NULL)
     err = __pthread_timedblock (self, abstime, clock_id);
@@ -144,6 +148,13 @@ __pthread_hurd_cond_timedwait_internal (pthread_cond_t *cond,
       __pthread_block (self);
     }
 
+  /* If destruction is pending (i.e., the wake-request flag is nonzero) and we
+     are the last waiter (prior value of __wrefs was 1 << 1), then wake any
+     threads waiting in pthread_cond_destroy.  Release MO to synchronize with
+     these threads.  Don't bother clearing the wake-up request flag.  */
+  if ((atomic_fetch_add_release (&cond->__wrefs, -2)) == 3)
+    __gsync_wake (__mach_task_self (), (vm_offset_t) &cond->__wrefs, 0, 0);
+
   /* Clear the hook, now that we are done blocking.  */
   ss->cancel_hook = NULL;
   /* Check the cancellation flag; we might have unblocked due to
--- a/sysdeps/pthread/Makefile
+++ b/sysdeps/pthread/Makefile
@@ -49,7 +49,7 @@ tests += tst-cnd-basic tst-mtx-trylock tst-cnd-broadcast \
 	tst-cond1 tst-cond2 tst-cond3 tst-cond4 tst-cond5 tst-cond6 tst-cond7 \
 	tst-cond8 tst-cond9 tst-cond10 tst-cond11 tst-cond12 tst-cond13 \
 	tst-cond14 tst-cond15 tst-cond16 tst-cond17 tst-cond18 tst-cond19 \
-	tst-cond23 tst-cond24 tst-cond25 tst-cond27 \
+	tst-cond20 tst-cond21 tst-cond23 tst-cond24 tst-cond25 tst-cond27 \
 	tst-cond-except \
 	tst-join1 tst-join2 tst-join3 tst-join4 tst-join5 tst-join6 tst-join7 \
 	tst-join8 tst-join9 tst-join10 tst-join11 tst-join12 tst-join13 \