* sysdeps/unix/sysv/linux/kernel-features.h: Add __ASSUME_SET_ROBUST_LIST.

Ulrich Drepper 2006-03-28 04:25:17 +00:00
parent 5b20043897
commit 0f6699ea05
16 changed files with 520 additions and 123 deletions

ChangeLog

@@ -1,3 +1,8 @@
2006-03-27 Ulrich Drepper <drepper@redhat.com>
* sysdeps/unix/sysv/linux/kernel-features.h: Add
__ASSUME_SET_ROBUST_LIST.
2006-03-27 Jakub Jelinek <jakub@redhat.com>
* wcsmbs/wchar.h (btowc, wctob): Don't optimize in C++.

nptl/Makefile

@@ -206,7 +206,7 @@ tests = tst-typesizes \
tst-cond14 tst-cond15 tst-cond16 tst-cond17 tst-cond18 tst-cond19 \
tst-cond20 tst-cond21 \
tst-robust1 tst-robust2 tst-robust3 tst-robust4 tst-robust5 \
tst-robust6 tst-robust7 \
tst-robust6 tst-robust7 tst-robust8 \
tst-rwlock1 tst-rwlock2 tst-rwlock3 tst-rwlock4 tst-rwlock5 \
tst-rwlock6 tst-rwlock7 tst-rwlock8 tst-rwlock9 tst-rwlock10 \
tst-rwlock11 tst-rwlock12 tst-rwlock13 tst-rwlock14 \

nptl/allocatestack.c

@@ -365,12 +365,6 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
/* The process ID is also the same as that of the caller. */
pd->pid = THREAD_GETMEM (THREAD_SELF, pid);
/* List of robust mutexes. */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
pd->robust_list.__prev = &pd->robust_list;
#endif
pd->robust_list.__next = &pd->robust_list;
/* Allocate the DTV for this thread. */
if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
{
@@ -505,12 +499,6 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
/* The process ID is also the same as that of the caller. */
pd->pid = THREAD_GETMEM (THREAD_SELF, pid);
/* List of robust mutexes. */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
pd->robust_list.__prev = &pd->robust_list;
#endif
pd->robust_list.__next = &pd->robust_list;
/* Allocate the DTV for this thread. */
if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
{
@@ -634,6 +622,18 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
stillborn thread could be canceled while the lock is taken. */
pd->lock = LLL_LOCK_INITIALIZER;
/* The robust mutex lists also need to be initialized
unconditionally because the cleanup for the previous stack owner
might have happened in the kernel. */
pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
- offsetof (pthread_mutex_t,
__data.__list.__next));
pd->robust_head.list_op_pending = NULL;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
pd->robust_prev = &pd->robust_head;
#endif
pd->robust_head.list = &pd->robust_head;
/* We place the thread descriptor at the end of the stack. */
*pdp = pd;

nptl/descr.h

@@ -102,6 +102,15 @@ struct xid_command
};
/* Data structure used by the kernel to find robust futexes. */
struct robust_list_head
{
void *list;
long int futex_offset;
void *list_op_pending;
};
/* Thread descriptor data structure. */
struct pthread
{
@@ -136,25 +145,43 @@ struct pthread
/* List of robust mutexes the thread is holding. */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
__pthread_list_t robust_list;
void *robust_prev;
struct robust_list_head robust_head;
/* The list above is strange. It is basically a doubly-linked list
but the pointer to the next/previous element of the list points
in the middle of the object, the __next element. Whenever
casting to __pthread_list_t we need to adjust the pointer
first. */
# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))
# define ENQUEUE_MUTEX(mutex) \
do { \
__pthread_list_t *next = THREAD_GETMEM (THREAD_SELF, robust_list.__next); \
next->__prev = &mutex->__data.__list; \
mutex->__data.__list.__next = next; \
mutex->__data.__list.__prev = &THREAD_SELF->robust_list; \
THREAD_SETMEM (THREAD_SELF, robust_list.__next, &mutex->__data.__list); \
__pthread_list_t *next = (THREAD_GETMEM (THREAD_SELF, robust_head.list) \
- QUEUE_PTR_ADJUST); \
next->__prev = (void *) &mutex->__data.__list.__next; \
mutex->__data.__list.__next = (void *) &next->__next; \
mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head; \
THREAD_SETMEM (THREAD_SELF, robust_head.list, \
&mutex->__data.__list.__next); \
} while (0)
# define DEQUEUE_MUTEX(mutex) \
do { \
mutex->__data.__list.__next->__prev = mutex->__data.__list.__prev; \
mutex->__data.__list.__prev->__next = mutex->__data.__list.__next; \
__pthread_list_t *next = (__pthread_list_t *) \
((char *) mutex->__data.__list.__next - QUEUE_PTR_ADJUST); \
next->__prev = mutex->__data.__list.__prev; \
__pthread_list_t *prev = (__pthread_list_t *) \
((char *) mutex->__data.__list.__prev - QUEUE_PTR_ADJUST); \
prev->__next = mutex->__data.__list.__next; \
mutex->__data.__list.__prev = NULL; \
mutex->__data.__list.__next = NULL; \
} while (0)
#else
union
{
__pthread_slist_t robust_list;
struct robust_list_head robust_head;
};
# define ENQUEUE_MUTEX(mutex) \
do { \

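The comment above is the whole story behind QUEUE_PTR_ADJUST: the links stored in the robust list point at the __next field inside each element, not at the element itself. A minimal stand-alone sketch of that arithmetic (hypothetical list_t type, not glibc's __pthread_list_t):

#include <assert.h>
#include <stddef.h>

typedef struct list { struct list *prev; struct list *next; } list_t;
#define PTR_ADJUST (offsetof (list_t, next))   /* mirrors QUEUE_PTR_ADJUST */

int
main (void)
{
  list_t elem;
  /* What a robust-list link stores: the address of the next field.  */
  void *stored = &elem.next;
  /* Recovering the element, as the new DEQUEUE_MUTEX does.  */
  list_t *back = (list_t *) ((char *) stored - PTR_ADJUST);
  assert (back == &elem);
  return 0;
}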
nptl/init.c

@@ -60,6 +60,15 @@
size_t __static_tls_size;
size_t __static_tls_align_m1;
#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if we do not have the system call and so cannot use it. */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
__set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif
/* Version of the library, used in libthread_db to detect mismatches. */
static const char nptl_version[] __attribute_used__ = VERSION;
@@ -247,10 +256,6 @@ __pthread_initialize_minimal_internal (void)
struct pthread *pd = THREAD_SELF;
INTERNAL_SYSCALL_DECL (err);
pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
#ifdef __PTHREAD_MUTEX_HAVE_PREV
pd->robust_list.__prev = &pd->robust_list;
#endif
pd->robust_list.__next = &pd->robust_list;
THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
THREAD_SETMEM (pd, user_stack, true);
if (LLL_LOCK_INITIALIZER != 0)
@@ -259,6 +264,21 @@ __pthread_initialize_minimal_internal (void)
THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif
/* Initialize the robust mutex data. */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
pd->robust_prev = &pd->robust_head;
#endif
pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
- offsetof (pthread_mutex_t,
__data.__list.__next));
int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
sizeof (struct robust_list_head));
if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
set_robust_list_not_avail ();
/* Set initial thread's stack block from 0 up to __libc_stack_end.
It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
purposes this is good enough. */

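The registration done above can be reproduced stand-alone: point an empty list head at itself, compute futex_offset, and treat a failing set_robust_list as "kernel too old, fall back to user-space cleanup". A sketch assuming a libc that defines SYS_set_robust_list (2.6.17+); note it replaces the list glibc itself registered for this thread, which is harmless only in a toy program:

#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Same layout as the struct robust_list_head added in descr.h.  */
struct head { void *list; long int futex_offset; void *list_op_pending; };
struct mutex { void *next; int lock; };         /* hypothetical stand-in */

static struct head robust_head;
static int set_robust_list_avail;               /* mirrors __set_robust_list_avail */

int
main (void)
{
  robust_head.list = &robust_head;              /* empty: head points at itself */
  robust_head.futex_offset = (long int) (offsetof (struct mutex, lock)
                                         - offsetof (struct mutex, next));
  robust_head.list_op_pending = NULL;
  if (syscall (SYS_set_robust_list, &robust_head, sizeof robust_head) != 0)
    set_robust_list_avail = -1;                 /* ENOSYS: user-space fallback */
  printf ("set_robust_list %savailable\n",
          set_robust_list_avail < 0 ? "not " : "");
  return 0;
}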
nptl/pthreadP.h

@@ -31,6 +31,7 @@
#include <internaltypes.h>
#include <pthread-functions.h>
#include <atomic.h>
#include <kernel-features.h>
/* Atomic operations on TLS memory. */
@@ -60,13 +61,13 @@
/* Internal mutex type value. */
enum
{
PTHREAD_MUTEX_ROBUST_PRIVATE_NP = 16,
PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP
= PTHREAD_MUTEX_ROBUST_PRIVATE_NP | PTHREAD_MUTEX_RECURSIVE_NP,
PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP
= PTHREAD_MUTEX_ROBUST_PRIVATE_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP
= PTHREAD_MUTEX_ROBUST_PRIVATE_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
PTHREAD_MUTEX_ROBUST_NORMAL_NP = 16,
PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
= PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_RECURSIVE_NP,
PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
= PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP
= PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
PTHREAD_MUTEX_PRIO_INHERIT_PRIVATE_NP = 32,
PTHREAD_MUTEX_PRIO_PROTECT_PRIVATE_NP = 64
};
@@ -128,6 +129,11 @@ hidden_proto (__pthread_keys)
/* Number of threads running. */
extern unsigned int __nptl_nthreads attribute_hidden;
#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if we do not have the system call and so cannot use it. */
extern int __set_robust_list_avail attribute_hidden;
#endif
/* The library can run in debugging mode where it performs a lot more
tests. */
extern int __pthread_debug attribute_hidden;
@@ -504,4 +510,15 @@ extern int __nptl_setxid (struct xid_command *cmdp) attribute_hidden;
# define PTHREAD_STATIC_FN_REQUIRE(name) __asm (".globl " #name);
#endif
#ifndef __NR_set_robust_list
/* XXX For the time being... Once we can rely on the kernel headers
having the definition remove these lines. */
# if defined __i386__
# define __NR_set_robust_list 311
# elif defined __x86_64__
# define __NR_set_robust_list 273
# endif
#endif
#endif /* pthreadP.h */

nptl/pthread_create.c

@@ -229,6 +229,19 @@ start_thread (void *arg)
/* Initialize resolver state pointer. */
__resp = &pd->res;
#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
if (__set_robust_list_avail >= 0)
# endif
{
INTERNAL_SYSCALL_DECL (err);
/* This call should never fail because the initial call in init.c
succeeded. */
INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
sizeof (struct robust_list_head));
}
#endif
/* This is where the try/finally block should be created. For
compilers without that support we do use setjmp. */
struct pthread_unwind_buf unwind_buf;
@@ -310,35 +323,34 @@
the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE. */
atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
#ifndef __ASSUME_SET_ROBUST_LIST
/* If this thread has any robust mutexes locked, handle them now. */
# if __WORDSIZE == 64
__pthread_list_t *robust = pd->robust_list.__next;
void *robust = pd->robust_head.list;
# else
__pthread_slist_t *robust = pd->robust_list.__next;
# endif
if (__builtin_expect (robust != &pd->robust_list, 0))
/* We let the kernel do the notification if it is able to do so. */
if (__set_robust_list_avail < 0
&& __builtin_expect (robust != &pd->robust_head, 0))
{
do
{
struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
((char *) robust - offsetof (struct __pthread_mutex_s, __list));
robust = robust->__next;
((char *) robust - offsetof (struct __pthread_mutex_s,
__list.__next));
robust = *((void **) robust);
this->__list.__next = NULL;
# ifdef __PTHREAD_MUTEX_HAVE_PREV
this->__list.__prev = NULL;
# endif
this->__list.__next = NULL;
lll_robust_mutex_dead (this->__lock);
}
while (robust != &pd->robust_list);
/* Clean up so that the thread descriptor can be reused. */
pd->robust_list.__next = &pd->robust_list;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
pd->robust_list.__prev = &pd->robust_list;
#endif
while (robust != &pd->robust_head);
}
#endif
/* If the thread is detached free the TCB. */
if (IS_DETACHED (pd))

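The fallback walk above runs only when __set_robust_list_avail is negative; on 2.6.17+ kernels the kernel performs the equivalent traversal when the thread dies. Its shape, with plain stand-in types (a sketch, not the glibc code): follow the stored next pointers around the circular list, detach each mutex, and mark its lock word owner-died.

#include <stdio.h>

#define FUTEX_OWNER_DIED 0x40000000

/* next is the first field, so a link to it is a link to the node and no
   offset adjustment is needed in this sketch.  */
struct node { void *next; int lock; };
static struct { void *list; } head = { &head };

static void
cleanup_walk (void)
{
  void *robust = head.list;
  while (robust != &head)
    {
      struct node *this = robust;
      robust = *(void **) robust;        /* advance before unlinking */
      this->next = NULL;
      this->lock |= FUTEX_OWNER_DIED;    /* stand-in for lll_robust_mutex_dead */
    }
  head.list = &head;                     /* empty again, descriptor reusable */
}

int
main (void)
{
  struct node m = { &head, 1 };          /* one held mutex */
  head.list = &m;
  cleanup_walk ();
  printf ("lock=%#x\n", m.lock);         /* 0x40000001 */
  return 0;
}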
nptl/pthread_mutex_consistent.c

@@ -26,7 +26,7 @@ pthread_mutex_consistent_np (mutex)
pthread_mutex_t *mutex;
{
/* Test whether this is a robust mutex with a dead owner. */
if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_PRIVATE_NP) == 0
if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
|| mutex->__data.__owner != PTHREAD_MUTEX_INCONSISTENT)
return EINVAL;

nptl/pthread_mutex_destroy.c

@@ -25,15 +25,9 @@ int
__pthread_mutex_destroy (mutex)
pthread_mutex_t *mutex;
{
if (mutex->__data.__nusers != 0)
{
if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_PRIVATE_NP) != 0
&& (mutex->__data.__lock & FUTEX_OWNER_DIED) != 0
&& mutex->__data.__nusers == 1)
goto dead_robust_mutex;
if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
&& mutex->__data.__nusers != 0)
return EBUSY;
}
/* Set to an invalid value. */
dead_robust_mutex:

nptl/pthread_mutex_init.c

@@ -22,7 +22,6 @@
#include <string.h>
#include "pthreadP.h"
static const struct pthread_mutexattr default_attr =
{
/* Default is a normal mutex, not shared between processes. */
@@ -42,10 +41,6 @@ __pthread_mutex_init (mutex, mutexattr)
imutexattr = (const struct pthread_mutexattr *) mutexattr ?: &default_attr;
/* Sanity checks. */
// XXX For now we cannot implement robust mutexes if they are shared.
if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0
&& (imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0)
return ENOTSUP;
// XXX For now we don't support priority inherited or priority protected
// XXX mutexes.
if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
@@ -57,8 +52,18 @@
/* Copy the values from the attribute. */
mutex->__data.__kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;
if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_PRIVATE_NP;
{
#ifndef __ASSUME_SET_ROBUST_LIST
if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
&& __set_robust_list_avail < 0)
return ENOTSUP;
#endif
mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
}
switch ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
>> PTHREAD_MUTEXATTR_PROTOCOL_SHIFT)
{

nptl/pthread_mutex_lock.c

@@ -108,25 +108,33 @@ __pthread_mutex_lock (mutex)
assert (mutex->__data.__owner == 0);
break;
case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
&mutex->__data.__list.__next);
oldval = mutex->__data.__lock;
do
{
again:
if ((oldval & FUTEX_OWNER_DIED) != 0)
{
/* The previous owner died. Try locking the mutex. */
int newval;
while ((newval
int newval = id;
#ifdef NO_INCR
newval |= FUTEX_WAITERS;
#endif
newval
= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
id, oldval))
!= oldval)
newval, oldval);
if (newval != oldval)
{
if ((newval & FUTEX_OWNER_DIED) == 0)
goto normal;
oldval = newval;
goto again;
}
/* We got the mutex. */
@@ -135,6 +143,7 @@ __pthread_mutex_lock (mutex)
mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
ENQUEUE_MUTEX (mutex);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
/* Note that we deliberately exit here. If we fall
through to the end of the function __nusers would be
@@ -149,18 +158,23 @@ __pthread_mutex_lock (mutex)
return EOWNERDEAD;
}
normal:
/* Check whether we already hold the mutex. */
if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
== id, 0))
if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
{
if (mutex->__data.__kind
== PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
== PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
{
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
NULL);
return EDEADLK;
}
if (mutex->__data.__kind
== PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
== PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
{
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
NULL);
/* Just bump the counter. */
if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
/* Overflow of the counter. */
@@ -180,6 +194,7 @@ __pthread_mutex_lock (mutex)
/* This mutex is now not recoverable. */
mutex->__data.__count = 0;
lll_mutex_unlock (mutex->__data.__lock);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
}
}
@@ -187,6 +202,7 @@ __pthread_mutex_lock (mutex)
mutex->__data.__count = 1;
ENQUEUE_MUTEX (mutex);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
break;
default:

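Every robust path above now brackets the lock-word update with writes to robust_head.list_op_pending. Reduced to a sketch with stand-in types: publish the entry being operated on before touching the lock word, and clear it only once the mutex is linked, so a thread death at any instant leaves the kernel either a fully linked mutex or a pending pointer to it.

#include <stdio.h>

struct head { void *list; long int futex_offset; void *list_op_pending; };
struct node { void *next; int lock; };
static struct head robust_head = { &robust_head, 0, NULL };

static void
robust_enqueue (struct node *m, int tid)
{
  robust_head.list_op_pending = &m->next;   /* 1. publish intent           */
  m->lock = tid;                            /* 2. acquire (stand-in CAS)   */
  m->next = robust_head.list;               /* 3. link at the list head    */
  robust_head.list = &m->next;
  robust_head.list_op_pending = NULL;       /* 4. operation complete       */
}

int
main (void)
{
  struct node m = { NULL, 0 };
  robust_enqueue (&m, 42);
  printf ("lock=%d linked=%d\n", m.lock,
          robust_head.list == (void *) &m.next);
  return 0;
}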
nptl/pthread_mutex_timedlock.c

@@ -103,25 +103,27 @@ pthread_mutex_timedlock (mutex, abstime)
}
break;
case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
&mutex->__data.__list.__next);
oldval = mutex->__data.__lock;
do
{
again:
if ((oldval & FUTEX_OWNER_DIED) != 0)
{
/* The previous owner died. Try locking the mutex. */
int newval;
while ((newval
int newval
= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
id, oldval))
!= oldval)
id, oldval);
if (newval != oldval)
{
if ((newval & FUTEX_OWNER_DIED) == 0)
goto normal;
oldval = newval;
goto again;
}
/* We got the mutex. */
@@ -130,6 +132,7 @@ pthread_mutex_timedlock (mutex, abstime)
mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
ENQUEUE_MUTEX (mutex);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
/* Note that we deliberately exit here. If we fall
through to the end of the function __nusers would be
@@ -138,18 +141,23 @@ pthread_mutex_timedlock (mutex, abstime)
return EOWNERDEAD;
}
normal:
/* Check whether we already hold the mutex. */
if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
== id, 0))
if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
{
if (mutex->__data.__kind
== PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
== PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
{
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
NULL);
return EDEADLK;
}
if (mutex->__data.__kind
== PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
== PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
{
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
NULL);
/* Just bump the counter. */
if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
/* Overflow of the counter. */
@@ -170,6 +178,7 @@ pthread_mutex_timedlock (mutex, abstime)
/* This mutex is now not recoverable. */
mutex->__data.__count = 0;
lll_mutex_unlock (mutex->__data.__lock);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
}
@@ -182,6 +191,7 @@ pthread_mutex_timedlock (mutex, abstime)
mutex->__data.__count = 1;
ENQUEUE_MUTEX (mutex);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
break;
default:

nptl/pthread_mutex_trylock.c

@@ -77,25 +77,28 @@ __pthread_mutex_trylock (mutex)
return 0;
case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
&mutex->__data.__list.__next);
oldval = mutex->__data.__lock;
do
{
again:
if ((oldval & FUTEX_OWNER_DIED) != 0)
{
/* The previous owner died. Try locking the mutex. */
int newval;
while ((newval
int newval
= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
id, oldval))
!= oldval)
id, oldval);
if (newval != oldval)
{
if ((newval & FUTEX_OWNER_DIED) == 0)
goto normal;
oldval = newval;
goto again;
}
/* We got the mutex. */
@@ -104,6 +107,7 @@ __pthread_mutex_trylock (mutex)
mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
ENQUEUE_MUTEX (mutex);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
/* Note that we deliberately exit here. If we fall
through to the end of the function __nusers would be
@@ -112,18 +116,23 @@ __pthread_mutex_trylock (mutex)
return EOWNERDEAD;
}
normal:
/* Check whether we already hold the mutex. */
if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
== id, 0))
if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
{
if (mutex->__data.__kind
== PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
== PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
{
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
NULL);
return EDEADLK;
}
if (mutex->__data.__kind
== PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
== PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
{
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
NULL);
/* Just bump the counter. */
if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
/* Overflow of the counter. */
@@ -137,7 +146,11 @@ __pthread_mutex_trylock (mutex)
oldval = lll_robust_mutex_trylock (mutex->__data.__lock, id);
if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
{
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return EBUSY;
}
robust:
if (__builtin_expect (mutex->__data.__owner
@@ -147,12 +160,14 @@ __pthread_mutex_trylock (mutex)
mutex->__data.__count = 0;
if (oldval == id)
lll_mutex_unlock (mutex->__data.__lock);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
}
}
while ((oldval & FUTEX_OWNER_DIED) != 0);
ENQUEUE_MUTEX (mutex);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
mutex->__data.__owner = id;
++mutex->__data.__nusers;

nptl/pthread_mutex_unlock.c

@@ -63,10 +63,12 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
lll_mutex_unlock (mutex->__data.__lock);
break;
case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
/* Recursive mutex. */
if ((mutex->__data.__lock & FUTEX_TID_MASK)
== THREAD_GETMEM (THREAD_SELF, tid))
== THREAD_GETMEM (THREAD_SELF, tid)
&& __builtin_expect (mutex->__data.__owner
== PTHREAD_MUTEX_INCONSISTENT, 0))
{
if (--mutex->__data.__count != 0)
/* We still hold the mutex. */
@@ -84,9 +86,9 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
goto robust;
case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
if ((mutex->__data.__lock & FUTEX_TID_MASK)
!= THREAD_GETMEM (THREAD_SELF, tid)
|| ! lll_mutex_islocked (mutex->__data.__lock))
@@ -102,6 +104,8 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
robust:
/* Remove mutex from the list. */
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
&mutex->__data.__list.__next);
DEQUEUE_MUTEX (mutex);
mutex->__data.__owner = newowner;
@@ -111,6 +115,8 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
/* Unlock. */
lll_robust_mutex_unlock (mutex->__data.__lock);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
break;
default:

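DEQUEUE_MUTEX, used above, is the mirror image of the enqueue: both stored links are converted back to elements with the pointer adjustment, the element is spliced out, and its links are nulled so a later exit-time walk ignores it. A stand-alone sketch (hypothetical list_t, same conventions as the earlier one):

#include <assert.h>
#include <stddef.h>

typedef struct list { struct list *prev; struct list *next; } list_t;
#define PTR_ADJUST (offsetof (list_t, next))

static void
dequeue (list_t *elem)
{
  /* Links hold addresses of next fields; step back to the element.  */
  list_t *next = (list_t *) ((char *) elem->next - PTR_ADJUST);
  next->prev = elem->prev;
  list_t *prev = (list_t *) ((char *) elem->prev - PTR_ADJUST);
  prev->next = elem->next;
  elem->prev = NULL;
  elem->next = NULL;
}

int
main (void)
{
  list_t head, elem;
  /* Two-element circular list; every link points at a next field.  */
  head.next = head.prev = (list_t *) &elem.next;
  elem.next = elem.prev = (list_t *) &head.next;
  dequeue (&elem);
  assert (head.next == (list_t *) &head.next);  /* head is empty again */
  assert (head.prev == (list_t *) &head.next);
  return 0;
}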
nptl/tst-robust8.c (new file, 264 lines)

@@ -0,0 +1,264 @@
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/wait.h>
static void prepare (void);
#define PREPARE(argc, argv) prepare ()
static int do_test (void);
#define TEST_FUNCTION do_test ()
#define TIMEOUT 3
#include "../test-skeleton.c"
static int fd;
#define N 100
static void
prepare (void)
{
fd = create_temp_file ("tst-robust8", NULL);
if (fd == -1)
exit (1);
}
#define THESIGNAL SIGKILL
#define ROUNDS 5
#define THREADS 9
static const struct timespec before = { 0, 0 };
static pthread_mutex_t *map;
static void *
tf (void *arg)
{
long int nr = (long int) arg;
int fct = nr % 3;
uint8_t state[N];
memset (state, '\0', sizeof (state));
while (1)
{
int r = random () % N;
if (state[r] == 0)
{
int e;
switch (fct)
{
case 0:
e = pthread_mutex_lock (&map[r]);
if (e != 0)
{
printf ("mutex_lock of %d in thread %ld failed with %d\n",
r, nr, e);
exit (1);
}
state[r] = 1;
break;
case 1:
e = pthread_mutex_timedlock (&map[r], &before);
if (e != 0 && e != ETIMEDOUT)
{
printf ("\
mutex_timedlock of %d in thread %ld failed with %d\n",
r, nr, e);
exit (1);
}
break;
default:
e = pthread_mutex_trylock (&map[r]);
if (e != 0 && e != EBUSY)
{
printf ("mutex_trylock of %d in thread %ld failed with %d\n",
r, nr, e);
exit (1);
}
break;
}
if (e == EOWNERDEAD)
pthread_mutex_consistent_np (&map[r]);
if (e == 0 || e == EOWNERDEAD)
state[r] = 1;
}
else
{
int e = pthread_mutex_unlock (&map[r]);
if (e != 0)
{
printf ("mutex_unlock of %d in thread %ld failed with %d\n",
r, nr, e);
exit (1);
}
state[r] = 0;
}
}
}
static void
child (int round)
{
for (int thread = 1; thread <= THREADS; ++thread)
{
pthread_t th;
if (pthread_create (&th, NULL, tf, (void *) (long int) thread) != 0)
{
printf ("cannot create thread %d in round %d\n", thread, round);
exit (1);
}
}
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = 1000000000 / ROUNDS;
while (nanosleep (&ts, &ts) != 0)
/* nothing */;
/* Time to die. */
kill (getpid (), THESIGNAL);
/* We better never get here. */
abort ();
}
static int
do_test (void)
{
if (ftruncate (fd, N * sizeof (pthread_mutex_t)) != 0)
{
puts ("cannot size new file");
return 1;
}
map = mmap (NULL, N * sizeof (pthread_mutex_t), PROT_READ | PROT_WRITE,
MAP_SHARED, fd, 0);
if (map == MAP_FAILED)
{
puts ("mapping failed");
return 1;
}
pthread_mutexattr_t ma;
if (pthread_mutexattr_init (&ma) != 0)
{
puts ("mutexattr_init failed");
return 0;
}
if (pthread_mutexattr_setrobust_np (&ma, PTHREAD_MUTEX_ROBUST_NP) != 0)
{
puts ("mutexattr_setrobust failed");
return 1;
}
if (pthread_mutexattr_setpshared (&ma, PTHREAD_PROCESS_SHARED) != 0)
{
puts ("mutexattr_setpshared failed");
return 1;
}
for (int round = 1; round <= ROUNDS; ++round)
{
for (int n = 0; n < N; ++n)
{
int e = pthread_mutex_init (&map[n], &ma);
if (e == ENOTSUP)
{
puts ("cannot support pshared robust mutexes");
return 0;
}
if (e != 0)
{
printf ("mutex_init %d in round %d failed\n", n + 1, round);
return 1;
}
}
pid_t p = fork ();
if (p == -1)
{
printf ("fork in round %d failed\n", round);
return 1;
}
if (p == 0)
child (round);
int status;
if (TEMP_FAILURE_RETRY (waitpid (p, &status, 0)) != p)
{
printf ("waitpid in round %d failed\n", round);
return 1;
}
if (!WIFSIGNALED (status))
{
printf ("child did not die of a signal in round %d\n", round);
return 1;
}
if (WTERMSIG (status) != THESIGNAL)
{
printf ("child did not die of signal %d in round %d\n",
THESIGNAL, round);
return 1;
}
for (int n = 0; n < N; ++n)
{
int e = pthread_mutex_lock (&map[n]);
if (e != 0 && e != EOWNERDEAD)
{
printf ("mutex_lock %d failed in round %d\n", n + 1, round);
return 1;
}
}
for (int n = 0; n < N; ++n)
if (pthread_mutex_unlock (&map[n]) != 0)
{
printf ("mutex_unlock %d failed in round %d\n", n + 1, round);
return 1;
}
for (int n = 0; n < N; ++n)
{
int e = pthread_mutex_destroy (&map[n]);
if (e != 0)
{
printf ("mutex_destroy %d in round %d failed with %d\n",
n + 1, round, e);
printf ("nusers = %d\n", (int) map[n].__data.__nusers);
return 1;
}
}
}
if (pthread_mutexattr_destroy (&ma) != 0)
{
puts ("mutexattr_destroy failed");
return 1;
}
if (munmap (map, N * sizeof (pthread_mutex_t)) != 0)
{
puts ("munmap failed");
return 1;
}
return 0;
}

sysdeps/unix/sysv/linux/kernel-features.h

@@ -468,3 +468,9 @@
&& (defined __i386__ || defined __x86_64__)
# define __ASSUME_ATFCTS 1
#endif
/* Support for inter-process robust mutexes was added in 2.6.17. */
#if __LINUX_KERNEL_VERSION >= 0x020611 \
&& (defined __i386__ || defined __x86_64__)
# define __ASSUME_SET_ROBUST_LIST 1
#endif
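The guard above relies on __LINUX_KERNEL_VERSION packing one release component per byte, so 0x020611 is kernel 2.6.17 (0x11 == 17). A quick check of that encoding:

#include <stdio.h>

int
main (void)
{
  unsigned int v = 0x020611;
  printf ("%u.%u.%u\n", v >> 16, (v >> 8) & 0xff, v & 0xff);   /* 2.6.17 */
  return 0;
}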