glibc/nptl/pthread_mutex_setprioceiling.c
Stefan Liebler 403b4feb22 Fix race in pthread_mutex_lock while promoting to PTHREAD_MUTEX_ELISION_NP [BZ #23275]
The race leads either to pthread_mutex_destroy returning EBUSY
or to an assertion failure (see the description in Bugzilla).

This patch fixes the race by ensuring that the elision path is
used in all cases once elision is enabled via the GLIBC_TUNABLES framework.
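
The core of the change can be sketched as follows. This is an abridged
sketch based on the x86 variant of the macro, not the verbatim patch;
see sysdeps/unix/sysv/linux/*/force-elision.h for the full versions
with the concurrency notes:

  /* Abridged sketch of the fixed FORCE_ELISION macro.  The promotion
     of a plain timed mutex happens at most once and uses an atomic
     store; the elision path is then taken whenever the elision flag
     is set in the atomically loaded kind, regardless of which thread
     set it.  */
  #define FORCE_ELISION(m, s)                                          \
    if (__pthread_force_elision)                                       \
      {                                                                \
        int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind));  \
        if ((mutex_kind & (PTHREAD_MUTEX_ELISION_NP                    \
                           | PTHREAD_MUTEX_NO_ELISION_NP)) == 0        \
            && mutex_kind == PTHREAD_MUTEX_TIMED_NP)                   \
          {                                                            \
            mutex_kind |= PTHREAD_MUTEX_ELISION_NP;                    \
            atomic_store_relaxed (&((m)->__data.__kind), mutex_kind);  \
          }                                                            \
        if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0)              \
          {                                                            \
            s;                                                         \
          }                                                            \
      }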

The __kind field in struct __pthread_mutex_s is accessed concurrently,
so all accesses now go through the atomic macros.
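
The pattern of the accessor change, illustrated with PTHREAD_MUTEX_TYPE
from nptl/pthreadP.h (the mask 127 selects the type bits of __kind):

  /* Before: plain load, which races with the promotion above.  */
  #define PTHREAD_MUTEX_TYPE(m) \
    ((m)->__data.__kind & 127)

  /* After: relaxed atomic load, pairing with the atomic store in
     FORCE_ELISION.  */
  #define PTHREAD_MUTEX_TYPE(m) \
    (atomic_load_relaxed (&((m)->__data.__kind)) & 127)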

The new test case tst-mutex10 triggers the race on s390x and Intel.
Presumably it also does on POWER, but I don't have access to a POWER
machine with lock elision; at least the POWER code is the same as on the
other two architectures.
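
The test itself is not reproduced here; a minimal sketch of the kind of
stress loop that exposes the race looks like this (it must run with the
elision tunable enabled, e.g. GLIBC_TUNABLES=glibc.elision.enable=1,
which is what the tst-mutex10-ENV variable below arranges):

  /* Sketch only: repeatedly init/lock/unlock/destroy a default
     (timed) mutex from several threads at once.  */
  #include <pthread.h>
  #include <stdlib.h>

  #define ROUNDS 1000
  #define NTHREADS 4

  static pthread_mutex_t m;

  static void *
  thr (void *arg)
  {
    /* The first lock may promote __kind to elision while another
       thread is reading it concurrently.  */
    if (pthread_mutex_lock (&m) != 0)
      abort ();
    if (pthread_mutex_unlock (&m) != 0)
      abort ();
    return NULL;
  }

  int
  main (void)
  {
    for (int r = 0; r < ROUNDS; r++)
      {
        pthread_t t[NTHREADS];
        if (pthread_mutex_init (&m, NULL) != 0)
          abort ();
        for (int i = 0; i < NTHREADS; i++)
          if (pthread_create (&t[i], NULL, thr, NULL) != 0)
            abort ();
        for (int i = 0; i < NTHREADS; i++)
          if (pthread_join (t[i], NULL) != 0)
            abort ();
        /* Without the fix, destroy can return EBUSY (or an assertion
           triggers): one thread read a stale __kind and used the
           non-elision path while another used elision, leaving the
           lock word inconsistent.  */
        if (pthread_mutex_destroy (&m) != 0)
          abort ();
      }
    return 0;
  }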

ChangeLog:

	[BZ #23275]
	* nptl/tst-mutex10.c: New file.
	* nptl/Makefile (tests): Add tst-mutex10.
	(tst-mutex10-ENV): New variable.
	* sysdeps/unix/sysv/linux/s390/force-elision.h (FORCE_ELISION):
	Ensure that the elision path is used if elision is available.
	* sysdeps/unix/sysv/linux/powerpc/force-elision.h (FORCE_ELISION):
	Likewise.
	* sysdeps/unix/sysv/linux/x86/force-elision.h (FORCE_ELISION):
	Likewise.
	* nptl/pthreadP.h (PTHREAD_MUTEX_TYPE, PTHREAD_MUTEX_TYPE_ELISION)
	(PTHREAD_MUTEX_PSHARED): Use atomic_load_relaxed.
	* nptl/pthread_mutex_consistent.c (pthread_mutex_consistent): Likewise.
	* nptl/pthread_mutex_getprioceiling.c (pthread_mutex_getprioceiling):
	Likewise.
	* nptl/pthread_mutex_lock.c (__pthread_mutex_lock_full)
	(__pthread_mutex_cond_lock_adjust): Likewise.
	* nptl/pthread_mutex_setprioceiling.c (pthread_mutex_setprioceiling):
	Likewise.
	* nptl/pthread_mutex_timedlock.c (__pthread_mutex_timedlock): Likewise.
	* nptl/pthread_mutex_trylock.c (__pthread_mutex_trylock): Likewise.
	* nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Likewise.
	* sysdeps/nptl/bits/thread-shared-types.h (struct __pthread_mutex_s):
	Add comments.
	* nptl/pthread_mutex_destroy.c (__pthread_mutex_destroy):
	Use atomic_load_relaxed and atomic_store_relaxed.
	* nptl/pthread_mutex_init.c (__pthread_mutex_init):
	Use atomic_store_relaxed.
2018-10-17 12:23:04 +02:00


/* Set current priority ceiling of pthread_mutex_t.
   Copyright (C) 2006-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <stdbool.h>
#include <errno.h>
#include <pthreadP.h>
#include <atomic.h>

int
pthread_mutex_setprioceiling (pthread_mutex_t *mutex, int prioceiling,
                              int *old_ceiling)
{
  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  if ((atomic_load_relaxed (&(mutex->__data.__kind))
       & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0)
    return EINVAL;

  /* See __init_sched_fifo_prio.  */
  if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1
      || atomic_load_relaxed (&__sched_fifo_max_prio) == -1)
    __init_sched_fifo_prio ();
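
  /* The requested ceiling must be a valid SCHED_FIFO priority and must
     fit in the bits reserved for the ceiling in the mutexattr word.  */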
  if (__glibc_unlikely (prioceiling
                        < atomic_load_relaxed (&__sched_fifo_min_prio))
      || __glibc_unlikely (prioceiling
                           > atomic_load_relaxed (&__sched_fifo_max_prio))
      || __glibc_unlikely ((prioceiling
                            & (PTHREAD_MUTEXATTR_PRIO_CEILING_MASK
                               >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT))
                           != prioceiling))
    return EINVAL;

  /* Check whether we already hold the mutex.  */
  bool locked = false;
  int kind = PTHREAD_MUTEX_TYPE (mutex);
  if (mutex->__data.__owner == THREAD_GETMEM (THREAD_SELF, tid))
    {
      if (kind == PTHREAD_MUTEX_PP_ERRORCHECK_NP)
        return EDEADLK;

      if (kind == PTHREAD_MUTEX_PP_RECURSIVE_NP)
        locked = true;
    }
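
  /* In a priority-protected mutex the __lock word stores the current
     ceiling in the bits covered by PTHREAD_MUTEX_PRIO_CEILING_MASK and
     the lock state in the low bits: ceiling | 0 means unlocked,
     ceiling | 1 locked, and ceiling | 2 locked with waiters.  */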
  int oldval = mutex->__data.__lock;

  if (! locked)
    do
      {
        /* Need to lock the mutex, but without obeying the priority
           protect protocol.  */
        int ceilval = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK);

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      ceilval | 1, ceilval);
        if (oldval == ceilval)
          break;
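
        /* Contended: mark the lock word as having waiters (low bits
           set to 2), then sleep on the futex until the word returns
           to the bare ceiling value.  */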
        do
          {
            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 2,
                                                     ceilval | 1);

            if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
              break;

            if (oldval != ceilval)
              lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                              PTHREAD_MUTEX_PSHARED (mutex));
          }
        while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                    ceilval | 2, ceilval)
               != ceilval);

        if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
          continue;
      }
    while (0);
  int oldprio = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
  if (locked)
    {
      int ret = __pthread_tpp_change_priority (oldprio, prioceiling);
      if (ret)
        return ret;
    }

  if (old_ceiling != NULL)
    *old_ceiling = oldprio;
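
  /* Install the new ceiling.  If the mutex was already owned by this
     thread (recursive PP mutex), keep the lock bits; otherwise the
     store of 0 in the lock bits also releases the lock acquired above.
     Wake all waiters so they retry against the new ceiling.  */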
  int newlock = 0;
  if (locked)
    newlock = (mutex->__data.__lock & ~PTHREAD_MUTEX_PRIO_CEILING_MASK);
  mutex->__data.__lock = newlock
                         | (prioceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT);
  atomic_full_barrier ();

  lll_futex_wake (&mutex->__data.__lock, INT_MAX,
                  PTHREAD_MUTEX_PSHARED (mutex));

  return 0;
}