5712f8db26
This patch adds a new build-time test to check the offsets of
user-visible internal fields.  Although currently the only field which
is statically initialized to a non-zero value is
pthread_mutex_t.__data.__kind, the tests also check the offsets of the
__nusers, __kind, __spins, __elision (if supported), and __list
internal members.  An internal header (pthread-offsets.h) is added to
each major ABI with the reference values.
Checked on x86_64-linux-gnu and with a build check for all affected
ABIs (aarch64-linux-gnu, alpha-linux-gnu, arm-linux-gnueabihf,
hppa-linux-gnu, i686-linux-gnu, ia64-linux-gnu, m68k-linux-gnu,
microblaze-linux-gnu, mips64-linux-gnu, mips64-n32-linux-gnu,
mips-linux-gnu, powerpc64le-linux-gnu, powerpc-linux-gnu,
s390-linux-gnu, s390x-linux-gnu, sh4-linux-gnu, sparc64-linux-gnu,
sparcv9-linux-gnu, tilegx-linux-gnu, tilegx-linux-gnu-x32,
tilepro-linux-gnu, x86_64-linux-gnu, and x86_64-linux-x32).
* nptl/pthreadP.h (ASSERT_PTHREAD_STRING,
ASSERT_PTHREAD_INTERNAL_OFFSET): New macros.
* nptl/pthread_mutex_init.c (__pthread_mutex_init): Add build time
checks for internal pthread_mutex_t offsets.
* sysdeps/aarch64/nptl/pthread-offsets.h
(__PTHREAD_MUTEX_NUSERS_OFFSET, __PTHREAD_MUTEX_KIND_OFFSET,
__PTHREAD_MUTEX_SPINS_OFFSET, __PTHREAD_MUTEX_ELISION_OFFSET,
__PTHREAD_MUTEX_LIST_OFFSET): New macros.
* sysdeps/alpha/nptl/pthread-offsets.h: Likewise.
* sysdeps/arm/nptl/pthread-offsets.h: Likewise.
* sysdeps/hppa/nptl/pthread-offsets.h: Likewise.
* sysdeps/i386/nptl/pthread-offsets.h: Likewise.
* sysdeps/ia64/nptl/pthread-offsets.h: Likewise.
* sysdeps/m68k/nptl/pthread-offsets.h: Likewise.
* sysdeps/microblaze/nptl/pthread-offsets.h: Likewise.
* sysdeps/mips/nptl/pthread-offsets.h: Likewise.
* sysdeps/nios2/nptl/pthread-offsets.h: Likewise.
* sysdeps/powerpc/nptl/pthread-offsets.h: Likewise.
* sysdeps/s390/nptl/pthread-offsets.h: Likewise.
* sysdeps/sh/nptl/pthread-offsets.h: Likewise.
* sysdeps/sparc/nptl/pthread-offsets.h: Likewise.
* sysdeps/tile/nptl/pthread-offsets.h: Likewise.
* sysdeps/x86_64/nptl/pthread-offsets.h: Likewise.
Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
(cherry picked from commit dff91cd45e)
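For reference, each per-ABI pthread-offsets.h simply records the
expected byte offsets as preprocessor constants, which are then checked
against the live layout in the patched nptl/pthread_mutex_init.c shown
below.  A plausible 64-bit example (the values follow from the x86_64
layout of struct __pthread_mutex_s; the committed headers may differ in
exact formatting):

/* Sketch of a 64-bit pthread-offsets.h; values are illustrative.  */
#define __PTHREAD_MUTEX_NUSERS_OFFSET   12
#define __PTHREAD_MUTEX_KIND_OFFSET     16
#define __PTHREAD_MUTEX_SPINS_OFFSET    20
#define __PTHREAD_MUTEX_ELISION_OFFSET  22
#define __PTHREAD_MUTEX_LIST_OFFSET     24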
/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <kernel-features.h>
#include "pthreadP.h"
#include <atomic.h>
#include <pthread-offsets.h>

#include <stap-probe.h>

static const struct pthread_mutexattr default_mutexattr =
  {
    /* Default is a normal mutex, not shared between processes.  */
    .mutexkind = PTHREAD_MUTEX_NORMAL
  };


static bool
prio_inherit_missing (void)
{
#ifdef __NR_futex
  static int tpi_supported;
  if (__glibc_unlikely (tpi_supported == 0))
    {
      int lock = 0;
      INTERNAL_SYSCALL_DECL (err);
      int ret = INTERNAL_SYSCALL (futex, err, 4, &lock, FUTEX_UNLOCK_PI, 0, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (ret, err));
      tpi_supported = INTERNAL_SYSCALL_ERRNO (ret, err) == ENOSYS ? -1 : 1;
    }
  return __glibc_unlikely (tpi_supported < 0);
#endif
  return true;
}

int
__pthread_mutex_init (pthread_mutex_t *mutex,
                      const pthread_mutexattr_t *mutexattr)
{
  const struct pthread_mutexattr *imutexattr;

  assert (sizeof (pthread_mutex_t) <= __SIZEOF_PTHREAD_MUTEX_T);
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__nusers,
                                  __PTHREAD_MUTEX_NUSERS_OFFSET);
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__kind,
                                  __PTHREAD_MUTEX_KIND_OFFSET);
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__spins,
                                  __PTHREAD_MUTEX_SPINS_OFFSET);
#if __PTHREAD_MUTEX_LOCK_ELISION
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__elision,
                                  __PTHREAD_MUTEX_ELISION_OFFSET);
#endif
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__list,
                                  __PTHREAD_MUTEX_LIST_OFFSET);

  imutexattr = ((const struct pthread_mutexattr *) mutexattr
                ?: &default_mutexattr);

  /* Sanity checks.  */
  switch (__builtin_expect (imutexattr->mutexkind
                            & PTHREAD_MUTEXATTR_PROTOCOL_MASK,
                            PTHREAD_PRIO_NONE
                            << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT))
    {
    case PTHREAD_PRIO_NONE << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      break;

    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      if (__glibc_unlikely (prio_inherit_missing ()))
        return ENOTSUP;
      break;

    default:
      /* XXX: For now we don't support robust priority protected mutexes.  */
      if (imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST)
        return ENOTSUP;
      break;
    }

  /* Clear the whole variable.  */
  memset (mutex, '\0', __SIZEOF_PTHREAD_MUTEX_T);

  /* Copy the values from the attribute.  */
  mutex->__data.__kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;

  if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
    {
#ifndef __ASSUME_SET_ROBUST_LIST
      if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
          && __set_robust_list_avail < 0)
        return ENOTSUP;
#endif

      mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
    }

  switch (imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
    {
    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP;
      break;

    case PTHREAD_PRIO_PROTECT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP;

      int ceiling = (imutexattr->mutexkind
                     & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
                    >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT;
      if (! ceiling)
        {
          /* See __init_sched_fifo_prio.  */
          if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1)
            __init_sched_fifo_prio ();
          if (ceiling < atomic_load_relaxed (&__sched_fifo_min_prio))
            ceiling = atomic_load_relaxed (&__sched_fifo_min_prio);
        }
      mutex->__data.__lock = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
      break;

    default:
      break;
    }

  /* The kernel when waking robust mutexes on exit never uses
     FUTEX_PRIVATE_FLAG FUTEX_WAKE.  */
  if ((imutexattr->mutexkind & (PTHREAD_MUTEXATTR_FLAG_PSHARED
                                | PTHREAD_MUTEXATTR_FLAG_ROBUST)) != 0)
    mutex->__data.__kind |= PTHREAD_MUTEX_PSHARED_BIT;

  /* Default values: mutex not used yet.  */
  // mutex->__count = 0;        already done by memset
  // mutex->__owner = 0;        already done by memset
  // mutex->__nusers = 0;       already done by memset
  // mutex->__spins = 0;        already done by memset
  // mutex->__next = NULL;      already done by memset

  LIBC_PROBE (mutex_init, 1, mutex);

  return 0;
}
weak_alias (__pthread_mutex_init, pthread_mutex_init)
hidden_def (__pthread_mutex_init)
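A short usage sketch (not part of the patch) showing how the protocol
checks above surface to callers: initializing a PTHREAD_PRIO_INHERIT
mutex returns ENOTSUP when prio_inherit_missing () detects a kernel
without FUTEX_UNLOCK_PI support.  Compile with -pthread.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t mutex;

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_INHERIT);

  int ret = pthread_mutex_init (&mutex, &attr);
  if (ret == ENOTSUP)
    puts ("priority-inheritance mutexes not supported by this kernel");
  else if (ret == 0)
    pthread_mutex_destroy (&mutex);

  pthread_mutexattr_destroy (&attr);
  return 0;
}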