Mirror of https://sourceware.org/git/glibc.git (synced 2024-12-03 02:11:08 +00:00)
07ed18d26a
This patch adds several new tunables to control the behavior of elision
on supported platforms [1].  Since elision now depends on tunables, we
should always *compile* with elision enabled, and leave the code
disabled, but available for runtime selection.  This gives us *much*
better compile-time testing of the existing code to avoid bit-rot [2].

Tested on ppc, ppc64, ppc64le, s390x and x86_64.

[1] This part of the patch was initially proposed by Paul Murphy but
had stalled because the framework has changed since the patch was
originally proposed: https://patchwork.sourceware.org/patch/10342/

[2] This part of the patch was initially proposed as an RFC by Carlos
O'Donnell.  It makes sense to me to integrate it into this patch:
https://sourceware.org/ml/libc-alpha/2017-05/msg00335.html

* elf/dl-tunables.list: Add elision parameters.
* manual/tunables.texi: Add entries about the elision tunables.
* sysdeps/unix/sysv/linux/powerpc/elision-conf.c: Add callback
functions to dynamically enable/disable elision.  Add multiple
callback functions to set elision parameters.  Delete the
__libc_enable_secure check.
* sysdeps/unix/sysv/linux/s390/elision-conf.c: Likewise.
* sysdeps/unix/sysv/linux/x86/elision-conf.c: Likewise.
* configure: Regenerated.
* configure.ac: Delete the enable_lock_elision option.
* config.h.in: Delete the ENABLE_LOCK_ELISION flag.
* config.make.in: Remove references to enable_lock_elision.
* manual/install.texi: Remove the elision configure option.
* INSTALL: Regenerated to remove enable_lock_elision.
* nptl/Makefile: Disable elision so it can verify the error case for
destroying a mutex.
* sysdeps/powerpc/nptl/elide.h: Clean up the ENABLE_LOCK_ELISION
check.  Delete the macros for the case when ENABLE_LOCK_ELISION was
not defined.
* sysdeps/s390/configure: Regenerated.
* sysdeps/s390/configure.ac: Remove references to enable_lock_elision.
* nptl/tst-mutex8.c: Delete all #ifndef ENABLE_LOCK_ELISION from the
test.
* sysdeps/powerpc/powerpc32/sysdep.h: Delete all ENABLE_LOCK_ELISION
checks.
* sysdeps/powerpc/powerpc64/sysdep.h: Likewise.
* sysdeps/powerpc/sysdep.h: Likewise.
* sysdeps/s390/nptl/bits/pthreadtypes-arch.h: Likewise.
* sysdeps/unix/sysv/linux/powerpc/force-elision.h: Likewise.
* sysdeps/unix/sysv/linux/s390/elision-conf.h: Likewise.
* sysdeps/unix/sysv/linux/s390/force-elision.h: Likewise.
* sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/s390/Makefile: Remove references to
enable-lock-elision.

Reviewed-by: Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
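As an illustration of the runtime selection this enables: with elision always compiled in, a single run can opt in through the tunables mechanism instead of a configure-time switch.  The exact tunable names are the ones this patch adds to elf/dl-tunables.list and manual/tunables.texi; assuming a glibc.elision.enable tunable, an invocation might look like:

  GLIBC_TUNABLES=glibc.elision.enable=1 ./application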
117 lines | 3.8 KiB | C
/* elide.h: Generic lock elision support for powerpc.
   Copyright (C) 2015-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef ELIDE_PPC_H
# define ELIDE_PPC_H

# include <htm.h>
# include <elision-conf.h>

/* Get the new value of adapt_count according to the elision
   configurations.  Returns true if the system should retry again or false
   otherwise.  */
static inline bool
__get_new_count (uint8_t *adapt_count, int attempt)
{
  /* A persistent failure indicates that a retry will probably
     result in another failure.  Use normal locking now and
     for the next couple of calls.  */
  if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
    {
      if (__elision_aconf.skip_lock_internal_abort > 0)
        *adapt_count = __elision_aconf.skip_lock_internal_abort;
      return false;
    }
  /* Same logic as above, but for a number of temporary failures in
     a row.  */
  else if (attempt <= 1 && __elision_aconf.skip_lock_out_of_tbegin_retries > 0
           && __elision_aconf.try_tbegin > 0)
    *adapt_count = __elision_aconf.skip_lock_out_of_tbegin_retries;
  return true;
}

/* CONCURRENCY NOTES:

   The evaluation of the macro expression is_lock_free encompasses one or
   more loads from memory locations that are concurrently modified by other
   threads.  For lock elision to work, this evaluation and the rest of the
   critical section protected by the lock must be atomic because an
   execution with lock elision must be equivalent to an execution in which
   the lock would have been actually acquired and released.  Therefore, we
   evaluate is_lock_free inside of the transaction that represents the
   critical section for which we want to use lock elision, which ensures
   the atomicity that we require.  */
/* Returns true if the lock defined by is_lock_free was elided.
   ADAPT_COUNT is a per-lock state variable.  */
# define ELIDE_LOCK(adapt_count, is_lock_free)                    \
  ({                                                              \
    int ret = 0;                                                  \
    if (adapt_count > 0)                                          \
      (adapt_count)--;                                            \
    else                                                          \
      for (int i = __elision_aconf.try_tbegin; i > 0; i--)        \
        {                                                         \
          if (__libc_tbegin (0))                                  \
            {                                                     \
              if (is_lock_free)                                   \
                {                                                 \
                  ret = 1;                                        \
                  break;                                          \
                }                                                 \
              __libc_tabort (_ABORT_LOCK_BUSY);                   \
            }                                                     \
          else                                                    \
            if (!__get_new_count (&adapt_count, i))               \
              break;                                              \
        }                                                         \
    ret;                                                          \
  })

# define ELIDE_TRYLOCK(adapt_count, is_lock_free, write)          \
  ({                                                              \
    int ret = 0;                                                  \
    if (__elision_aconf.try_tbegin > 0)                           \
      {                                                           \
        if (write)                                                \
          __libc_tabort (_ABORT_NESTED_TRYLOCK);                  \
        ret = ELIDE_LOCK (adapt_count, is_lock_free);             \
      }                                                           \
    ret;                                                          \
  })

static inline bool
__elide_unlock (int is_lock_free)
{
  if (is_lock_free)
    {
      /* This code is expected to crash when trying to unlock a lock not
         held by this thread.  More information is available in the
         __pthread_rwlock_unlock() implementation.  */
      __libc_tend (0);
      return true;
    }
  return false;
}

# define ELIDE_UNLOCK(is_lock_free) \
  __elide_unlock (is_lock_free)

#endif
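For context, here is a minimal sketch of the usage pattern that the CONCURRENCY NOTES above describe.  The toy lock type, its field names, and the fallback locking code are hypothetical and exist only for illustration; the real callers of these macros are glibc's pthread rwlock and mutex implementations, and the example assumes it is compiled inside glibc where <elide.h> and the elision configuration are available.

/* Hypothetical toy lock showing how ELIDE_LOCK/ELIDE_UNLOCK compose with a
   conventional fallback path; not part of the glibc sources.  */
#include <stdbool.h>
#include <stdint.h>
#include <elide.h>   /* glibc-internal header shown above.  */

struct toy_lock
{
  int locked;          /* 0 while free; read inside the transaction.  */
  uint8_t adapt_count; /* Per-lock elision adaptation state.  */
};

static void
toy_lock_acquire (struct toy_lock *l)
{
  /* ELIDE_LOCK starts a transaction and evaluates the is_lock_free
     expression inside it; a nonzero result means the critical section is
     now running transactionally and the lock word is never written.  */
  if (ELIDE_LOCK (l->adapt_count, l->locked == 0))
    return;
  /* Elision was skipped or failed: really acquire the lock.  */
  while (__atomic_exchange_n (&l->locked, 1, __ATOMIC_ACQUIRE) != 0)
    ;
}

static void
toy_lock_release (struct toy_lock *l)
{
  /* If the lock word is still free we must have elided, so commit the
     transaction; otherwise release the lock we actually took.  */
  if (!ELIDE_UNLOCK (l->locked == 0))
    __atomic_store_n (&l->locked, 0, __ATOMIC_RELEASE);
}

The real callers pass lock-state expressions such as the rwlock reader and writer counts as is_lock_free (see the __pthread_rwlock_unlock reference in the comment above); the toy fields here merely stand in for those.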