elf, nptl: Resolve recursive lock implementation early

If libpthread is included in libc, it is not necessary to delay
initialization of the lock/unlock function pointers until libpthread
is loaded.  This eliminates two unprotected function pointers
from _rtld_global and removes some initialization code from
libpthread.

Tested-by: Carlos O'Donell <carlos@redhat.com>
Reviewed-by: Carlos O'Donell <carlos@redhat.com>
Author:  Florian Weimer
Date:    2021-05-10 10:31:41 +02:00
Parent:  a64af8c9b6
Commit:  d6163dfd38

7 changed files with 120 additions and 24 deletions
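In outline, the scheme the patch adopts: ld.so keeps a pair of function pointers for its recursive lock operations, points them at a do-nothing dummy at the start of dl_main (only one thread can exist that early), and has __rtld_mutex_init redirect them to libc's pthread_mutex_lock/pthread_mutex_unlock once libc has been loaded. The stand-alone sketch below mirrors that shape outside glibc; the names rtld_mutex_lock, rtld_mutex_unlock and rtld_mutex_init are illustrative only, and the real patch resolves the libc symbols with _dl_lookup_direct rather than assigning them directly.

/* Stand-alone illustration, not glibc code.  Compile with: cc sketch.c -pthread */
#include <pthread.h>
#include <stdio.h>

/* While the loader is still single-threaded, locking can be a no-op.  */
static int
rtld_mutex_dummy (pthread_mutex_t *lock)
{
  (void) lock;
  return 0;
}

/* Stand-ins for ___rtld_mutex_lock / ___rtld_mutex_unlock.  */
static int (*rtld_mutex_lock) (pthread_mutex_t *) = rtld_mutex_dummy;
static int (*rtld_mutex_unlock) (pthread_mutex_t *) = rtld_mutex_dummy;

/* Stand-in for __rtld_mutex_init: switch to the real implementation
   once libc is available.  */
static void
rtld_mutex_init (void)
{
  rtld_mutex_lock = pthread_mutex_lock;
  rtld_mutex_unlock = pthread_mutex_unlock;
}

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t m;
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init (&m, &attr);

  rtld_mutex_lock (&m);        /* Early: no-op dummy.  */
  rtld_mutex_unlock (&m);

  rtld_mutex_init ();          /* After libc is loaded.  */
  rtld_mutex_lock (&m);        /* Now real recursive locking.  */
  rtld_mutex_lock (&m);
  rtld_mutex_unlock (&m);
  rtld_mutex_unlock (&m);

  puts ("ok");
  return 0;
}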

elf/Makefile

@@ -66,7 +66,8 @@ elide-routines.os = $(all-dl-routines) dl-support enbl-secure dl-origin \
 # interpreter and operating independent of libc.
 rtld-routines = rtld $(all-dl-routines) dl-sysdep dl-environ dl-minimal \
   dl-error-minimal dl-conflict dl-hwcaps dl-hwcaps_split dl-hwcaps-subdirs \
-  dl-usage dl-diagnostics dl-diagnostics-kernel dl-diagnostics-cpu
+  dl-usage dl-diagnostics dl-diagnostics-kernel dl-diagnostics-cpu \
+  dl-mutex
 all-rtld-routines = $(rtld-routines) $(sysdep-rtld-routines)
 CFLAGS-dl-runtime.c += -fexceptions -fasynchronous-unwind-tables

elf/dl-mutex.c (new file)

@@ -0,0 +1,19 @@
/* Recursive locking implementation for the dynamic loader. Generic version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/* For the generic version, initialization happens in dl_main. */

elf/rtld.c

@@ -857,6 +857,14 @@ rtld_lock_default_unlock_recursive (void *lock)
   __rtld_lock_default_unlock_recursive (lock);
 }
 #endif
+#if PTHREAD_IN_LIBC
+/* Dummy implementation. See __rtld_mutex_init. */
+static int
+rtld_mutex_dummy (pthread_mutex_t *lock)
+{
+  return 0;
+}
+#endif
 static void
@@ -1148,6 +1156,10 @@ dl_main (const ElfW(Phdr) *phdr,
   GL(dl_rtld_lock_recursive) = rtld_lock_default_lock_recursive;
   GL(dl_rtld_unlock_recursive) = rtld_lock_default_unlock_recursive;
 #endif
+#if PTHREAD_IN_LIBC
+  ___rtld_mutex_lock = rtld_mutex_dummy;
+  ___rtld_mutex_unlock = rtld_mutex_dummy;
+#endif
   /* The explicit initialization here is cheaper than processing the reloc
      in the _rtld_local definition's initializer. */
@@ -2363,6 +2375,9 @@ dl_main (const ElfW(Phdr) *phdr,
      loader. */
   __rtld_malloc_init_real (main_map);
+  /* Likewise for the locking implementation. */
+  __rtld_mutex_init ();
   /* Mark all the objects so we know they have been already relocated. */
   for (struct link_map *l = main_map; l != NULL; l = l->l_next)
     {
@@ -2468,6 +2483,9 @@ dl_main (const ElfW(Phdr) *phdr,
      at this point. */
   __rtld_malloc_init_real (main_map);
+  /* Likewise for the locking implementation. */
+  __rtld_mutex_init ();
   RTLD_TIMING_VAR (start);
   rtld_timer_start (&start);

nptl/nptl-init.c

@@ -179,15 +179,6 @@ __pthread_initialize_minimal_internal (void)
   lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
 #ifdef SHARED
-  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
-     keep the lock count from the ld.so implementation. */
-  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
-  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
-  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
-  GL(dl_load_lock).mutex.__data.__count = 0;
-  while (rtld_lock_count-- > 0)
-    __pthread_mutex_lock (&GL(dl_load_lock).mutex);
   GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
 #endif
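For context on the deletion above: ld.so's built-in recursive lock merely bumps mutex.__data.__count, so when the real pthread_mutex_lock used to take over at this point, any recursion depth already accumulated on GL(dl_load_lock) had to be replayed onto the mutex. The sketch below is illustrative only and uses an ordinary recursive mutex plus an assumed pending count of 2 in place of the loader's internal state; with the lock functions resolved before any locking happens, this hand-over step becomes unnecessary.

/* Illustrative sketch, not glibc code.  Compile with: cc replay.c -pthread */
#include <pthread.h>
#include <stdio.h>

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t m;
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init (&m, &attr);

  /* Pretend the counter-only implementation had been "locked" twice
     before the switch-over.  */
  unsigned int rtld_lock_count = 2;

  /* Replay the count onto the real recursive mutex, as the removed
     code did for GL(dl_load_lock).  */
  while (rtld_lock_count-- > 0)
    pthread_mutex_lock (&m);

  /* Later unlocks now balance out.  */
  pthread_mutex_unlock (&m);
  pthread_mutex_unlock (&m);

  puts ("lock count replayed");
  return 0;
}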

sysdeps/generic/ldsodefs.h

@@ -403,7 +403,7 @@ struct rtld_global
   struct auditstate _dl_rtld_auditstate[DL_NNS];
 #endif
-#if defined SHARED && defined _LIBC_REENTRANT \
+#if !PTHREAD_IN_LIBC && defined SHARED \
     && defined __rtld_lock_default_lock_recursive
   EXTERN void (*_dl_rtld_lock_recursive) (void *);
   EXTERN void (*_dl_rtld_unlock_recursive) (void *);
@@ -1318,6 +1318,29 @@ link_map_audit_state (struct link_map *l, size_t index)
 }
 #endif /* SHARED */
+#if PTHREAD_IN_LIBC && defined SHARED
+/* Recursive locking implementation for use within the dynamic loader.
+   Used to define the __rtld_lock_lock_recursive and
+   __rtld_lock_unlock_recursive via <libc-lock.h>.  Initialized to a
+   no-op dummy implementation early.  Similar to
+   GL (dl_rtld_lock_recursive) and GL (dl_rtld_unlock_recursive)
+   in !PTHREAD_IN_LIBC builds. */
+extern int (*___rtld_mutex_lock) (pthread_mutex_t *) attribute_hidden;
+extern int (*___rtld_mutex_unlock) (pthread_mutex_t *lock) attribute_hidden;
+
+/* Called after libc has been loaded, but before RELRO is activated.
+   Used to initialize the function pointers to the actual
+   implementations. */
+void __rtld_mutex_init (void) attribute_hidden;
+#else /* !PTHREAD_IN_LIBC */
+static inline void
+__rtld_mutex_init (void)
+{
+  /* The initialization happens later (!PTHREAD_IN_LIBC) or is not
+     needed at all (!SHARED). */
+}
+#endif /* !PTHREAD_IN_LIBC */
 #if THREAD_GSCOPE_IN_TCB
 void __thread_gscope_wait (void) attribute_hidden;
 # define THREAD_GSCOPE_WAIT() __thread_gscope_wait ()

sysdeps/nptl/dl-mutex.c (new file)

@@ -0,0 +1,53 @@
/* Recursive locking implementation for the dynamic loader. NPTL version.
Copyright (C) 2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/* Use the mutex implementation in libc (assuming PTHREAD_IN_LIBC). */
#include <assert.h>
#include <first-versions.h>
#include <ldsodefs.h>
__typeof (pthread_mutex_lock) *___rtld_mutex_lock attribute_relro;
__typeof (pthread_mutex_unlock) *___rtld_mutex_unlock attribute_relro;
void
__rtld_mutex_init (void)
{
  /* There is an implicit assumption here that the lock counters are
     zero and this function is called while nothing is locked.  For
     early initialization of the mutex functions this is true because
     it happens directly in dl_main in elf/rtld.c, and not some ELF
     constructor while holding loader locks.  */

  struct link_map *libc_map = GL (dl_ns)[LM_ID_BASE].libc_map;

  const ElfW(Sym) *sym
    = _dl_lookup_direct (libc_map, "pthread_mutex_lock",
                         0x4f152227, /* dl_new_hash output.  */
                         FIRST_VERSION_libc_pthread_mutex_lock_STRING,
                         FIRST_VERSION_libc_pthread_mutex_lock_HASH);
  assert (sym != NULL);
  ___rtld_mutex_lock = DL_SYMBOL_ADDRESS (libc_map, sym);

  sym = _dl_lookup_direct (libc_map, "pthread_mutex_unlock",
                           0x7dd7aaaa, /* dl_new_hash output.  */
                           FIRST_VERSION_libc_pthread_mutex_unlock_STRING,
                           FIRST_VERSION_libc_pthread_mutex_unlock_HASH);
  assert (sym != NULL);
  ___rtld_mutex_unlock = DL_SYMBOL_ADDRESS (libc_map, sym);
}
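The two hexadecimal constants passed to _dl_lookup_direct are described in the patch comments as dl_new_hash outputs for the two symbol names. dl_new_hash is the standard GNU symbol hash (h starts at 5381 and becomes h * 33 + c for each byte), so the constants can be re-derived with a small stand-alone program such as the sketch below; the main function and printout are illustrative, not part of the patch.

/* Stand-alone check of the hard-coded hash constants.  */
#include <stdint.h>
#include <stdio.h>

/* The GNU symbol hash used by the dynamic loader.  */
static uint32_t
dl_new_hash (const char *s)
{
  uint32_t h = 5381;
  for (unsigned char c = *s; c != '\0'; c = *++s)
    h = h * 33 + c;
  return h;
}

int
main (void)
{
  /* Per the comments in the patch, these should print 0x4f152227
     and 0x7dd7aaaa respectively.  */
  printf ("%#x\n", (unsigned int) dl_new_hash ("pthread_mutex_lock"));
  printf ("%#x\n", (unsigned int) dl_new_hash ("pthread_mutex_unlock"));
  return 0;
}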

sysdeps/nptl/libc-lockP.h

@@ -151,9 +151,6 @@ _Static_assert (LLL_LOCK_INITIALIZER == 0, "LLL_LOCK_INITIALIZER != 0");
   __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
 #endif
-#define __rtld_lock_trylock_recursive(NAME) \
-  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
 /* Unlock the named lock variable. */
 #if IS_IN (libc) || IS_IN (libpthread)
 # define __libc_lock_unlock(NAME) \
@@ -163,19 +160,13 @@ _Static_assert (LLL_LOCK_INITIALIZER == 0, "LLL_LOCK_INITIALIZER != 0");
 #endif
 #define __libc_rwlock_unlock(NAME) __pthread_rwlock_unlock (&(NAME))
-#ifdef SHARED
-# define __rtld_lock_default_lock_recursive(lock) \
-  ++((pthread_mutex_t *)(lock))->__data.__count;
-# define __rtld_lock_default_unlock_recursive(lock) \
-  --((pthread_mutex_t *)(lock))->__data.__count;
+#if IS_IN (rtld)
 # define __rtld_lock_lock_recursive(NAME) \
-  GL(dl_rtld_lock_recursive) (&(NAME).mutex)
+  ___rtld_mutex_lock (&(NAME).mutex)
 # define __rtld_lock_unlock_recursive(NAME) \
-  GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
+  ___rtld_mutex_unlock (&(NAME).mutex)
-#else
+#else /* Not in the dynamic loader. */
 # define __rtld_lock_lock_recursive(NAME) \
   __pthread_mutex_lock (&(NAME).mutex)