elf: Remove lazy tlsdesc relocation related code

Remove generic tlsdesc code related to lazy tlsdesc processing since
lazy tlsdesc relocation is no longer supported.  This includes removing
GL(dl_load_lock) from _dl_make_tlsdesc_dynamic, which is only called at
load time when that lock is already held.

Added a documentation comment too.
Author: Szabolcs Nagy
Date:   2021-02-11 13:24:47 +00:00
Commit: 191f5ae0f3
Parent: 14e2476505
5 changed files with 6 additions and 51 deletions
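Why the lock could go: _dl_make_tlsdesc_dynamic is only reached from the
per-target relocation code while ld.so is applying TLSDESC relocations, and
relocation runs either before the program can have multiple threads or inside
dlopen with GL(dl_load_lock) already held.  A condensed sketch of such a call
site, loosely modeled on the x86_64 elf_machine_rela TLSDESC case (simplified
for illustration, not part of this commit's diff; the undefined-weak path and
error handling are omitted):

/* Sketch: resolving a TLSDESC relocation at load time.  The caller
   (_dl_relocate_object) runs with GL(dl_load_lock) held during dlopen,
   so the callee need not take it again.  */
case R_X86_64_TLSDESC:
  {
    struct tlsdesc volatile *td = (struct tlsdesc volatile *) reloc_addr;

    if (! TRY_STATIC_TLS (map, sym_map))
      {
	/* Dynamic TLS: allocate the descriptor argument.  A NULL
	   return means allocation failure, per the documentation
	   comment this commit adds below.  */
	td->arg = _dl_make_tlsdesc_dynamic (sym_map,
					    sym->st_value + reloc->r_addend);
	td->entry = _dl_tlsdesc_dynamic;
      }
    else
      {
	/* Static TLS: the offset is already known.  */
	td->arg = (void *) (sym->st_value - sym_map->l_tls_offset
			    + reloc->r_addend);
	td->entry = _dl_tlsdesc_return;
      }
  }
  break;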

sysdeps/generic/tlsdeschtab.h

@@ -78,6 +78,10 @@ map_generation (struct link_map *map)
   return GL(dl_tls_generation) + 1;
 }
 
+/* Returns the data pointer for a given map and tls offset that is used
+   to fill in one of the GOT entries referenced by a TLSDESC relocation
+   when using dynamic TLS.  This requires allocation, returns NULL on
+   allocation failure.  */
 void *
 _dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
 {
@@ -85,18 +89,12 @@ _dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
   void **entry;
   struct tlsdesc_dynamic_arg *td, test;
 
-  /* FIXME: We could use a per-map lock here, but is it worth it?  */
-  __rtld_lock_lock_recursive (GL(dl_load_lock));
-
   ht = map->l_mach.tlsdesc_table;
   if (! ht)
     {
       ht = htab_create ();
       if (! ht)
-	{
-	  __rtld_lock_unlock_recursive (GL(dl_load_lock));
-	  return 0;
-	}
+	return 0;
       map->l_mach.tlsdesc_table = ht;
     }
 
@@ -104,15 +102,11 @@ _dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
   test.tlsinfo.ti_offset = ti_offset;
   entry = htab_find_slot (ht, &test, 1, hash_tlsdesc, eq_tlsdesc);
   if (! entry)
-    {
-      __rtld_lock_unlock_recursive (GL(dl_load_lock));
-      return 0;
-    }
+    return 0;
 
   if (*entry)
     {
       td = *entry;
-      __rtld_lock_unlock_recursive (GL(dl_load_lock));
       return td;
     }
 
@@ -122,44 +116,9 @@ _dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
      thread.  */
   td->gen_count = map_generation (map);
   td->tlsinfo = test.tlsinfo;
 
-  __rtld_lock_unlock_recursive (GL(dl_load_lock));
   return td;
 }
 
 # endif /* SHARED */
-
-/* The idea of the following two functions is to stop multiple threads
-   from attempting to resolve the same TLS descriptor without busy
-   waiting.  Ideally, we should be able to release the lock right
-   after changing td->entry, and then using say a condition variable
-   or a futex wake to wake up any waiting threads, but let's try to
-   avoid introducing such dependencies.  */
-
-static int
-__attribute__ ((unused))
-_dl_tlsdesc_resolve_early_return_p (struct tlsdesc volatile *td, void *caller)
-{
-  if (caller != atomic_load_relaxed (&td->entry))
-    return 1;
-
-  __rtld_lock_lock_recursive (GL(dl_load_lock));
-  if (caller != atomic_load_relaxed (&td->entry))
-    {
-      __rtld_lock_unlock_recursive (GL(dl_load_lock));
-      return 1;
-    }
-
-  atomic_store_relaxed (&td->entry, _dl_tlsdesc_resolve_hold);
-
-  return 0;
-}
-
-static void
-__attribute__ ((unused))
-_dl_tlsdesc_wake_up_held_fixups (void)
-{
-  __rtld_lock_unlock_recursive (GL(dl_load_lock));
-}
-
 #endif
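The two helpers deleted above existed solely for the lazy TLSDESC resolvers
that earlier patches in this series removed from the targets: a lazy fixup
called _dl_tlsdesc_resolve_early_return_p on entry to serialize concurrent
resolutions of the same descriptor (parking latecomers on the
_dl_tlsdesc_resolve_hold entry point), and _dl_tlsdesc_wake_up_held_fixups on
exit to drop the lock and release them.  Roughly, in the shape of the old
per-target fixup functions (reconstructed for illustration only; this code
predates the commit, and lazy_trampoline_address is a hypothetical stand-in):

/* Historic sketch of a lazy fixup such as the old
   _dl_tlsdesc_resolve_rela_fixup.  */
void
_dl_tlsdesc_resolve_rela_fixup (struct tlsdesc volatile *td,
				struct link_map *l)
{
  /* 'caller' is the lazy trampoline td->entry initially pointed at
     (found via DT_TLSDESC_PLT in the real code; helper is
     hypothetical here).  */
  void *caller = lazy_trampoline_address (l);

  /* Bail out if another thread already resolved, or is resolving,
     this descriptor; otherwise take GL(dl_load_lock) and park other
     callers on _dl_tlsdesc_resolve_hold.  */
  if (_dl_tlsdesc_resolve_early_return_p (td, caller))
    return;

  /* ... resolve the symbol, then store the final td->arg and
     td->entry while still holding GL(dl_load_lock) ... */

  /* Drop the lock; threads waiting in the hold entry point re-read
     td->entry and tail-call the final resolver.  */
  _dl_tlsdesc_wake_up_held_fixups ();
}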

sysdeps/aarch64/tlsdesc.c

@@ -22,7 +22,6 @@
 #include <tls.h>
 #include <dl-tlsdesc.h>
 #include <dl-unmap-segments.h>
-#define _dl_tlsdesc_resolve_hold 0
 #include <tlsdeschtab.h>
 
 /* Unmap the dynamic object, but also release its TLS descriptor table
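
This hunk and the three identical ones below drop the stub define from the
per-target tlsdesc.c copies (aarch64, arm, i386, x86_64).  The define existed
only because the now-deleted _dl_tlsdesc_resolve_early_return_p in
tlsdeschtab.h referenced _dl_tlsdesc_resolve_hold, a lazy entry point these
targets no longer provide; with that helper gone, including the header needs
no dummy definition.  Schematically:

/* Before: every includer had to define the name, even as a dummy.  */
#define _dl_tlsdesc_resolve_hold 0
#include <tlsdeschtab.h>

/* After: tlsdeschtab.h no longer mentions _dl_tlsdesc_resolve_hold,
   so the include stands alone.  */
#include <tlsdeschtab.h>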

sysdeps/arm/tlsdesc.c

@@ -20,7 +20,6 @@
 #include <tls.h>
 #include <dl-tlsdesc.h>
 #include <dl-unmap-segments.h>
-#define _dl_tlsdesc_resolve_hold 0
 #include <tlsdeschtab.h>
 
 /* Unmap the dynamic object, but also release its TLS descriptor table

sysdeps/i386/tlsdesc.c

@@ -20,7 +20,6 @@
 #include <tls.h>
 #include <dl-tlsdesc.h>
 #include <dl-unmap-segments.h>
-#define _dl_tlsdesc_resolve_hold 0
 #include <tlsdeschtab.h>
 
 /* Unmap the dynamic object, but also release its TLS descriptor table

sysdeps/x86_64/tlsdesc.c

@@ -20,7 +20,6 @@
 #include <tls.h>
 #include <dl-tlsdesc.h>
 #include <dl-unmap-segments.h>
-#define _dl_tlsdesc_resolve_hold 0
 #include <tlsdeschtab.h>
 
 /* Unmap the dynamic object, but also release its TLS descriptor table