* sysdeps/generic/ldsodefs.h (struct dl_scope_free_list): Store
	void * pointers instead of struct link_map **.
	(_dl_scope_free): Change argument type to void *.
	* include/link.h (struct link_map): Change type of l_reldeps
	to struct link_map_reldeps, move l_reldepsact into that
	struct too.
	* elf/dl-deps.c: Include atomic.h.
	(_dl_map_object_deps): Only change l->l_initfini when it is
	fully populated, use _dl_scope_free for freeing it.  Optimize
	removal of libs from reldeps by using l_reserved flag, when
	some removal is needed, allocate a new list instead of
	reallocating and free the old with _dl_scope_free.  Adjust
	for l_reldeps and l_reldepsact changes.
	* elf/dl-lookup.c (add_dependency): Likewise.  Reorganize to allow
	searching in l_initfini and l_reldeps without holding dl_load_lock.
	* elf/dl-fini.c (_dl_sort_fini): Adjust for l_reldeps and
	l_reldepsact changes.
	* elf/dl-close.c (_dl_close_worker): Likewise.
	* elf/dl-open.c (_dl_scope_free): Change argument type to void *.
Ulrich Drepper 2007-09-29 06:58:31 +00:00
parent 90bf831bda
commit 385b4cf4c5
8 changed files with 209 additions and 94 deletions
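
The changes below all follow one publication discipline: a writer fully populates a new l_initfini or l_reldeps list, issues atomic_write_barrier (), and only then stores the new pointer into the link map, while readers that hold no lock pair their pointer load with atomic_read_barrier (). Roughly, in portable C11 terms (a minimal standalone sketch, not glibc's internal atomic.h macros; struct dep_list stands in for struct link_map_reldeps):

#include <stdatomic.h>

struct dep_list
{
  unsigned int act;
  void *list[];
};

static struct dep_list *_Atomic current;

/* Writer side, always under dl_load_lock in the real code: build the
   new list completely, then publish it with one release store, which
   corresponds to atomic_write_barrier () plus a plain assignment.  */
void
publish (struct dep_list *newp)
{
  atomic_store_explicit (&current, newp, memory_order_release);
}

/* Reader side: one acquire load, corresponding to atomic_forced_read
   plus atomic_read_barrier in the patched add_dependency.  Everything
   reachable through the returned pointer is fully initialized.  */
struct dep_list *
snapshot (void)
{
  return atomic_load_explicit (&current, memory_order_acquire);
}

The displaced old list may still be in use by a concurrent reader, so it is handed to _dl_scope_free rather than free; the last two files in the diff make that helper type-generic.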

diff --git a/ChangeLog b/ChangeLog

@@ -1,3 +1,25 @@
+2007-09-24  Jakub Jelinek  <jakub@redhat.com>
+
+	* sysdeps/generic/ldsodefs.h (struct dl_scope_free_list): Store
+	void * pointers instead of struct link_map **.
+	(_dl_scope_free): Change argument type to void *.
+	* include/link.h (struct link_map): Change type of l_reldeps
+	to struct link_map_reldeps, move l_reldepsact into that
+	struct too.
+	* elf/dl-deps.c: Include atomic.h.
+	(_dl_map_object_deps): Only change l->l_initfini when it is
+	fully populated, use _dl_scope_free for freeing it.  Optimize
+	removal of libs from reldeps by using l_reserved flag, when
+	some removal is needed, allocate a new list instead of
+	reallocating and free the old with _dl_scope_free.  Adjust
+	for l_reldeps and l_reldepsact changes.
+	* elf/dl-lookup.c (add_dependency): Likewise.  Reorganize to allow
+	searching in l_initfini and l_reldeps without holding dl_load_lock.
+	* elf/dl-fini.c (_dl_sort_fini): Adjust for l_reldeps and
+	l_reldepsact changes.
+	* elf/dl-close.c (_dl_close_worker): Likewise.
+	* elf/dl-open.c (_dl_scope_free): Change argument type to void *.
+
 2007-09-28  Ulrich Drepper  <drepper@redhat.com>
 
 	* iconvdata/Makefile (modules): Add KOI8-RU.

diff --git a/elf/dl-close.c b/elf/dl-close.c

@@ -203,9 +203,9 @@ _dl_close_worker (struct link_map *map)
	    }

	  /* And the same for relocation dependencies.  */
	  if (l->l_reldeps != NULL)
-	    for (unsigned int j = 0; j < l->l_reldepsact; ++j)
+	    for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
	      {
-		struct link_map *jmap = l->l_reldeps[j];
+		struct link_map *jmap = l->l_reldeps->list[j];

		if (jmap->l_idx != IDX_STILL_USED)
		  {
@@ -497,7 +497,7 @@ _dl_close_worker (struct link_map *map)
   THREAD_GSCOPE_WAIT ();

   /* Now we can free any queued old scopes.  */
-  struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
+  struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
   if (fsl != NULL)
     while (fsl->count > 0)
       free (fsl->list[--fsl->count]);

diff --git a/elf/dl-deps.c b/elf/dl-deps.c

@@ -1,5 +1,6 @@
 /* Load the dependencies of a mapped object.
-   Copyright (C) 1996-2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+   Copyright (C) 1996-2003, 2004, 2005, 2006, 2007
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.

    The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +18,7 @@
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */

+#include <atomic.h>
 #include <assert.h>
 #include <dlfcn.h>
 #include <errno.h>
@@ -465,15 +467,17 @@ _dl_map_object_deps (struct link_map *map,
	{
	  needed[nneeded++] = NULL;

-	  l->l_initfini = (struct link_map **)
+	  struct link_map **l_initfini = (struct link_map **)
	    malloc ((2 * nneeded + 1) * sizeof needed[0]);
-	  if (l->l_initfini == NULL)
+	  if (l_initfini == NULL)
	    _dl_signal_error (ENOMEM, map->l_name, NULL,
			      N_("cannot allocate dependency list"));
-	  l->l_initfini[0] = l;
-	  memcpy (&l->l_initfini[1], needed, nneeded * sizeof needed[0]);
-	  memcpy (&l->l_initfini[nneeded + 1], l->l_initfini,
+	  l_initfini[0] = l;
+	  memcpy (&l_initfini[1], needed, nneeded * sizeof needed[0]);
+	  memcpy (&l_initfini[nneeded + 1], l_initfini,
		  nneeded * sizeof needed[0]);
+	  atomic_write_barrier ();
+	  l->l_initfini = l_initfini;
	}

       /* If we have no auxiliary objects just go on to the next map.  */
@@ -487,25 +491,26 @@ _dl_map_object_deps (struct link_map *map,
   if (errno == 0 && errno_saved != 0)
     __set_errno (errno_saved);

+  struct link_map **old_l_initfini = NULL;
   if (map->l_initfini != NULL && map->l_type == lt_loaded)
     {
       /* This object was previously loaded as a dependency and we have
	 a separate l_initfini list.  We don't need it anymore.  */
       assert (map->l_searchlist.r_list == NULL);
-      free (map->l_initfini);
+      old_l_initfini = map->l_initfini;
     }

   /* Store the search list we built in the object.  It will be used for
      searches in the scope of this object.  */
-  map->l_initfini =
+  struct link_map **l_initfini =
     (struct link_map **) malloc ((2 * nlist + 1)
				 * sizeof (struct link_map *));
-  if (map->l_initfini == NULL)
+  if (l_initfini == NULL)
     _dl_signal_error (ENOMEM, map->l_name, NULL,
		      N_("cannot allocate symbol search list"));
-  map->l_searchlist.r_list = &map->l_initfini[nlist + 1];
+  map->l_searchlist.r_list = &l_initfini[nlist + 1];
   map->l_searchlist.r_nlist = nlist;

   for (nlist = 0, runp = known; runp; runp = runp->next)
@@ -546,10 +551,10 @@ _dl_map_object_deps (struct link_map *map,
 Filters not supported with LD_TRACE_PRELINKING"));
	    }

-	  cnt = _dl_build_local_scope (map->l_initfini, l);
+	  cnt = _dl_build_local_scope (l_initfini, l);
	  assert (cnt <= nlist);
	  for (j = 0; j < cnt; j++)
-	    map->l_initfini[j]->l_reserved = 0;
+	    l_initfini[j]->l_reserved = 0;

	  l->l_local_scope[0] =
	    (struct r_scope_elem *) malloc (sizeof (struct r_scope_elem)
@@ -561,35 +566,50 @@ Filters not supported with LD_TRACE_PRELINKING"));
	  l->l_local_scope[0]->r_nlist = cnt;
	  l->l_local_scope[0]->r_list =
	    (struct link_map **) (l->l_local_scope[0] + 1);
-	  memcpy (l->l_local_scope[0]->r_list, map->l_initfini,
+	  memcpy (l->l_local_scope[0]->r_list, l_initfini,
		  cnt * sizeof (struct link_map *));
	}
     }

   /* Maybe we can remove some relocation dependencies now.  */
   assert (map->l_searchlist.r_list[0] == map);
-  for (i = 0; i < map->l_reldepsact; ++i)
+  struct link_map_reldeps *l_reldeps = NULL;
+  if (map->l_reldeps != NULL)
     {
-      unsigned int j;
+      for (i = 1; i < nlist; ++i)
+	map->l_searchlist.r_list[i]->l_reserved = 1;

-      for (j = 1; j < nlist; ++j)
-	if (map->l_searchlist.r_list[j] == map->l_reldeps[i])
+      struct link_map **list = &map->l_reldeps->list[0];
+      for (i = 0; i < map->l_reldeps->act; ++i)
+	if (list[i]->l_reserved)
	  {
-	    /* A direct or transitive dependency is also on the list
-	       of relocation dependencies.  Remove the latter.  */
-	    for (j = i + 1; j < map->l_reldepsact; ++j)
-	      map->l_reldeps[j - 1] = map->l_reldeps[j];
-
-	    --map->l_reldepsact;
-
-	    /* Account for the '++i' performed by the 'for'.  */
-	    --i;
-	    break;
+	    /* Need to allocate new array of relocation dependencies.  */
+	    struct link_map_reldeps *l_reldeps;
+	    l_reldeps = malloc (sizeof (*l_reldeps)
+				+ map->l_reldepsmax
+				  * sizeof (struct link_map *));
+	    if (l_reldeps == NULL)
+	      /* Bad luck, keep the reldeps duplicated between
+		 map->l_reldeps->list and map->l_initfini lists.  */
+	      ;
+	    else
+	      {
+		unsigned int j = i;
+		memcpy (&l_reldeps->list[0], &list[0],
+			i * sizeof (struct link_map *));
+		for (i = i + 1; i < map->l_reldeps->act; ++i)
+		  if (!list[i]->l_reserved)
+		    l_reldeps->list[j++] = list[i];
+		l_reldeps->act = j;
+	      }
	  }
+
+      for (i = 1; i < nlist; ++i)
+	map->l_searchlist.r_list[i]->l_reserved = 0;
     }

   /* Now determine the order in which the initialization has to happen.  */
-  memcpy (map->l_initfini, map->l_searchlist.r_list,
+  memcpy (l_initfini, map->l_searchlist.r_list,
	  nlist * sizeof (struct link_map *));

   /* We can skip looking for the binary itself which is at the front
      of the search list.  Look through the list backward so that circular
@@ -602,7 +622,7 @@ Filters not supported with LD_TRACE_PRELINKING"));

       /* Find the place in the initfini list where the map is currently
	 located.  */
-      for (j = 1; map->l_initfini[j] != l; ++j)
+      for (j = 1; l_initfini[j] != l; ++j)
	;

       /* Find all object for which the current one is a dependency and
@@ -611,19 +631,18 @@ Filters not supported with LD_TRACE_PRELINKING"));
	{
	  struct link_map **runp;

-	  runp = map->l_initfini[k]->l_initfini;
+	  runp = l_initfini[k]->l_initfini;
	  if (runp != NULL)
	    {
	      while (*runp != NULL)
		if (__builtin_expect (*runp++ == l, 0))
		  {
-		    struct link_map *here = map->l_initfini[k];
+		    struct link_map *here = l_initfini[k];

		    /* Move it now.  */
-		    memmove (&map->l_initfini[j] + 1,
-			     &map->l_initfini[j],
+		    memmove (&l_initfini[j] + 1, &l_initfini[j],
			     (k - j) * sizeof (struct link_map *));
-		    map->l_initfini[j] = here;
+		    l_initfini[j] = here;

		    /* Don't insert further matches before the last
		       entry moved to the front.  */
@@ -635,7 +654,18 @@ Filters not supported with LD_TRACE_PRELINKING"));
	}
     }

   /* Terminate the list of dependencies.  */
-  map->l_initfini[nlist] = NULL;
+  l_initfini[nlist] = NULL;
+  atomic_write_barrier ();
+  map->l_initfini = l_initfini;
+  if (l_reldeps != NULL)
+    {
+      atomic_write_barrier ();
+      void *old_l_reldeps = map->l_reldeps;
+      map->l_reldeps = l_reldeps;
+      _dl_scope_free (old_l_reldeps);
+    }
+  if (old_l_initfini != NULL)
+    _dl_scope_free (old_l_initfini);

   if (errno_reason)
     _dl_signal_error (errno_reason == -1 ? 0 : errno_reason, objname,
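
The pruning loop above also replaces a quadratic scan (for every relocation dependency, search the whole search list) with flag marking: each search-list entry sets its l_reserved bit once, the surviving reldeps are filtered through that bit into a freshly malloc'ed list, and the bits are cleared again. A standalone sketch of the idea, with a simplified stand-in for struct link_map:

#include <stddef.h>

struct obj
{
  int reserved;   /* stand-in for the l_reserved bit */
};

/* Copy into OUT those of the N reldeps that are not among the M
   search-list entries; returns the new count.  Runs in O(m + n)
   rather than the O(m * n) of the deleted nested loops.  */
size_t
prune (struct obj **searchlist, size_t m,
       struct obj **reldeps, size_t n, struct obj **out)
{
  size_t i, j = 0;

  for (i = 0; i < m; ++i)
    searchlist[i]->reserved = 1;

  for (i = 0; i < n; ++i)
    if (!reldeps[i]->reserved)
      out[j++] = reldeps[i];

  for (i = 0; i < m; ++i)
    searchlist[i]->reserved = 0;
  return j;
}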

diff --git a/elf/dl-fini.c b/elf/dl-fini.c

@@ -82,8 +82,8 @@ _dl_sort_fini (struct link_map *l, struct link_map **maps, size_t nmaps,
	  if (__builtin_expect (maps[k]->l_reldeps != NULL, 0))
	    {
-	      unsigned int m = maps[k]->l_reldepsact;
-	      struct link_map **relmaps = maps[k]->l_reldeps;
+	      unsigned int m = maps[k]->l_reldeps->act;
+	      struct link_map **relmaps = &maps[k]->l_reldeps->list[0];

	      while (m-- > 0)
		{
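
The dl-fini.c change is the reader-side idiom in miniature: load the l_reldeps pointer once, then take both the count and the array through that same pointer, so a concurrent republication can never pair an old count with a new array. A minimal sketch, again with a C11 acquire load standing in for glibc's barrier macros:

#include <stdatomic.h>

struct reldeps_like
{
  unsigned int act;
  void *list[];
};

void
visit (struct reldeps_like *_Atomic *rp, void (*fn) (void *))
{
  struct reldeps_like *r = atomic_load_explicit (rp, memory_order_acquire);
  if (r != NULL)
    {
      unsigned int m = r->act;        /* count snapshot */
      void **relmaps = &r->list[0];   /* array behind the same pointer */
      while (m-- > 0)
        fn (relmaps[m]);
    }
}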

diff --git a/elf/dl-lookup.c b/elf/dl-lookup.c

@@ -88,20 +88,50 @@ static int
 internal_function
 add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
 {
-  struct link_map **list;
   struct link_map *runp;
-  unsigned int act;
   unsigned int i;
   int result = 0;
-  unsigned long long int serial;

   /* Avoid self-references and references to objects which cannot be
      unloaded anyway.  */
   if (undef_map == map)
     return 0;

+  /* Avoid references to objects which cannot be unloaded anyway.  */
+  assert (map->l_type == lt_loaded);
+  if ((map->l_flags_1 & DF_1_NODELETE) != 0)
+    return 0;
+
+  struct link_map_reldeps *l_reldeps
+    = atomic_forced_read (undef_map->l_reldeps);
+
+  /* Make sure l_reldeps is read before l_initfini.  */
+  atomic_read_barrier ();
+
+  /* Determine whether UNDEF_MAP already has a reference to MAP.  First
+     look in the normal dependencies.  */
+  struct link_map **l_initfini = atomic_forced_read (undef_map->l_initfini);
+  if (l_initfini != NULL)
+    {
+      for (i = 0; l_initfini[i] != NULL; ++i)
+	if (l_initfini[i] == map)
+	  return 0;
+    }
+
+  /* No normal dependency.  See whether we already had to add it
+     to the special list of dynamic dependencies.  */
+  unsigned int l_reldepsact = 0;
+  if (l_reldeps != NULL)
+    {
+      struct link_map **list = &l_reldeps->list[0];
+      l_reldepsact = l_reldeps->act;
+      for (i = 0; i < l_reldepsact; ++i)
+	if (list[i] == map)
+	  return 0;
+    }
+
   /* Save serial number of the target MAP.  */
-  serial = map->l_serial;
+  unsigned long long serial = map->l_serial;

   /* Make sure nobody can unload the object while we are at it.  */
   if (__builtin_expect (flags & DL_LOOKUP_GSCOPE_LOCK, 0))
@@ -110,39 +140,53 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
	 here, that can result in ABBA deadlock.  */
       THREAD_GSCOPE_RESET_FLAG ();
       __rtld_lock_lock_recursive (GL(dl_load_lock));
-      THREAD_GSCOPE_SET_FLAG ();
+
+      /* While MAP value won't change, after THREAD_GSCOPE_RESET_FLAG ()
+	 it can e.g. point to unallocated memory.  So avoid the optimizer
+	 treating the above read from MAP->l_serial as ensurance it
+	 can safely dereference it.  */
+      map = atomic_forced_read (map);
+
+      /* From this point on it is unsafe to dereference MAP, until it
+	 has been found in one of the lists.  */
+
+      /* Redo the l_initfini check in case undef_map's l_initfini
+	 changed in the mean time.  */
+      if (undef_map->l_initfini != l_initfini
+	  && undef_map->l_initfini != NULL)
+	{
+	  l_initfini = undef_map->l_initfini;
+	  for (i = 0; l_initfini[i] != NULL; ++i)
+	    if (l_initfini[i] == map)
+	      goto out_check;
+	}
+
+      /* Redo the l_reldeps check if undef_map's l_reldeps changed in
+	 the mean time.  */
+      if (undef_map->l_reldeps != NULL)
+	{
+	  if (undef_map->l_reldeps != l_reldeps)
+	    {
+	      struct link_map **list = &undef_map->l_reldeps->list[0];
+	      l_reldepsact = undef_map->l_reldeps->act;
+	      for (i = 0; i < l_reldepsact; ++i)
+		if (list[i] == map)
+		  goto out_check;
+	    }
+	  else if (undef_map->l_reldeps->act > l_reldepsact)
+	    {
+	      struct link_map **list
+		= &undef_map->l_reldeps->list[0];
+	      i = l_reldepsact;
+	      l_reldepsact = undef_map->l_reldeps->act;
+	      for (; i < l_reldepsact; ++i)
+		if (list[i] == map)
+		  goto out_check;
+	    }
+	}
     }
   else
     __rtld_lock_lock_recursive (GL(dl_load_lock));

-  /* From this point on it is unsafe to dereference MAP, until it
-     has been found in one of the lists.  */
-
-  /* Determine whether UNDEF_MAP already has a reference to MAP.  First
-     look in the normal dependencies.  */
-  if (undef_map->l_initfini != NULL)
-    {
-      list = undef_map->l_initfini;
-      for (i = 0; list[i] != NULL; ++i)
-	if (list[i] == map)
-	  goto out_check;
-    }
-
-  /* No normal dependency.  See whether we already had to add it
-     to the special list of dynamic dependencies.  */
-  list = undef_map->l_reldeps;
-  act = undef_map->l_reldepsact;
-
-  for (i = 0; i < act; ++i)
-    if (list[i] == map)
-      goto out_check;
-
   /* The object is not yet in the dependency list.  Before we add
      it make sure just one more time the object we are about to
      reference is still available.  There is a brief period in
@@ -161,8 +205,8 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
   if (map->l_serial != serial)
     goto out_check;

-  /* Avoid references to objects which cannot be unloaded anyway.  */
-  assert (map->l_type == lt_loaded);
+  /* Redo the NODELETE check, as when dl_load_lock wasn't held
+     yet this could have changed.  */
   if ((map->l_flags_1 & DF_1_NODELETE) != 0)
     goto out;
@@ -177,33 +221,46 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
     }

   /* Add the reference now.  */
-  if (__builtin_expect (act >= undef_map->l_reldepsmax, 0))
+  if (__builtin_expect (l_reldepsact >= undef_map->l_reldepsmax, 0))
     {
       /* Allocate more memory for the dependency list.  Since this
	 can never happen during the startup phase we can use
	 `realloc'.  */
-      void *newp;
+      struct link_map_reldeps *newp;
+      unsigned int max
+	= undef_map->l_reldepsmax ? undef_map->l_reldepsmax * 2 : 10;

-      undef_map->l_reldepsmax += 5;
-      newp = realloc (undef_map->l_reldeps,
-		      undef_map->l_reldepsmax
-		      * sizeof (struct link_map *));
-
-      if (__builtin_expect (newp != NULL, 1))
-	undef_map->l_reldeps = (struct link_map **) newp;
+      newp = malloc (sizeof (*newp) + max * sizeof (struct link_map *));
+      if (newp == NULL)
+	{
+	  /* If we didn't manage to allocate memory for the list this is
+	     no fatal problem.  We simply make sure the referenced object
+	     cannot be unloaded.  This is semantically the correct
+	     behavior.  */
+	  map->l_flags_1 |= DF_1_NODELETE;
+	  goto out;
+	}
       else
-	/* Correct the addition.  */
-	undef_map->l_reldepsmax -= 5;
+	{
+	  if (l_reldepsact)
+	    memcpy (&newp->list[0], &undef_map->l_reldeps->list[0],
+		    l_reldepsact * sizeof (struct link_map *));
+	  newp->list[l_reldepsact] = map;
+	  newp->act = l_reldepsact + 1;
+	  atomic_write_barrier ();
+	  void *old = undef_map->l_reldeps;
+	  undef_map->l_reldeps = newp;
+	  undef_map->l_reldepsmax = max;
+	  if (old)
+	    _dl_scope_free (old);
+	}
     }
-
-  /* If we didn't manage to allocate memory for the list this is
-     no fatal mistake.  We simply make sure the referenced object
-     cannot be unloaded.  This is semantically the correct
-     behavior.  */
-  if (__builtin_expect (act < undef_map->l_reldepsmax, 1))
-    undef_map->l_reldeps[undef_map->l_reldepsact++] = map;
   else
-    map->l_flags_1 |= DF_1_NODELETE;
+    {
+      undef_map->l_reldeps->list[l_reldepsact] = map;
+      atomic_write_barrier ();
+      undef_map->l_reldeps->act = l_reldepsact + 1;
+    }

   /* Display information if we are debugging.  */
   if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
@@ -223,6 +280,9 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
   /* Release the lock.  */
   __rtld_lock_unlock_recursive (GL(dl_load_lock));

+  if (__builtin_expect (flags & DL_LOOKUP_GSCOPE_LOCK, 0))
+    THREAD_GSCOPE_SET_FLAG ();
+
   return result;

  out_check:
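
add_dependency now scans l_initfini and l_reldeps twice: first against snapshots taken with atomic_forced_read and no lock, and again after acquiring dl_load_lock, because another thread may have added the dependency between the two scans. The shape of that check/lock/recheck protocol, as a minimal sketch using a pthread mutex and C11 atomics in place of glibc's primitives (the real code additionally revalidates map->l_serial and the DF_1_NODELETE flag before mutating anything):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct dep_list
{
  unsigned int act;
  void *list[];
};

static pthread_mutex_t load_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dep_list *_Atomic deps;

static bool
contains (struct dep_list *l, void *item)
{
  if (l != NULL)
    for (unsigned int i = 0; i < l->act; ++i)
      if (l->list[i] == item)
        return true;
  return false;
}

void
add_dep (void *item)
{
  /* Fast path: scan a lock-free snapshot; a hit takes no lock.  */
  struct dep_list *snap
    = atomic_load_explicit (&deps, memory_order_acquire);
  if (contains (snap, item))
    return;

  pthread_mutex_lock (&load_lock);
  /* Slow path: rescan under the lock, since the list may have grown
     after the snapshot above was taken.  */
  if (!contains (atomic_load_explicit (&deps, memory_order_relaxed), item))
    {
      /* ... allocate a larger copy, append ITEM, publish it with a
         release store, and defer freeing the old list ... */
    }
  pthread_mutex_unlock (&load_lock);
}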

diff --git a/elf/dl-open.c b/elf/dl-open.c

@@ -166,7 +166,7 @@ add_to_global (struct link_map *new)
 }

 int
-_dl_scope_free (struct r_scope_elem **old)
+_dl_scope_free (void *old)
 {
   struct dl_scope_free_list *fsl;
 #define DL_SCOPE_FREE_LIST_SIZE (sizeof (fsl->list) / sizeof (fsl->list[0]))

diff --git a/include/link.h b/include/link.h

@@ -240,8 +240,11 @@ struct link_map

     /* List of the dependencies introduced through symbol binding.  */
     unsigned int l_reldepsmax;
-    unsigned int l_reldepsact;
-    struct link_map **l_reldeps;
+    struct link_map_reldeps
+      {
+	unsigned int act;
+	struct link_map *list[];
+      } *l_reldeps;

     /* Various flag words.  */
     ElfW(Word) l_feature_1;
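
Folding l_reldepsact into the struct as a C99 flexible array member puts the count and the array it bounds behind a single pointer, so one barrier-plus-store of l_reldeps publishes both consistently and a reader can never observe a new count paired with an old array. A sketch of the corresponding allocation (illustrative names, not glibc's):

#include <stdlib.h>

struct reldeps_like
{
  unsigned int act;   /* replaces the separate l_reldepsact field */
  void *list[];       /* C99 flexible array member */
};

struct reldeps_like *
alloc_reldeps (unsigned int max)
{
  /* One allocation covers header and array, mirroring the
     malloc (sizeof (*newp) + max * sizeof (struct link_map *))
     call in the patched add_dependency.  */
  return malloc (sizeof (struct reldeps_like) + max * sizeof (void *));
}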

diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h

@@ -491,7 +491,7 @@ struct rtld_global
   EXTERN struct dl_scope_free_list
   {
     size_t count;
-    struct r_scope_elem **list[50];
+    void *list[50];
   } *_dl_scope_free_list;
 #ifdef SHARED
 };
@@ -1058,7 +1058,7 @@ extern void *_dl_open (const char *name, int mode, const void *caller,
 /* Free or queue for freeing scope OLD.  If other threads might be
    in the middle of _dl_fixup, _dl_profile_fixup or dl*sym using the
    old scope, OLD can't be freed until no thread is using it.  */
-extern int _dl_scope_free (struct r_scope_elem **old) attribute_hidden;
+extern int _dl_scope_free (void *) attribute_hidden;

 /* Add module to slot information data.  */
 extern void _dl_add_to_slotinfo (struct link_map *l) attribute_hidden;
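
Making _dl_scope_free take void * lets one deferred-free mechanism retire old scope arrays, old l_initfini lists, and old l_reldeps structs alike: a block that a lock-free reader may still be traversing is queued on dl_scope_free_list, and the queue is drained only once THREAD_GSCOPE_WAIT (as in the dl-close.c hunk above) guarantees that no such reader remains. A minimal sketch of the protocol, with illustrative names (the real queue hangs off rtld_global and is manipulated under dl_load_lock):

#include <stdlib.h>

struct free_queue
{
  size_t count;
  void *list[50];
};

static struct free_queue pending;

/* Queue OLD instead of freeing it; called with the update lock held,
   as _dl_scope_free is.  */
void
defer_free (void *old)
{
  if (pending.count < 50)
    pending.list[pending.count++] = old;
  else
    /* Stand-in: the real function waits for readers here
       (THREAD_GSCOPE_WAIT) and can then free immediately.  */
    free (old);
}

/* Drain after a grace period, i.e. once every thread has been seen
   outside its lock-free read section, exactly when _dl_close_worker
   runs this loop after THREAD_GSCOPE_WAIT ().  */
void
drain_free_list (void)
{
  while (pending.count > 0)
    free (pending.list[--pending.count]);
}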