Jakub Jelinek <jakub@redhat.com>

Implement reference counting of scope records.
	* elf/dl-close.c (_dl_close): Remove all scopes from removed objects
	from the list in objects which remain.  Always allocate new scope
	record.
	* elf/dl-open.c (dl_open_worker): When growing array for scopes,
	don't resize, allocate a new one.
	* elf/dl-runtime.c: Update reference counters before using a scope
	array.
	* elf/dl-sym.c: Likewise.
	* elf/dl-libc.c: Adjust for l_scope name change.
	* elf/dl-load.c: Likewise.
	* elf/dl-object.c: Likewise.
	* elf/rtld.c: Likewise.
	* include/link.h: Include <rtld-lowlevel.h>.  Define struct
	r_scoperec.  Replace r_scope with pointer to r_scoperec structure.
	Add l_scoperec_lock.
	* sysdeps/generic/ldsodefs.h: Include <rtld-lowlevel.h>.
	* sysdeps/generic/rtld-lowlevel.h: New file.

	* include/atomic.h: Rename atomic_and to atomic_and_val and
	atomic_or to atomic_or_val.  Define new macros atomic_and and
	atomic_or which do not return values.
	* sysdeps/x86_64/bits/atomic.h: Define atomic_and and atomic_or.
	Various cleanups.
	* sysdeps/i386/i486/bits/atomic.h: Likewise.
Ulrich Drepper 2006-10-10 00:51:29 +00:00
parent 7484f797e4
commit 1100f84983
18 changed files with 5651 additions and 4582 deletions
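In outline: every link map now points to a reference-counted scope record instead of a bare scope array. Readers pin the record before a lookup and unpin it afterwards; dl-close swaps in a replacement and lets the last reader clean up. A condensed sketch using the names from the diffs below (the comments and the condensation are mine, not part of the commit):

/* From include/link.h: the scope array hangs off the end of the
   allocation.  */
struct r_scoperec
{
  bool remove_after_use;          /* set by dl-close once the record is retired */
  bool notify;                    /* last user must wake the waiting dl-close */
  int nusers;                     /* readers currently using 'scope' */
  struct r_scope_elem *scope[0];
};

/* Reader side (dl-runtime.c, dl-sym.c): pin the current record.  */
__rtld_mrlock_lock (l->l_scoperec_lock);
struct r_scoperec *scoperec = l->l_scoperec;
atomic_increment (&scoperec->nusers);
__rtld_mrlock_unlock (l->l_scoperec_lock);

/* ... use scoperec->scope for the symbol lookup ... */

/* Unpin; the last user of a retired record frees it, or wakes the
   dl-close that is waiting for the count to drain.  */
if (atomic_decrement_val (&scoperec->nusers) == 0
    && scoperec->remove_after_use)
  {
    if (scoperec->notify)
      __rtld_notify (scoperec->nusers);
    else
      free (scoperec);
  }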

ChangeLog

@ -1,4 +1,31 @@
2006-10-09 Ulrich Drepper <drepper@redhat.com>
Jakub Jelinek <jakub@redhat.com>
Implement reference counting of scope records.
* elf/dl-close.c (_dl_close): Remove all scopes from removed objects
from the list in objects which remain. Always allocate new scope
record.
* elf/dl-open.c (dl_open_worker): When growing array for scopes,
don't resize, allocate a new one.
* elf/dl-runtime.c: Update reference counters before using a scope
array.
* elf/dl-sym.c: Likewise.
* elf/dl-libc.c: Adjust for l_scope name change.
* elf/dl-load.c: Likewise.
* elf/dl-object.c: Likewise.
* elf/rtld.c: Likewise.
* include/link.h: Include <rtld-lowlevel.h>. Define struct
r_scoperec. Replace r_scope with pointer to r_scoperec structure.
Add l_scoperec_lock.
* sysdeps/generic/ldsodefs.h: Include <rtld-lowlevel.h>.
* sysdeps/generic/rtld-lowlevel.h: New file.
* include/atomic.h: Rename atomic_and to atomic_and_val and
atomic_or to atomic_or_val. Define new macros atomic_and and
atomic_or which do not return values.
* sysdeps/x86_64/bits/atomic.h: Define atomic_and and atomic_or.
Various cleanups.
* sysdeps/i386/i486/bits/atomic.h: Likewise.
* po/sv.po: Update from translation team.

elf/dl-close.c

@ -19,6 +19,7 @@
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
@ -35,6 +36,10 @@
typedef void (*fini_t) (void);
/* Special l_idx value used to indicate which objects remain loaded. */
#define IDX_STILL_USED -1
#ifdef USE_TLS
/* Returns true if a non-empty entry was found. */
static bool
@ -188,7 +193,7 @@ _dl_close (void *_map)
done[done_index] = 1;
used[done_index] = 1;
/* Signal the object is still needed. */
l->l_idx = -1;
l->l_idx = IDX_STILL_USED;
/* Mark all dependencies as used. */
if (l->l_initfini != NULL)
@ -196,7 +201,7 @@ _dl_close (void *_map)
struct link_map **lp = &l->l_initfini[1];
while (*lp != NULL)
{
if ((*lp)->l_idx != -1)
if ((*lp)->l_idx != IDX_STILL_USED)
{
assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
@ -217,7 +222,7 @@ _dl_close (void *_map)
{
struct link_map *jmap = l->l_reldeps[j];
if (jmap->l_idx != -1)
if (jmap->l_idx != IDX_STILL_USED)
{
assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
@ -310,8 +315,9 @@ _dl_close (void *_map)
/* Else used[i]. */
else if (imap->l_type == lt_loaded)
{
if (imap->l_searchlist.r_list == NULL
&& imap->l_initfini != NULL)
struct r_scope_elem *new_list = NULL;
if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
{
/* The object is still used. But one of the objects we are
unloading right now is responsible for loading it. If
@ -328,44 +334,114 @@ _dl_close (void *_map)
imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
imap->l_searchlist.r_nlist = cnt;
for (cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
/* This relies on l_scope[] entries always being set either
to the map's own l_symbolic_searchlist address, or to some
map's l_searchlist address. */
if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
{
struct link_map *tmap;
tmap = (struct link_map *) ((char *) imap->l_scope[cnt]
- offsetof (struct link_map,
l_searchlist));
assert (tmap->l_ns == ns);
if (tmap->l_idx != -1)
{
imap->l_scope[cnt] = &imap->l_searchlist;
break;
}
}
new_list = &imap->l_searchlist;
}
else
/* Count the number of scopes which remain after the unload.
When we add the local search list, count it. Always add
one for the terminating NULL pointer. */
size_t remain = (new_list != NULL) + 1;
bool removed_any = false;
for (size_t cnt = 0; imap->l_scoperec->scope[cnt] != NULL; ++cnt)
/* This relies on l_scope[] entries always being set either
to the map's own l_symbolic_searchlist address, or to some
map's l_searchlist address. */
if (imap->l_scoperec->scope[cnt] != &imap->l_symbolic_searchlist)
{
struct link_map *tmap = (struct link_map *)
((char *) imap->l_scoperec->scope[cnt]
- offsetof (struct link_map, l_searchlist));
assert (tmap->l_ns == ns);
if (tmap->l_idx == IDX_STILL_USED)
++remain;
else
removed_any = true;
}
else
++remain;
if (removed_any)
{
unsigned int cnt = 0;
while (imap->l_scope[cnt] != NULL)
/* Always allocate a new array for the scope. This is
necessary since we must be able to determine the last
user of the current array. If possible use the link map's
memory. */
size_t new_size;
struct r_scoperec *newp;
if (imap->l_scoperec != &imap->l_scoperec_mem
&& remain < NINIT_SCOPE_ELEMS (imap)
&& imap->l_scoperec_mem.nusers == 0)
{
if (imap->l_scope[cnt] == &map->l_searchlist)
{
while ((imap->l_scope[cnt] = imap->l_scope[cnt + 1])
!= NULL)
++cnt;
break;
}
++cnt;
new_size = NINIT_SCOPE_ELEMS (imap);
newp = &imap->l_scoperec_mem;
}
else
{
new_size = imap->l_scope_max;
newp = (struct r_scoperec *)
malloc (sizeof (struct r_scoperec)
+ new_size * sizeof (struct r_scope_elem *));
if (newp == NULL)
_dl_signal_error (ENOMEM, "dlclose", NULL,
N_("cannot create scope list"));
}
newp->nusers = 0;
newp->remove_after_use = false;
newp->notify = false;
/* Copy over the remaining scope elements. */
remain = 0;
for (size_t cnt = 0; imap->l_scoperec->scope[cnt] != NULL; ++cnt)
{
if (imap->l_scoperec->scope[cnt]
!= &imap->l_symbolic_searchlist)
{
struct link_map *tmap = (struct link_map *)
((char *) imap->l_scoperec->scope[cnt]
- offsetof (struct link_map, l_searchlist));
if (tmap->l_idx != IDX_STILL_USED)
{
/* Remove the scope. Or replace with own map's
scope. */
if (new_list != NULL)
{
newp->scope[remain++] = new_list;
new_list = NULL;
}
continue;
}
}
newp->scope[remain++] = imap->l_scoperec->scope[cnt];
}
newp->scope[remain] = NULL;
struct r_scoperec *old = imap->l_scoperec;
__rtld_mrlock_change (imap->l_scoperec_lock);
imap->l_scoperec = newp;
__rtld_mrlock_done (imap->l_scoperec_lock);
if (atomic_increment_val (&old->nusers) != 1)
{
old->remove_after_use = true;
old->notify = true;
if (atomic_decrement_val (&old->nusers) != 0)
__rtld_waitzero (old->nusers);
}
/* No user anymore, we can free it now. */
if (old != &imap->l_scoperec_mem)
free (old);
imap->l_scope_max = new_size;
}
/* The loader is gone, so mark the object as not having one.
Note: l_idx != -1 -> object will be removed. */
if (imap->l_loader != NULL && imap->l_loader->l_idx != -1)
Note: l_idx != IDX_STILL_USED -> object will be removed. */
if (imap->l_loader != NULL
&& imap->l_loader->l_idx != IDX_STILL_USED)
imap->l_loader = NULL;
/* Remember where the first dynamically loaded object is. */
@ -570,8 +646,8 @@ _dl_close (void *_map)
free (imap->l_initfini);
/* Remove the scope array if we allocated it. */
if (imap->l_scope != imap->l_scope_mem)
free (imap->l_scope);
if (imap->l_scoperec != &imap->l_scoperec_mem)
free (imap->l_scoperec);
if (imap->l_phdr_allocated)
free ((void *) imap->l_phdr);
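
The writer side of the handoff, condensed from the _dl_close hunk above: publish the replacement record under the lock, then either free the old record at once or mark it for the last reader to handle (the condensation and comments are mine):

struct r_scoperec *old = imap->l_scoperec;

__rtld_mrlock_change (imap->l_scoperec_lock);   /* shut out new readers */
imap->l_scoperec = newp;                        /* publish the replacement */
__rtld_mrlock_done (imap->l_scoperec_lock);     /* readmit readers */

if (atomic_increment_val (&old->nusers) != 1)   /* readers still pinned it? */
  {
    old->remove_after_use = true;
    old->notify = true;
    if (atomic_decrement_val (&old->nusers) != 0)
      __rtld_waitzero (old->nusers);            /* wait for the last reader */
  }
/* No user anymore; only heap-allocated records are freed.  */
if (old != &imap->l_scoperec_mem)
  free (old);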

elf/dl-libc.c

@ -1,5 +1,5 @@
/* Handle loading and unloading shared objects for internal libc purposes.
Copyright (C) 1999,2000,2001,2002,2004,2005 Free Software Foundation, Inc.
Copyright (C) 1999-2002,2004,2005,2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Zack Weinberg <zack@rabi.columbia.edu>, 1999.
@ -133,7 +133,8 @@ do_dlsym_private (void *ptr)
struct do_dlsym_args *args = (struct do_dlsym_args *) ptr;
args->ref = NULL;
l = GLRO(dl_lookup_symbol_x) (args->name, args->map, &args->ref,
args->map->l_scope, &vers, 0, 0, NULL);
args->map->l_scoperec->scope, &vers, 0, 0,
NULL);
args->loadbase = l;
}

elf/dl-load.c

@ -1473,7 +1473,7 @@ cannot enable executable stack as shared object requires");
have to do this for the main map. */
if ((mode & RTLD_DEEPBIND) == 0
&& __builtin_expect (l->l_info[DT_SYMBOLIC] != NULL, 0)
&& &l->l_searchlist != l->l_scope[0])
&& &l->l_searchlist != l->l_scoperec->scope[0])
{
/* Create an appropriate searchlist. It contains only this map.
This is the definition of DT_SYMBOLIC in SysVr4. */
@ -1490,11 +1490,11 @@ cannot enable executable stack as shared object requires");
l->l_symbolic_searchlist.r_nlist = 1;
/* Now move the existing entries one back. */
memmove (&l->l_scope[1], &l->l_scope[0],
(l->l_scope_max - 1) * sizeof (l->l_scope[0]));
memmove (&l->l_scoperec->scope[1], &l->l_scoperec->scope[0],
(l->l_scope_max - 1) * sizeof (l->l_scoperec->scope[0]));
/* Now add the new entry. */
l->l_scope[0] = &l->l_symbolic_searchlist;
l->l_scoperec->scope[0] = &l->l_symbolic_searchlist;
}
/* Remember whether this object must be initialized first. */

elf/dl-object.c

@ -1,5 +1,5 @@
/* Storage management for the chain of loaded shared objects.
Copyright (C) 1995-2002, 2004 Free Software Foundation, Inc.
Copyright (C) 1995-2002, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@ -82,8 +82,14 @@ _dl_new_object (char *realname, const char *libname, int type,
/* Use the 'l_scope_mem' array by default for the 'l_scope'
information. If we need more entries we will allocate a large
array dynamically. */
new->l_scope = new->l_scope_mem;
new->l_scope_max = sizeof (new->l_scope_mem) / sizeof (new->l_scope_mem[0]);
new->l_scoperec = &new->l_scoperec_mem;
new->l_scope_max = (sizeof (new->l_scope_realmem.scope_elems)
/ sizeof (new->l_scope_realmem.scope_elems[0]));
/* No need to initialize the scope lock if the initializer is zero. */
#if _RTLD_MRLOCK_INITIALIZER != 0
__rtld_mrlock_initialize (new->l_scoperec_mem.lock);
#endif
/* Counter for the scopes we have to handle. */
idx = 0;
@ -98,7 +104,8 @@ _dl_new_object (char *realname, const char *libname, int type,
l->l_next = new;
/* Add the global scope. */
new->l_scope[idx++] = &GL(dl_ns)[nsid]._ns_loaded->l_searchlist;
new->l_scoperec->scope[idx++]
= &GL(dl_ns)[nsid]._ns_loaded->l_searchlist;
}
else
GL(dl_ns)[nsid]._ns_loaded = new;
@ -114,15 +121,15 @@ _dl_new_object (char *realname, const char *libname, int type,
loader = loader->l_loader;
/* Insert the scope if it isn't the global scope we already added. */
if (idx == 0 || &loader->l_searchlist != new->l_scope[0])
if (idx == 0 || &loader->l_searchlist != new->l_scoperec->scope[0])
{
if ((mode & RTLD_DEEPBIND) != 0 && idx != 0)
{
new->l_scope[1] = new->l_scope[0];
new->l_scoperec->scope[1] = new->l_scoperec->scope[0];
idx = 0;
}
new->l_scope[idx] = &loader->l_searchlist;
new->l_scoperec->scope[idx] = &loader->l_searchlist;
}
new->l_local_scope[0] = &new->l_searchlist;

elf/dl-open.c

@ -343,7 +343,7 @@ dl_open_worker (void *a)
start the profiling. */
struct link_map *old_profile_map = GL(dl_profile_map);
_dl_relocate_object (l, l->l_scope, 1, 1);
_dl_relocate_object (l, l->l_scoperec->scope, 1, 1);
if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
{
@ -356,7 +356,7 @@ dl_open_worker (void *a)
}
else
#endif
_dl_relocate_object (l, l->l_scope, lazy, 0);
_dl_relocate_object (l, l->l_scoperec->scope, lazy, 0);
}
if (l == new)
@ -374,11 +374,13 @@ dl_open_worker (void *a)
not been loaded here and now. */
if (imap->l_init_called && imap->l_type == lt_loaded)
{
struct r_scope_elem **runp = imap->l_scope;
struct r_scope_elem **runp = imap->l_scoperec->scope;
size_t cnt = 0;
while (*runp != NULL)
{
if (*runp == &new->l_searchlist)
break;
++cnt;
++runp;
}
@ -391,35 +393,58 @@ dl_open_worker (void *a)
{
/* The 'r_scope' array is too small. Allocate a new one
dynamically. */
struct r_scope_elem **newp;
size_t new_size = imap->l_scope_max * 2;
size_t new_size;
struct r_scoperec *newp;
if (imap->l_scope == imap->l_scope_mem)
if (imap->l_scoperec != &imap->l_scoperec_mem
&& imap->l_scope_max < NINIT_SCOPE_ELEMS (imap)
&& imap->l_scoperec_mem.nusers == 0)
{
newp = (struct r_scope_elem **)
malloc (new_size * sizeof (struct r_scope_elem *));
if (newp == NULL)
_dl_signal_error (ENOMEM, "dlopen", NULL,
N_("cannot create scope list"));
imap->l_scope = memcpy (newp, imap->l_scope,
cnt * sizeof (imap->l_scope[0]));
new_size = NINIT_SCOPE_ELEMS (imap);
newp = &imap->l_scoperec_mem;
}
else
{
newp = (struct r_scope_elem **)
realloc (imap->l_scope,
new_size * sizeof (struct r_scope_elem *));
new_size = imap->l_scope_max * 2;
newp = (struct r_scoperec *)
malloc (sizeof (struct r_scoperec)
+ new_size * sizeof (struct r_scope_elem *));
if (newp == NULL)
_dl_signal_error (ENOMEM, "dlopen", NULL,
N_("cannot create scope list"));
imap->l_scope = newp;
}
newp->nusers = 0;
newp->remove_after_use = false;
newp->notify = false;
memcpy (newp->scope, imap->l_scoperec->scope,
cnt * sizeof (imap->l_scoperec->scope[0]));
struct r_scoperec *old = imap->l_scoperec;
if (old == &imap->l_scoperec_mem)
imap->l_scoperec = newp;
else
{
__rtld_mrlock_change (imap->l_scoperec_lock);
imap->l_scoperec = newp;
__rtld_mrlock_done (imap->l_scoperec_lock);
atomic_increment (&old->nusers);
old->remove_after_use = true;
if (atomic_decrement_val (&old->nusers) == 0)
/* No user, we can free it here and now. */
free (old);
}
imap->l_scope_max = new_size;
}
imap->l_scope[cnt++] = &new->l_searchlist;
imap->l_scope[cnt] = NULL;
/* First terminate the extended list. Otherwise a thread
might use the new last element and then use the garbage
at offset IDX+1. */
imap->l_scoperec->scope[cnt + 1] = NULL;
atomic_write_barrier ();
imap->l_scoperec->scope[cnt] = &new->l_searchlist;
}
#if USE_TLS
/* Only add TLS memory if this object is loaded now and

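Note the publication order in the dl-open.c append above: the new NULL terminator is stored at cnt + 1 and a write barrier is issued before the entry at cnt becomes visible, so a lock-free reader walking the array either stops at the old terminator or sees a fully terminated longer list, never garbage. In isolation:

/* Append to scope[] (cnt live entries) while readers walk it lock-free.  */
imap->l_scoperec->scope[cnt + 1] = NULL;            /* terminate first */
atomic_write_barrier ();                            /* order the two stores */
imap->l_scoperec->scope[cnt] = &new->l_searchlist;  /* then expose the entry */
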
elf/dl-runtime.c

@ -1,5 +1,5 @@
/* On-demand PLT fixup for shared objects.
Copyright (C) 1995-2002,2003,2004,2005 Free Software Foundation, Inc.
Copyright (C) 1995-2002,2003,2004,2005,2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@ -92,16 +92,36 @@ _dl_fixup (
version = NULL;
}
struct r_scoperec *scoperec = l->l_scoperec;
if (l->l_type == lt_loaded)
{
__rtld_mrlock_lock (l->l_scoperec_lock);
scoperec = l->l_scoperec;
atomic_increment (&scoperec->nusers);
__rtld_mrlock_unlock (l->l_scoperec_lock);
}
result = _dl_lookup_symbol_x (strtab + sym->st_name, l, &sym,
l->l_scope, version, ELF_RTYPE_CLASS_PLT,
scoperec->scope, version,
ELF_RTYPE_CLASS_PLT,
DL_LOOKUP_ADD_DEPENDENCY, NULL);
if (l->l_type == lt_loaded
&& atomic_decrement_val (&scoperec->nusers) == 0
&& __builtin_expect (scoperec->remove_after_use, 0))
{
if (scoperec->notify)
__rtld_notify (scoperec->nusers);
else
free (scoperec);
}
/* Currently result contains the base load address (or link map)
of the object that defines sym. Now add in the symbol
offset. */
value = DL_FIXUP_MAKE_VALUE (result,
sym ? LOOKUP_VALUE_ADDRESS (result)
+ sym->st_value : 0);
sym ? (LOOKUP_VALUE_ADDRESS (result)
+ sym->st_value) : 0);
}
else
{
@ -174,11 +194,30 @@ _dl_profile_fixup (
version = NULL;
}
struct r_scoperec *scoperec = l->l_scoperec;
if (l->l_type == lt_loaded)
{
__rtld_mrlock_lock (l->l_scoperec_lock);
scoperec = l->l_scoperec;
atomic_increment (&scoperec->nusers);
__rtld_mrlock_unlock (l->l_scoperec_lock);
}
result = _dl_lookup_symbol_x (strtab + refsym->st_name, l, &defsym,
l->l_scope, version,
scoperec->scope, version,
ELF_RTYPE_CLASS_PLT,
DL_LOOKUP_ADD_DEPENDENCY, NULL);
if (l->l_type == lt_loaded
&& atomic_decrement_val (&scoperec->nusers) == 0
&& __builtin_expect (scoperec->remove_after_use, 0))
{
if (scoperec->notify)
__rtld_notify (scoperec->nusers);
else
free (scoperec);
}
/* Currently result contains the base load address (or link map)
of the object that defines sym. Now add in the symbol
offset. */

elf/dl-sym.c

@ -17,6 +17,7 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#include <assert.h>
#include <stddef.h>
#include <setjmp.h>
#include <libintl.h>
@ -58,6 +59,30 @@ _dl_tls_symaddr (struct link_map *map, const ElfW(Sym) *ref)
#endif
struct call_dl_lookup_args
{
/* Arguments to do_dlsym. */
struct link_map *map;
const char *name;
struct r_scope_elem **scope;
struct r_found_version *vers;
int flags;
/* Return values of do_dlsym. */
lookup_t loadbase;
const ElfW(Sym) **refp;
};
static void
call_dl_lookup (void *ptr)
{
struct call_dl_lookup_args *args = (struct call_dl_lookup_args *) ptr;
args->map = GLRO(dl_lookup_symbol_x) (args->name, args->map, args->refp,
args->scope, args->vers, 0,
args->flags, NULL);
}
static void *
internal_function
do_sym (void *handle, const char *name, void *who,
@ -84,10 +109,62 @@ do_sym (void *handle, const char *name, void *who,
}
if (handle == RTLD_DEFAULT)
/* Search the global scope. */
result = GLRO(dl_lookup_symbol_x) (name, match, &ref, match->l_scope,
vers, 0, flags|DL_LOOKUP_ADD_DEPENDENCY,
NULL);
{
/* Search the global scope. There is the simple case where
we look up in the scope of an object which was part of
the initial binary, and the more complex case where the
object was dynamically loaded and the scope array can
change. */
if (match->l_type != lt_loaded)
result = GLRO(dl_lookup_symbol_x) (name, match, &ref,
match->l_scoperec->scope, vers, 0,
flags | DL_LOOKUP_ADD_DEPENDENCY,
NULL);
else
{
__rtld_mrlock_lock (match->l_scoperec_lock);
struct r_scoperec *scoperec = match->l_scoperec;
atomic_increment (&scoperec->nusers);
__rtld_mrlock_unlock (match->l_scoperec_lock);
struct call_dl_lookup_args args;
args.name = name;
args.map = match;
args.scope = scoperec->scope;
args.vers = vers;
args.flags = flags | DL_LOOKUP_ADD_DEPENDENCY;
args.refp = &ref;
const char *objname;
const char *errstring = NULL;
bool malloced;
int err = GLRO(dl_catch_error) (&objname, &errstring, &malloced,
call_dl_lookup, &args);
if (atomic_decrement_val (&scoperec->nusers) == 0
&& __builtin_expect (scoperec->remove_after_use, 0))
{
if (scoperec->notify)
__rtld_notify (scoperec->nusers);
else
free (scoperec);
}
if (__builtin_expect (errstring != NULL, 0))
{
/* The lookup was unsuccessful. Rethrow the error. */
char *errstring_dup = strdupa (errstring);
char *objname_dup = strdupa (objname);
if (malloced)
free ((char *) errstring);
GLRO(dl_signal_error) (err, objname_dup, NULL, errstring_dup);
/* NOTREACHED */
}
result = args.map;
}
}
else if (handle == RTLD_NEXT)
{
if (__builtin_expect (match == GL(dl_ns)[LM_ID_BASE]._ns_loaded, 0))

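One subtlety in the do_sym hunk above: with the scope record pinned, the lookup runs through GLRO(dl_catch_error) rather than calling the lookup function directly, so a lookup failure cannot be thrown past the reference count (which would leave a dl-close stuck in __rtld_waitzero). The count is dropped first and the error re-raised afterwards. Condensed, with my comments:

/* Run the lookup with errors caught, not thrown; 'args' is the
   call_dl_lookup_args set up in the hunk above.  */
int err = GLRO(dl_catch_error) (&objname, &errstring, &malloced,
                                call_dl_lookup, &args);

/* Drop scoperec->nusers here, exactly as in the reader sketch at the
   top of the page, whether or not the lookup succeeded.  */

/* Only now rethrow, from stack copies, since the original string may
   have been malloc'd and must be freed.  */
if (errstring != NULL)
  {
    char *errstring_dup = strdupa (errstring);
    char *objname_dup = strdupa (objname);
    if (malloced)
      free ((char *) errstring);
    GLRO(dl_signal_error) (err, objname_dup, NULL, errstring_dup);
  }
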
elf/rtld.c

@ -609,7 +609,7 @@ relocate_doit (void *a)
{
struct relocate_args *args = (struct relocate_args *) a;
_dl_relocate_object (args->l, args->l->l_scope, args->lazy, 0);
_dl_relocate_object (args->l, args->l->l_scoperec->scope, args->lazy, 0);
}
static void
@ -1963,8 +1963,8 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n",
lookup_t result;
result = _dl_lookup_symbol_x (INTUSE(_dl_argv)[i], main_map,
&ref, main_map->l_scope, NULL,
ELF_RTYPE_CLASS_PLT,
&ref, main_map->l_scoperec->scope,
NULL, ELF_RTYPE_CLASS_PLT,
DL_LOOKUP_ADD_DEPENDENCY, NULL);
loadbase = LOOKUP_VALUE_ADDRESS (result);
@ -2006,8 +2006,8 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n",
{
/* Mark the link map as not yet relocated again. */
GL(dl_rtld_map).l_relocated = 0;
_dl_relocate_object (&GL(dl_rtld_map), main_map->l_scope,
0, 0);
_dl_relocate_object (&GL(dl_rtld_map),
main_map->l_scoperec->scope, 0, 0);
}
}
#define VERNEEDTAG (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGIDX (DT_VERNEED))
@ -2227,7 +2227,7 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n",
}
if (l != &GL(dl_rtld_map))
_dl_relocate_object (l, l->l_scope, GLRO(dl_lazy),
_dl_relocate_object (l, l->l_scoperec->scope, GLRO(dl_lazy),
consider_profiling);
#ifdef USE_TLS
@ -2303,7 +2303,8 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n",
HP_TIMING_NOW (start);
/* Mark the link map as not yet relocated again. */
GL(dl_rtld_map).l_relocated = 0;
_dl_relocate_object (&GL(dl_rtld_map), main_map->l_scope, 0, 0);
_dl_relocate_object (&GL(dl_rtld_map), main_map->l_scoperec->scope,
0, 0);
HP_TIMING_NOW (stop);
HP_TIMING_DIFF (add, start, stop);
HP_TIMING_ACCUM_NT (relocate_time, add);

include/atomic.h

@ -273,9 +273,27 @@
__oldval & __mask; })
#endif
/* Atomically *mem &= mask and return the old value of *mem. */
/* Atomically *mem &= mask. */
#ifndef atomic_and
# define atomic_and(mem, mask) \
do { \
__typeof (*(mem)) __oldval; \
__typeof (mem) __memp = (mem); \
__typeof (*(mem)) __mask = (mask); \
\
do \
__oldval = (*__memp); \
while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp, \
__oldval \
& __mask, \
__oldval), \
0)); \
} while (0)
#endif
/* Atomically *mem &= mask and return the old value of *mem. */
#ifndef atomic_and_val
# define atomic_and_val(mem, mask) \
({ __typeof (*(mem)) __oldval; \
__typeof (mem) __memp = (mem); \
__typeof (*(mem)) __mask = (mask); \
@ -294,6 +312,24 @@
/* Atomically *mem |= mask and return the old value of *mem. */
#ifndef atomic_or
# define atomic_or(mem, mask) \
do { \
__typeof (*(mem)) __oldval; \
__typeof (mem) __memp = (mem); \
__typeof (*(mem)) __mask = (mask); \
\
do \
__oldval = (*__memp); \
while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp, \
__oldval \
| __mask, \
__oldval), \
0)); \
} while (0)
#endif
/* Atomically *mem |= mask and return the old value of *mem. */
#ifndef atomic_or_val
# define atomic_or_val(mem, mask) \
({ __typeof (*(mem)) __oldval; \
__typeof (mem) __memp = (mem); \
__typeof (*(mem)) __mask = (mask); \
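The rename matters at call sites: the new statement-like atomic_and/atomic_or are for pure side effects, while the _val variants keep the old behavior of returning the previous value. A hypothetical illustration (the variable names are mine):

unsigned int flags = 0x13;

/* Void forms: mutate memory, no result.  */
atomic_and (&flags, ~0x10u);    /* flags is now 0x03 */
atomic_or (&flags, 0x40);       /* flags is now 0x43 */

/* _val forms: same mutation, but the previous value comes back,
   e.g. to test whether a bit was already set.  */
unsigned int old = atomic_or_val (&flags, 0x04);    /* old == 0x43 */
if ((old & 0x04) == 0)
  /* this thread is the one that set the bit */;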

include/link.h

@ -43,6 +43,8 @@ extern unsigned int la_objopen (struct link_map *__map, Lmid_t __lmid,
#include <bits/linkmap.h>
#include <dl-lookupcfg.h>
#include <tls.h> /* Defines USE_TLS. */
#include <bits/libc-lock.h>
#include <rtld-lowlevel.h>
/* Some internal data structures of the dynamic linker used in the
@ -73,6 +75,18 @@ struct r_search_path_struct
};
/* Structure for a scope. Each such data structure has a lock. The
lock allows many readers. It can be invalidated by setting bit 31
which means that no more lockers are allowed. */
struct r_scoperec
{
bool remove_after_use;
bool notify;
int nusers;
struct r_scope_elem *scope[0];
};
/* Structure describing a loaded shared object. The `l_next' and `l_prev'
members form a chain of all the shared objects loaded at startup.
@ -120,7 +134,7 @@ struct link_map
are indexed by DT_ADDRTAGIDX(tagvalue), see <elf.h>. */
ElfW(Dyn) *l_info[DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM
+ DT_EXTRANUM + DT_VALNUM + DT_ADDRNUM];
+ DT_EXTRANUM + DT_VALNUM + DT_ADDRNUM];
const ElfW(Phdr) *l_phdr; /* Pointer to program header table in core. */
ElfW(Addr) l_entry; /* Entry point location. */
ElfW(Half) l_phnum; /* Number of program header entries. */
@ -212,12 +226,27 @@ struct link_map
ElfW(Addr) l_text_end;
/* Default array for 'l_scope'. */
struct r_scope_elem *l_scope_mem[4];
union
{
struct r_scoperec l_scoperec_mem;
struct
{
struct r_scoperec scoperec_struct;
/* XXX This number should be increased once the scope memory
handling has been tested. */
struct r_scope_elem *scope_elems[4];
#define NINIT_SCOPE_ELEMS(map) \
(sizeof ((map)->l_scope_realmem.scope_elems) \
/ sizeof ((map)->l_scope_realmem.scope_elems[0]))
} l_scope_realmem;
};
/* Size of array allocated for 'l_scope'. */
size_t l_scope_max;
/* This is an array defining the lookup scope for this link map.
There are initially at most three different scope lists. */
struct r_scope_elem **l_scope;
struct r_scoperec *l_scoperec;
/* Lock protecting all uses of the SCOPEREC. */
__rtld_mrlock_define (, l_scoperec_lock)
/* A similar array, this time only with the local scope. This is
used occasionally. */
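
The union above is a layout trick: struct r_scoperec ends in a zero-length array, and overlaying it with a sibling struct that reserves four real slots gives l_scoperec_mem inline capacity, which NINIT_SCOPE_ELEMS reads back. A standalone miniature of the same trick (simplified types, GNU zero-length arrays assumed; these are not the glibc definitions):

#include <stdio.h>

struct rec { int nusers; void *scope[0]; };     /* no room of its own */

struct holder {
  union {
    struct rec rec_mem;         /* header view: rec_mem.scope[i] */
    struct {
      struct rec rec_struct;
      void *scope_elems[4];     /* the four reserved inline slots */
    } realmem;
  };
};

#define NINLINE_ELEMS(h) \
  (sizeof ((h)->realmem.scope_elems) / sizeof ((h)->realmem.scope_elems[0]))

int main (void)
{
  struct holder h;
  h.rec_mem.scope[3] = &h;      /* lands in realmem.scope_elems[3] */
  printf ("inline capacity: %zu\n", NINLINE_ELEMS (&h));   /* prints 4 */
  return 0;
}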

nptl/ChangeLog

@ -1,3 +1,7 @@
2006-10-09 Ulrich Drepper <drepper@redhat.com>
* sysdeps/unix/sysv/linux/rtld-lowlevel.h: New file.
2006-10-07 Ulrich Drepper <drepper@redhat.com>
* sysdeps/unix/sysv/linux/powerpc/bits/local_lim.h: New file.

nptl/sysdeps/unix/sysv/linux/rtld-lowlevel.h

@ -0,0 +1,151 @@
/* Definitions for lowlevel handling in ld.so.
Copyright (C) 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#ifndef _RTLD_LOWLEVEL_H
#define _RTLD_LOWLEVEL_H 1
#include <atomic.h>
#include <lowlevellock.h>
/* Special multi-reader lock used in ld.so. */
#define __RTLD_MRLOCK_WRITER 1
#define __RTLD_MRLOCK_RWAIT 2
#define __RTLD_MRLOCK_WWAIT 4
#define __RTLD_MRLOCK_RBITS \
~(__RTLD_MRLOCK_WRITER | __RTLD_MRLOCK_RWAIT | __RTLD_MRLOCK_WWAIT)
#define __RTLD_MRLOCK_INC 8
#define __RTLD_MRLOCK_TRIES 5
typedef int __rtld_mrlock_t;
#define __rtld_mrlock_define(CLASS,NAME) \
CLASS __rtld_mrlock_t NAME;
#define _RTLD_MRLOCK_INITIALIZER 0
#define __rtld_mrlock_initialize(NAME) \
(void) ((NAME) = 0)
#define __rtld_mrlock_lock(lock) \
do { \
__label__ out; \
while (1) \
{ \
int oldval; \
for (int tries = 0; tries < __RTLD_MRLOCK_TRIES; ++tries) \
{ \
oldval = lock; \
while (__builtin_expect ((oldval \
& (__RTLD_MRLOCK_WRITER \
| __RTLD_MRLOCK_WWAIT)) \
== 0, 1)) \
{ \
int newval = ((oldval & __RTLD_MRLOCK_RBITS) \
+ __RTLD_MRLOCK_INC); \
int ret = atomic_compare_and_exchange_val_acq (&(lock), \
newval, \
oldval); \
if (__builtin_expect (ret == oldval, 1)) \
goto out; \
} \
atomic_delay (); \
} \
if ((oldval & __RTLD_MRLOCK_RWAIT) == 0) \
{ \
atomic_or (&(lock), __RTLD_MRLOCK_RWAIT); \
oldval |= __RTLD_MRLOCK_RWAIT; \
} \
lll_futex_wait (lock, oldval); \
} \
out:; \
} while (0)
#define __rtld_mrlock_unlock(lock) \
do { \
int oldval = atomic_exchange_and_add (&(lock), -__RTLD_MRLOCK_INC); \
if (__builtin_expect ((oldval \
& (__RTLD_MRLOCK_RBITS | __RTLD_MRLOCK_WWAIT)) \
== (__RTLD_MRLOCK_INC | __RTLD_MRLOCK_WWAIT), 0)) \
/* We have to wake all threads since there might be some queued \
readers already. */ \
lll_futex_wake (&(lock), 0x7fffffff); \
} while (0)
/* There can only ever be one thread trying to get the exclusive lock. */
#define __rtld_mrlock_change(lock) \
do { \
__label__ out; \
while (1) \
{ \
int oldval; \
for (int tries = 0; tries < __RTLD_MRLOCK_TRIES; ++tries) \
{ \
oldval = lock; \
while (__builtin_expect ((oldval & __RTLD_MRLOCK_RBITS) == 0, 1))\
{ \
int newval = ((oldval & __RTLD_MRLOCK_RWAIT) \
+ __RTLD_MRLOCK_WRITER); \
int ret = atomic_compare_and_exchange_val_acq (&(lock), \
newval, \
oldval); \
if (__builtin_expect (ret == oldval, 1)) \
goto out; \
} \
atomic_delay (); \
} \
atomic_or (&(lock), __RTLD_MRLOCK_WWAIT); \
oldval |= __RTLD_MRLOCK_WWAIT; \
lll_futex_wait (lock, oldval); \
} \
out:; \
} while (0)
#define __rtld_mrlock_done(lock) \
do { \
int oldval = atomic_exchange_and_add (&(lock), -__RTLD_MRLOCK_WRITER); \
if (__builtin_expect ((oldval & __RTLD_MRLOCK_RWAIT) != 0, 0)) \
lll_futex_wake (&(lock), 0x7fffffff); \
} while (0)
/* Function to wait for a variable to become zero. Used in ld.so for
reference counters. */
#define __rtld_waitzero(word) \
do { \
while (1) \
{ \
int val = word; \
if (val == 0) \
break; \
lll_futex_wait (&(word), val); \
} \
} while (0)
#define __rtld_notify(word) \
lll_futex_wake (&(word), 1)
#endif
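
For orientation, my reading of the lock word (inferred from the constants above, not stated in the source): bit 0 marks the writer, bits 1-2 are the reader/writer wait flags, and the reader count occupies the remaining bits in steps of __RTLD_MRLOCK_INC. Only one thread ever takes the exclusive side, as the comment above notes, so these are the reachable states:

int lock;
lock = 0;                               /* free */
lock = 2 * __RTLD_MRLOCK_INC;           /* 16: two readers inside */
lock = __RTLD_MRLOCK_WRITER;            /* 1: writer swapping the scope */
lock = 2 * __RTLD_MRLOCK_INC
       | __RTLD_MRLOCK_WWAIT;           /* 20: writer parked until both
                                           readers leave */
lock = __RTLD_MRLOCK_WRITER
       | __RTLD_MRLOCK_RWAIT;           /* 3: readers parked until the
                                           writer finishes */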

po/sv.po

Diff suppressed because it is too large.

sysdeps/generic/ldsodefs.h

@ -38,6 +38,7 @@
#include <bits/libc-lock.h>
#include <hp-timing.h>
#include <tls.h>
#include <rtld-lowlevel.h>
__BEGIN_DECLS

sysdeps/generic/rtld-lowlevel.h

@ -0,0 +1 @@
#error "Lowlevel primitives for ld.so not implemented"

sysdeps/i386/i486/bits/atomic.h

@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@ -168,34 +168,35 @@ typedef uintmax_t uatomic_max_t;
#define atomic_add(mem, value) \
(void) ({ if (__builtin_constant_p (value) && (value) == 1) \
atomic_increment (mem); \
else if (__builtin_constant_p (value) && (value) == -1) \
atomic_decrement (mem); \
else if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "addb %b1, %0" \
: "=m" (*mem) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "addw %w1, %0" \
: "=m" (*mem) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "addl %1, %0" \
: "=m" (*mem) \
: "ir" (value), "m" (*mem)); \
else \
{ \
__typeof (value) __addval = (value); \
__typeof (mem) __memp = (mem); \
__typeof (*mem) __oldval = *__memp; \
__typeof (*mem) __tmpval; \
do \
__tmpval = __oldval; \
while ((__oldval = __arch_compare_and_exchange_val_64_acq \
(__memp, __oldval + __addval, __oldval)) == __tmpval); \
} \
})
do { \
if (__builtin_constant_p (value) && (value) == 1) \
atomic_increment (mem); \
else if (__builtin_constant_p (value) && (value) == -1) \
atomic_decrement (mem); \
else if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "addb %b1, %0" \
: "=m" (*mem) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "addw %w1, %0" \
: "=m" (*mem) \
: "ir" (value), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "addl %1, %0" \
: "=m" (*mem) \
: "ir" (value), "m" (*mem)); \
else \
{ \
__typeof (value) __addval = (value); \
__typeof (mem) __memp = (mem); \
__typeof (*mem) __oldval = *__memp; \
__typeof (*mem) __tmpval; \
do \
__tmpval = __oldval; \
while ((__oldval = __arch_compare_and_exchange_val_64_acq \
(__memp, __oldval + __addval, __oldval)) == __tmpval); \
} \
} while (0)
#define atomic_add_negative(mem, value) \
@ -237,29 +238,30 @@ typedef uintmax_t uatomic_max_t;
#define atomic_increment(mem) \
(void) ({ if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "incb %b0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "incw %w0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "incl %0" \
: "=m" (*mem) \
: "m" (*mem)); \
else \
{ \
__typeof (mem) __memp = (mem); \
__typeof (*mem) __oldval = *__memp; \
__typeof (*mem) __tmpval; \
do \
__tmpval = __oldval; \
while ((__oldval = __arch_compare_and_exchange_val_64_acq \
(__memp, __oldval + 1, __oldval)) == __tmpval); \
} \
})
do { \
if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "incb %b0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "incw %w0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "incl %0" \
: "=m" (*mem) \
: "m" (*mem)); \
else \
{ \
__typeof (mem) __memp = (mem); \
__typeof (*mem) __oldval = *__memp; \
__typeof (*mem) __tmpval; \
do \
__tmpval = __oldval; \
while ((__oldval = __arch_compare_and_exchange_val_64_acq \
(__memp, __oldval + 1, __oldval)) == __tmpval); \
} \
} while (0)
#define atomic_increment_and_test(mem) \
@ -282,29 +284,30 @@ typedef uintmax_t uatomic_max_t;
#define atomic_decrement(mem) \
(void) ({ if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "decb %b0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "decw %w0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "decl %0" \
: "=m" (*mem) \
: "m" (*mem)); \
else \
{ \
__typeof (mem) __memp = (mem); \
__typeof (*mem) __oldval = *__memp; \
__typeof (*mem) __tmpval; \
do \
__tmpval = __oldval; \
while ((__oldval = __arch_compare_and_exchange_val_64_acq \
(__memp, __oldval - 1, __oldval)) == __tmpval); \
} \
})
do { \
if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "decb %b0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "decw %w0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "decl %0" \
: "=m" (*mem) \
: "m" (*mem)); \
else \
{ \
__typeof (mem) __memp = (mem); \
__typeof (*mem) __oldval = *__memp; \
__typeof (*mem) __tmpval; \
do \
__tmpval = __oldval; \
while ((__oldval = __arch_compare_and_exchange_val_64_acq \
(__memp, __oldval - 1, __oldval)) == __tmpval); \
} \
} while (0)
#define atomic_decrement_and_test(mem) \
@ -327,21 +330,22 @@ typedef uintmax_t uatomic_max_t;
#define atomic_bit_set(mem, bit) \
(void) ({ if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "orb %b2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1 << (bit))); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "orw %w2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1 << (bit))); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "orl %2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1 << (bit))); \
else \
abort (); \
})
do { \
if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "orb %b2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1 << (bit))); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "orw %w2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1 << (bit))); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "orl %2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1 << (bit))); \
else \
abort (); \
} while (0)
#define atomic_bit_test_set(mem, bit) \
@ -364,3 +368,41 @@ typedef uintmax_t uatomic_max_t;
#define atomic_delay() asm ("rep; nop")
#define atomic_and(mem, mask) \
do { \
if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "andb %1, %b0" \
: "=m" (*mem) \
: "ir" (mask), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "andw %1, %w0" \
: "=m" (*mem) \
: "ir" (mask), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "andl %1, %0" \
: "=m" (*mem) \
: "ir" (mask), "m" (*mem)); \
else \
abort (); \
} while (0)
#define atomic_or(mem, mask) \
do { \
if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "orb %1, %b0" \
: "=m" (*mem) \
: "ir" (mask), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "orw %1, %w0" \
: "=m" (*mem) \
: "ir" (mask), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "orl %1, %0" \
: "=m" (*mem) \
: "ir" (mask), "m" (*mem)); \
else \
abort (); \
} while (0)

sysdeps/x86_64/bits/atomic.h

@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@ -195,23 +195,24 @@ typedef uintmax_t uatomic_max_t;
#define atomic_increment(mem) \
(void) ({ if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "incb %b0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "incw %w0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "incl %0" \
: "=m" (*mem) \
: "m" (*mem)); \
else \
__asm __volatile (LOCK_PREFIX "incq %q0" \
: "=m" (*mem) \
: "m" (*mem)); \
})
do { \
if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "incb %b0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "incw %w0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "incl %0" \
: "=m" (*mem) \
: "m" (*mem)); \
else \
__asm __volatile (LOCK_PREFIX "incq %q0" \
: "=m" (*mem) \
: "m" (*mem)); \
} while (0)
#define atomic_increment_and_test(mem) \
@ -236,23 +237,24 @@ typedef uintmax_t uatomic_max_t;
#define atomic_decrement(mem) \
(void) ({ if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "decb %b0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "decw %w0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "decl %0" \
: "=m" (*mem) \
: "m" (*mem)); \
else \
__asm __volatile (LOCK_PREFIX "decq %q0" \
: "=m" (*mem) \
: "m" (*mem)); \
})
do { \
if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "decb %b0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "decw %w0" \
: "=m" (*mem) \
: "m" (*mem)); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "decl %0" \
: "=m" (*mem) \
: "m" (*mem)); \
else \
__asm __volatile (LOCK_PREFIX "decq %q0" \
: "=m" (*mem) \
: "m" (*mem)); \
} while (0)
#define atomic_decrement_and_test(mem) \
@ -277,27 +279,28 @@ typedef uintmax_t uatomic_max_t;
#define atomic_bit_set(mem, bit) \
(void) ({ if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "orb %b2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1L << (bit))); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "orw %w2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1L << (bit))); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "orl %2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1L << (bit))); \
else if (__builtin_constant_p (bit) && (bit) < 32) \
__asm __volatile (LOCK_PREFIX "orq %2, %0" \
: "=m" (*mem) \
: "m" (*mem), "i" (1L << (bit))); \
else \
__asm __volatile (LOCK_PREFIX "orq %q2, %0" \
: "=m" (*mem) \
: "m" (*mem), "r" (1UL << (bit))); \
})
do { \
if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "orb %b2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1L << (bit))); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "orw %w2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1L << (bit))); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "orl %2, %0" \
: "=m" (*mem) \
: "m" (*mem), "ir" (1L << (bit))); \
else if (__builtin_constant_p (bit) && (bit) < 32) \
__asm __volatile (LOCK_PREFIX "orq %2, %0" \
: "=m" (*mem) \
: "m" (*mem), "i" (1L << (bit))); \
else \
__asm __volatile (LOCK_PREFIX "orq %q2, %0" \
: "=m" (*mem) \
: "m" (*mem), "r" (1UL << (bit))); \
} while (0)
#define atomic_bit_test_set(mem, bit) \
@ -322,3 +325,45 @@ typedef uintmax_t uatomic_max_t;
#define atomic_delay() asm ("rep; nop")
#define atomic_and(mem, mask) \
do { \
if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "andb %1, %b0" \
: "=m" (*mem) \
: "ir" (mask), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "andw %1, %w0" \
: "=m" (*mem) \
: "ir" (mask), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "andl %1, %0" \
: "=m" (*mem) \
: "ir" (mask), "m" (*mem)); \
else \
__asm __volatile (LOCK_PREFIX "andq %1, %q0" \
: "=m" (*mem) \
: "ir" (mask), "m" (*mem)); \
} while (0)
#define atomic_or(mem, mask) \
do { \
if (sizeof (*mem) == 1) \
__asm __volatile (LOCK_PREFIX "orb %1, %b0" \
: "=m" (*mem) \
: "ir" (mask), "m" (*mem)); \
else if (sizeof (*mem) == 2) \
__asm __volatile (LOCK_PREFIX "orw %1, %w0" \
: "=m" (*mem) \
: "ir" (mask), "m" (*mem)); \
else if (sizeof (*mem) == 4) \
__asm __volatile (LOCK_PREFIX "orl %1, %0" \
: "=m" (*mem) \
: "ir" (mask), "m" (*mem)); \
else \
__asm __volatile (LOCK_PREFIX "orq %1, %q0" \
: "=m" (*mem) \
: "ir" (mask), "m" (*mem)); \
} while (0)