* include/atomic.h: Define catomic_* operations.

* sysdeps/x86_64/bits/atomic.h: Likewise.  Fix a few minor problems.
* stdlib/cxa_finalize.c: Use catomic_* operations instead of atomic_*.
* malloc/memusage.c: Likewise.
* gmon/mcount.c: Likewise.
* elf/dl-close.c: Likewise.
* elf/dl-open.c: Likewise.
* elf/dl-profile.c: Likewise.
* elf/dl-sym.c: Likewise.
* elf/dl-runtime.c: Likewise.
* elf/dl-fptr.c: Likewise.
* resolv/res_libc.c: Likewise.
Ulrich Drepper 2006-10-11 09:01:52 +00:00
parent 2a6ee54934
commit 8099361ecd
15 changed files with 415 additions and 148 deletions
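
The catomic_* family introduced here is a conditionally atomic variant of the existing atomic_* macros: when the process is known to be single-threaded (glibc checks the multiple_threads field of the thread control block), the bus-locked instruction can be skipped.  A minimal sketch of the idea in plain C, with a hypothetical is_multi_threaded flag standing in for the TCB field:

/* Hypothetical flag; glibc reads tcbhead_t.multiple_threads instead.  */
extern int is_multi_threaded;

static inline int
catomic_fetch_add_sketch (int *mem, int value)
{
  if (!is_multi_threaded)
    {
      /* Only one thread exists, so nothing can race with us: a plain
         read-modify-write suffices and avoids the locked instruction.  */
      int old = *mem;
      *mem = old + value;
      return old;
    }
  /* Otherwise fall back to a genuinely atomic operation.  */
  return __sync_fetch_and_add (mem, value);
}

The x86_64 hunks below implement the same dispatch inside a single asm statement rather than with a C-level branch.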

ChangeLog

@@ -1,3 +1,18 @@
+2006-10-11  Ulrich Drepper  <drepper@redhat.com>
+
+	* include/atomic.h: Define catomic_* operations.
+	* sysdeps/x86_64/bits/atomic.h: Likewise.  Fix a few minor problems.
+	* stdlib/cxa_finalize.c: Use catomic_* operations instead of atomic_*.
+	* malloc/memusage.c: Likewise.
+	* gmon/mcount.c: Likewise.
+	* elf/dl-close.c: Likewise.
+	* elf/dl-open.c: Likewise.
+	* elf/dl-profile.c: Likewise.
+	* elf/dl-sym.c: Likewise.
+	* elf/dl-runtime.c: Likewise.
+	* elf/dl-fptr.c: Likewise.
+	* resolv/res_libc.c: Likewise.
+
 2006-10-10  Ulrich Drepper  <drepper@redhat.com>
 
 	* nis/nis_subr.c (nis_getnames): Add trailing dot to NIS_PATH

elf/dl-close.c

@@ -423,11 +423,11 @@ _dl_close (void *_map)
 	      imap->l_scoperec = newp;
 	      __rtld_mrlock_done (imap->l_scoperec_lock);
 
-	      if (atomic_increment_val (&old->nusers) != 1)
+	      if (catomic_increment_val (&old->nusers) != 1)
 		{
 		  old->remove_after_use = true;
 		  old->notify = true;
-		  if (atomic_decrement_val (&old->nusers) != 0)
+		  if (catomic_decrement_val (&old->nusers) != 0)
 		    __rtld_waitzero (old->nusers);
 		}

elf/dl-fptr.c

@@ -1,5 +1,5 @@
 /* Manage function descriptors.  Generic version.
-   Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc.
+   Copyright (C) 1999-2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -40,7 +40,7 @@
 
 #ifndef COMPARE_AND_SWAP
 # define COMPARE_AND_SWAP(ptr, old, new) \
-  (atomic_compare_and_exchange_bool_acq (ptr, new, old) == 0)
+  (catomic_compare_and_exchange_bool_acq (ptr, new, old) == 0)
 #endif
 
 ElfW(Addr) _dl_boot_fptr_table [ELF_MACHINE_BOOT_FPTR_TABLE_LEN];

elf/dl-open.c

@@ -429,9 +429,9 @@ dl_open_worker (void *a)
 	      imap->l_scoperec = newp;
 	      __rtld_mrlock_done (imap->l_scoperec_lock);
 
-	      atomic_increment (&old->nusers);
+	      catomic_increment (&old->nusers);
 	      old->remove_after_use = true;
-	      if (atomic_decrement_val (&old->nusers) == 0)
+	      if (catomic_decrement_val (&old->nusers) == 0)
 		/* No user, we can free it here and now.  */
 		free (old);
 	    }

elf/dl-profile.c

@@ -1,5 +1,5 @@
 /* Profiling of shared libraries.
-   Copyright (C) 1997-2002, 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 1997-2002, 2003, 2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
    Based on the BSD mcount implementation.
@@ -509,24 +509,24 @@ _dl_mcount (ElfW(Addr) frompc, ElfW(Addr) selfpc)
 	      size_t newfromidx;
 	      to_index = (data[narcs].self_pc
 			  / (HASHFRACTION * sizeof (*tos)));
-	      newfromidx = atomic_exchange_and_add (&fromidx, 1) + 1;
+	      newfromidx = catomic_exchange_and_add (&fromidx, 1) + 1;
 	      froms[newfromidx].here = &data[narcs];
 	      froms[newfromidx].link = tos[to_index];
 	      tos[to_index] = newfromidx;
-	      atomic_increment (&narcs);
+	      catomic_increment (&narcs);
 	    }
 
 	  /* If we still have no entry stop searching and insert.  */
 	  if (*topcindex == 0)
 	    {
-	      uint_fast32_t newarc = atomic_exchange_and_add (narcsp, 1);
+	      uint_fast32_t newarc = catomic_exchange_and_add (narcsp, 1);
 
 	      /* In rare cases it could happen that all entries in FROMS are
 		 occupied.  So we cannot count this anymore.  */
 	      if (newarc >= fromlimit)
 		goto done;
 
-	      *topcindex = atomic_exchange_and_add (&fromidx, 1) + 1;
+	      *topcindex = catomic_exchange_and_add (&fromidx, 1) + 1;
 	      fromp = &froms[*topcindex];
 
 	      fromp->here = &data[newarc];
@@ -534,7 +534,7 @@ _dl_mcount (ElfW(Addr) frompc, ElfW(Addr) selfpc)
 	      data[newarc].self_pc = selfpc;
 	      data[newarc].count = 0;
 	      fromp->link = 0;
-	      atomic_increment (&narcs);
+	      catomic_increment (&narcs);
 
 	      break;
 	    }
@@ -547,7 +547,7 @@ _dl_mcount (ElfW(Addr) frompc, ElfW(Addr) selfpc)
     }
 
   /* Increment the counter.  */
-  atomic_increment (&fromp->here->count);
+  catomic_increment (&fromp->here->count);
 
  done:
   ;

elf/dl-runtime.c

@@ -97,7 +97,7 @@ _dl_fixup (
 	{
 	  __rtld_mrlock_lock (l->l_scoperec_lock);
 	  scoperec = l->l_scoperec;
-	  atomic_increment (&scoperec->nusers);
+	  catomic_increment (&scoperec->nusers);
 	  __rtld_mrlock_unlock (l->l_scoperec_lock);
 	}
 
@@ -107,7 +107,7 @@ _dl_fixup (
 				    DL_LOOKUP_ADD_DEPENDENCY, NULL);
 
       if (l->l_type == lt_loaded
-	  && atomic_decrement_val (&scoperec->nusers) == 0
+	  && catomic_decrement_val (&scoperec->nusers) == 0
 	  && __builtin_expect (scoperec->remove_after_use, 0))
 	{
 	  if (scoperec->notify)
@@ -199,7 +199,7 @@ _dl_profile_fixup (
 	{
 	  __rtld_mrlock_lock (l->l_scoperec_lock);
 	  scoperec = l->l_scoperec;
-	  atomic_increment (&scoperec->nusers);
+	  catomic_increment (&scoperec->nusers);
 	  __rtld_mrlock_unlock (l->l_scoperec_lock);
 	}
 
@@ -209,7 +209,7 @@ _dl_profile_fixup (
 				    DL_LOOKUP_ADD_DEPENDENCY, NULL);
 
      if (l->l_type == lt_loaded
-	  && atomic_decrement_val (&scoperec->nusers) == 0
+	  && catomic_decrement_val (&scoperec->nusers) == 0
 	  && __builtin_expect (scoperec->remove_after_use, 0))
 	{
 	  if (scoperec->notify)

elf/dl-sym.c

@@ -124,7 +124,7 @@ do_sym (void *handle, const char *name, void *who,
 	{
 	  __rtld_mrlock_lock (match->l_scoperec_lock);
 	  struct r_scoperec *scoperec = match->l_scoperec;
-	  atomic_increment (&scoperec->nusers);
+	  catomic_increment (&scoperec->nusers);
 	  __rtld_mrlock_unlock (match->l_scoperec_lock);
 
 	  struct call_dl_lookup_args args;
@@ -141,7 +141,7 @@ do_sym (void *handle, const char *name, void *who,
 	  int err = GLRO(dl_catch_error) (&objname, &errstring, &malloced,
 					  call_dl_lookup, &args);
 
-	  if (atomic_decrement_val (&scoperec->nusers) == 0
+	  if (catomic_decrement_val (&scoperec->nusers) == 0
 	      && __builtin_expect (scoperec->remove_after_use, 0))
 	    {
 	      if (scoperec->notify)

gmon/mcount.c

@@ -69,8 +69,8 @@ _MCOUNT_DECL(frompc, selfpc)	/* _mcount; may be static, inline, etc */
 	 * check that we are profiling
 	 * and that we aren't recursively invoked.
 	 */
-	if (atomic_compare_and_exchange_bool_acq (&p->state, GMON_PROF_BUSY,
-						  GMON_PROF_ON))
+	if (catomic_compare_and_exchange_bool_acq (&p->state, GMON_PROF_BUSY,
+						   GMON_PROF_ON))
 		return;
 
 	/*
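
The guard above is a compare-and-swap used as a try-lock: flip p->state from GMON_PROF_ON to GMON_PROF_BUSY and bail out if it was not ON, which catches both disabled profiling and recursive invocation.  The same shape as a standalone sketch (names hypothetical):

enum { PROF_ON, PROF_BUSY };
static int prof_state = PROF_ON;

void
mcount_sketch (void)
{
  /* Fails if the state was anything but PROF_ON, i.e. profiling is
     off or we re-entered ourselves.  */
  if (!__sync_bool_compare_and_swap (&prof_state, PROF_ON, PROF_BUSY))
    return;

  /* ... record the call arc ...  */

  prof_state = PROF_ON;	/* Release the guard.  */
}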

include/atomic.h

@@ -21,6 +21,26 @@
 #ifndef _ATOMIC_H
 #define _ATOMIC_H	1
 
+/* This header defines three types of macros:
+
+   - atomic arithmetic and logic operations on memory.  They all
+     have the prefix "atomic_".
+
+   - conditionally atomic operations of the same kind.  These
+     always behave identically but can be faster when atomicity
+     is not really needed since only one thread has access to
+     the memory location.  In that case the code is slower in
+     the multi-thread case.  The interfaces have the prefix
+     "catomic_".
+
+   - support functions like barriers.  They also have the prefix
+     "atomic_".
+
+   Architectures must provide a few lowlevel macros (the compare
+   and exchange definitions).  All others are optional.  They
+   should only be provided if the architecture has specific
+   support for the operation.  */
+
 #include <stdlib.h>
 
 #include <bits/atomic.h>
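
A usage sketch of the distinction the new comment draws, assuming glibc-internal code that includes this header (the counter is made up).  The catomic_ form has identical semantics; it pays off in code that usually runs single-threaded, such as the dynamic linker, at the price of one extra compare when threads exist:

static int ncalls;

void
count_sketch (void)
{
  atomic_increment (&ncalls);	/* Unconditional locked RMW.  */
  catomic_increment (&ncalls);	/* Checks multiple_threads first.  */
}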
@@ -70,12 +90,29 @@
 #endif
 
+#if !defined catomic_compare_and_exchange_val_acq \
+    && defined __arch_c_compare_and_exchange_val_32_acq
+# define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+  __atomic_val_bysize (__arch_c_compare_and_exchange_val,acq,		      \
+		       mem, newval, oldval)
+#else
+# define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
+#endif
+
 #ifndef atomic_compare_and_exchange_val_rel
 # define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
   atomic_compare_and_exchange_val_acq (mem, newval, oldval)
 #endif
 
+#ifndef catomic_compare_and_exchange_val_rel
+# define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
+  catomic_compare_and_exchange_val_acq (mem, newval, oldval)
+#endif
+
 
 /* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
    Return zero if *MEM was changed or non-zero if no exchange happened.  */
 #ifndef atomic_compare_and_exchange_bool_acq
@@ -94,12 +131,34 @@
 #endif
 
+#ifndef catomic_compare_and_exchange_bool_acq
+# ifdef __arch_c_compare_and_exchange_bool_32_acq
+#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+  __atomic_bool_bysize (__arch_c_compare_and_exchange_bool,acq,		      \
+			mem, newval, oldval)
+# else
+#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+  ({ /* Cannot use __oldval here, because macros later in this file might \
+	call this macro with __oldval argument.	 */			      \
+     __typeof (oldval) __old = (oldval);				      \
+     catomic_compare_and_exchange_val_acq (mem, newval, __old) != __old;     \
+  })
+# endif
+#endif
+
 #ifndef atomic_compare_and_exchange_bool_rel
 # define atomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
   atomic_compare_and_exchange_bool_acq (mem, newval, oldval)
 #endif
 
+#ifndef catomic_compare_and_exchange_bool_rel
+# define catomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
+  catomic_compare_and_exchange_bool_acq (mem, newval, oldval)
+#endif
+
 
 /* Store NEWVALUE in *MEM and return the old value.  */
 #ifndef atomic_exchange_acq
 # define atomic_exchange_acq(mem, newvalue) \
@@ -141,6 +200,23 @@
 #endif
 
+#ifndef catomic_exchange_and_add
+# define catomic_exchange_and_add(mem, value) \
+  ({ __typeof (*(mem)) __oldv;						      \
+     __typeof (mem) __memp = (mem);					      \
+     __typeof (*(mem)) __value = (value);				      \
+									      \
+     do									      \
+       __oldv = *__memp;						      \
+     while (__builtin_expect (catomic_compare_and_exchange_bool_acq (__memp, \
+								      __oldv  \
+								      + __value,\
+								      __oldv),\
+			      0));					      \
+									      \
+     __oldv; })
+#endif
+
 
 #ifndef atomic_max
 # define atomic_max(mem, value) \
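
The generic catomic_exchange_and_add fallback above is the classic compare-and-swap retry loop: snapshot the old value, try to install old + value, and retry if another thread got in between.  The same shape in plain C, using GCC's __sync builtin for illustration:

/* Fetch-and-add built from compare-and-swap; mirrors the fallback
   above, minus glibc's type-generic machinery.  */
static int
fetch_and_add_sketch (int *mem, int value)
{
  int oldv;
  do
    oldv = *mem;			/* Snapshot.  */
  while (!__sync_bool_compare_and_swap (mem, oldv, oldv + value));
  return oldv;				/* Value before the addition.  */
}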
@@ -159,6 +235,25 @@
   } while (0)
 #endif
 
+#ifndef catomic_max
+# define catomic_max(mem, value) \
+  do {									      \
+    __typeof (*(mem)) __oldv;						      \
+    __typeof (mem) __memp = (mem);					      \
+    __typeof (*(mem)) __value = (value);				      \
+    do {								      \
+      __oldv = *__memp;							      \
+      if (__oldv >= __value)						      \
+	break;								      \
+    } while (__builtin_expect (catomic_compare_and_exchange_bool_acq (__memp,\
+								       __value,\
+								       __oldv),\
+			       0));					      \
+  } while (0)
+#endif
+
 
 #ifndef atomic_min
 # define atomic_min(mem, value) \
   do {									      \
@@ -176,21 +271,38 @@
   } while (0)
 #endif
 
 #ifndef atomic_add
 # define atomic_add(mem, value) (void) atomic_exchange_and_add ((mem), (value))
 #endif
 
+#ifndef catomic_add
+# define catomic_add(mem, value) \
+  (void) catomic_exchange_and_add ((mem), (value))
+#endif
+
 
 #ifndef atomic_increment
 # define atomic_increment(mem) atomic_add ((mem), 1)
 #endif
 
+#ifndef catomic_increment
+# define catomic_increment(mem) catomic_add ((mem), 1)
+#endif
+
 
 #ifndef atomic_increment_val
 # define atomic_increment_val(mem) (atomic_exchange_and_add ((mem), 1) + 1)
 #endif
 
+#ifndef catomic_increment_val
+# define catomic_increment_val(mem) (catomic_exchange_and_add ((mem), 1) + 1)
+#endif
+
 
 /* Add one to *MEM and return true iff it's now zero.  */
 #ifndef atomic_increment_and_test
 # define atomic_increment_and_test(mem) \
@@ -203,11 +315,21 @@
 #endif
 
+#ifndef catomic_decrement
+# define catomic_decrement(mem) catomic_add ((mem), -1)
+#endif
+
 
 #ifndef atomic_decrement_val
 # define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
 #endif
 
+#ifndef catomic_decrement_val
+# define catomic_decrement_val(mem) (catomic_exchange_and_add ((mem), -1) - 1)
+#endif
+
 
 /* Subtract 1 from *MEM and return true iff it's now zero.  */
 #ifndef atomic_decrement_and_test
 # define atomic_decrement_and_test(mem) \
@@ -327,6 +449,23 @@
   } while (0)
 #endif
 
+#ifndef catomic_or
+# define catomic_or(mem, mask) \
+  do {									      \
+    __typeof (*(mem)) __oldval;						      \
+    __typeof (mem) __memp = (mem);					      \
+    __typeof (*(mem)) __mask = (mask);					      \
+									      \
+    do									      \
+      __oldval = (*__memp);						      \
+    while (__builtin_expect (catomic_compare_and_exchange_bool_acq (__memp,  \
+								     __oldval \
+								     | __mask,\
+								     __oldval),\
+			     0));					      \
+  } while (0)
+#endif
+
 
 /* Atomically *mem |= mask and return the old value of *mem.  */
 #ifndef atomic_or_val
 # define atomic_or_val(mem, mask) \

malloc/memusage.c

@@ -1,5 +1,5 @@
 /* Profile heap and stack memory usage of running program.
-   Copyright (C) 1998-2002, 2004, 2005 Free Software Foundation, Inc.
+   Copyright (C) 1998-2002, 2004, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
 
@@ -128,8 +128,8 @@ update_data (struct header *result, size_t len, size_t old_len)
 
   /* Compute current heap usage and compare it with the maximum value.  */
   memusage_size_t heap
-    = atomic_exchange_and_add (&current_heap, len - old_len) + len - old_len;
-  atomic_max (&peak_heap, heap);
+    = catomic_exchange_and_add (&current_heap, len - old_len) + len - old_len;
+  catomic_max (&peak_heap, heap);
 
   /* Compute current stack usage and compare it with the maximum
      value.  The base stack pointer might not be set if this is not
@@ -152,15 +152,15 @@ update_data (struct header *result, size_t len, size_t old_len)
     start_sp = sp;
   size_t current_stack = start_sp - sp;
 #endif
-  atomic_max (&peak_stack, current_stack);
+  catomic_max (&peak_stack, current_stack);
 
   /* Add up heap and stack usage and compare it with the maximum value.  */
-  atomic_max (&peak_total, heap + current_stack);
+  catomic_max (&peak_total, heap + current_stack);
 
   /* Store the value only if we are writing to a file.  */
   if (fd != -1)
     {
-      uatomic32_t idx = atomic_exchange_and_add (&buffer_cnt, 1);
+      uatomic32_t idx = catomic_exchange_and_add (&buffer_cnt, 1);
       if (idx >= 2 * buffer_size)
 	{
 	  /* We try to reset the counter to the correct range.  If
@@ -168,7 +168,7 @@ update_data (struct header *result, size_t len, size_t old_len)
 	     counter it does not matter since that thread will take
 	     care of the correction.  */
 	  unsigned int reset = idx - 2 * buffer_size;
-	  atomic_compare_and_exchange_val_acq (&buffer_size, reset, idx);
+	  catomic_compare_and_exchange_val_acq (&buffer_size, reset, idx);
 	  idx = reset;
 	}
 
@@ -337,24 +337,24 @@ malloc (size_t len)
     return (*mallocp) (len);
 
   /* Keep track of number of calls.  */
-  atomic_increment (&calls[idx_malloc]);
+  catomic_increment (&calls[idx_malloc]);
   /* Keep track of total memory consumption for `malloc'.  */
-  atomic_add (&total[idx_malloc], len);
+  catomic_add (&total[idx_malloc], len);
   /* Keep track of total memory requirement.  */
-  atomic_add (&grand_total, len);
+  catomic_add (&grand_total, len);
   /* Remember the size of the request.  */
   if (len < 65536)
-    atomic_increment (&histogram[len / 16]);
+    catomic_increment (&histogram[len / 16]);
   else
-    atomic_increment (&large);
+    catomic_increment (&large);
   /* Total number of calls of any of the functions.  */
-  atomic_increment (&calls_total);
+  catomic_increment (&calls_total);
 
   /* Do the real work.  */
   result = (struct header *) (*mallocp) (len + sizeof (struct header));
   if (result == NULL)
     {
-      atomic_increment (&failed[idx_malloc]);
+      catomic_increment (&failed[idx_malloc]);
       return NULL;
     }
 
@@ -403,36 +403,36 @@ realloc (void *old, size_t len)
     }
 
   /* Keep track of number of calls.  */
-  atomic_increment (&calls[idx_realloc]);
+  catomic_increment (&calls[idx_realloc]);
   if (len > old_len)
     {
      /* Keep track of total memory consumption for `realloc'.  */
-      atomic_add (&total[idx_realloc], len - old_len);
+      catomic_add (&total[idx_realloc], len - old_len);
      /* Keep track of total memory requirement.  */
-      atomic_add (&grand_total, len - old_len);
+      catomic_add (&grand_total, len - old_len);
     }
 
   /* Remember the size of the request.  */
   if (len < 65536)
-    atomic_increment (&histogram[len / 16]);
+    catomic_increment (&histogram[len / 16]);
   else
-    atomic_increment (&large);
+    catomic_increment (&large);
   /* Total number of calls of any of the functions.  */
-  atomic_increment (&calls_total);
+  catomic_increment (&calls_total);
 
   /* Do the real work.  */
   result = (struct header *) (*reallocp) (real, len + sizeof (struct header));
   if (result == NULL)
    {
-      atomic_increment (&failed[idx_realloc]);
+      catomic_increment (&failed[idx_realloc]);
      return NULL;
    }
 
   /* Record whether the reduction/increase happened in place.  */
   if (real == result)
-    atomic_increment (&inplace);
+    catomic_increment (&inplace);
   /* Was the buffer increased?  */
   if (old_len > len)
-    atomic_increment (&decreasing);
+    catomic_increment (&decreasing);
 
   /* Update the allocation data and write out the records if necessary.  */
   update_data (result, len, old_len);
@@ -463,16 +463,16 @@ calloc (size_t n, size_t len)
     return (*callocp) (n, len);
 
   /* Keep track of number of calls.  */
-  atomic_increment (&calls[idx_calloc]);
+  catomic_increment (&calls[idx_calloc]);
   /* Keep track of total memory consumption for `calloc'.  */
-  atomic_add (&total[idx_calloc], size);
+  catomic_add (&total[idx_calloc], size);
   /* Keep track of total memory requirement.  */
-  atomic_add (&grand_total, size);
+  catomic_add (&grand_total, size);
   /* Remember the size of the request.  */
   if (size < 65536)
-    atomic_increment (&histogram[size / 16]);
+    catomic_increment (&histogram[size / 16]);
   else
-    atomic_increment (&large);
+    catomic_increment (&large);
   /* Total number of calls of any of the functions.  */
   ++calls_total;
 
@@ -480,7 +480,7 @@ calloc (size_t n, size_t len)
   result = (struct header *) (*mallocp) (size + sizeof (struct header));
   if (result == NULL)
     {
-      atomic_increment (&failed[idx_calloc]);
+      catomic_increment (&failed[idx_calloc]);
       return NULL;
     }
 
@@ -517,7 +517,7 @@ free (void *ptr)
   /* `free (NULL)' has no effect.  */
   if (ptr == NULL)
     {
-      atomic_increment (&calls[idx_free]);
+      catomic_increment (&calls[idx_free]);
      return;
    }
 
@@ -531,9 +531,9 @@ free (void *ptr)
     }
 
   /* Keep track of number of calls.  */
-  atomic_increment (&calls[idx_free]);
+  catomic_increment (&calls[idx_free]);
   /* Keep track of total memory freed using `free'.  */
-  atomic_add (&total[idx_free], real->length);
+  catomic_add (&total[idx_free], real->length);
 
   /* Update the allocation data and write out the records if necessary.  */
   update_data (NULL, 0, real->length);
@@ -567,22 +567,22 @@ mmap (void *start, size_t len, int prot, int flags, int fd, off_t offset)
 	     ? idx_mmap_a : prot & PROT_WRITE ? idx_mmap_w : idx_mmap_r);
 
      /* Keep track of number of calls.  */
-      atomic_increment (&calls[idx]);
+      catomic_increment (&calls[idx]);
     /* Keep track of total memory consumption for `malloc'.  */
-      atomic_add (&total[idx], len);
+      catomic_add (&total[idx], len);
     /* Keep track of total memory requirement.  */
-      atomic_add (&grand_total, len);
+      catomic_add (&grand_total, len);
     /* Remember the size of the request.  */
     if (len < 65536)
-	atomic_increment (&histogram[len / 16]);
+	catomic_increment (&histogram[len / 16]);
     else
-	atomic_increment (&large);
+	catomic_increment (&large);
     /* Total number of calls of any of the functions.  */
-      atomic_increment (&calls_total);
+      catomic_increment (&calls_total);
 
     /* Check for failures.  */
     if (result == NULL)
-	atomic_increment (&failed[idx]);
+	catomic_increment (&failed[idx]);
     else if (idx == idx_mmap_w)
	/* Update the allocation data and write out the records if
	   necessary.  Note the first parameter is NULL which means
@@ -619,22 +619,22 @@ mmap64 (void *start, size_t len, int prot, int flags, int fd, off64_t offset)
 	     ? idx_mmap_a : prot & PROT_WRITE ? idx_mmap_w : idx_mmap_r);
 
     /* Keep track of number of calls.  */
-      atomic_increment (&calls[idx]);
+      catomic_increment (&calls[idx]);
    /* Keep track of total memory consumption for `malloc'.  */
-      atomic_add (&total[idx], len);
+      catomic_add (&total[idx], len);
    /* Keep track of total memory requirement.  */
-      atomic_add (&grand_total, len);
+      catomic_add (&grand_total, len);
    /* Remember the size of the request.  */
    if (len < 65536)
-	atomic_increment (&histogram[len / 16]);
+	catomic_increment (&histogram[len / 16]);
    else
-	atomic_increment (&large);
+	catomic_increment (&large);
    /* Total number of calls of any of the functions.  */
-      atomic_increment (&calls_total);
+      catomic_increment (&calls_total);
 
    /* Check for failures.  */
   if (result == NULL)
-	atomic_increment (&failed[idx]);
+	catomic_increment (&failed[idx]);
   else if (idx == idx_mmap_w)
	/* Update the allocation data and write out the records if
	   necessary.  Note the first parameter is NULL which means
@@ -673,33 +673,33 @@ mremap (void *start, size_t old_len, size_t len, int flags, ...)
   if (!not_me && trace_mmap)
     {
      /* Keep track of number of calls.  */
-      atomic_increment (&calls[idx_mremap]);
+      catomic_increment (&calls[idx_mremap]);
      if (len > old_len)
	{
	  /* Keep track of total memory consumption for `malloc'.  */
-	  atomic_add (&total[idx_mremap], len - old_len);
+	  catomic_add (&total[idx_mremap], len - old_len);
	  /* Keep track of total memory requirement.  */
-	  atomic_add (&grand_total, len - old_len);
+	  catomic_add (&grand_total, len - old_len);
	}
      /* Remember the size of the request.  */
      if (len < 65536)
-	atomic_increment (&histogram[len / 16]);
+	catomic_increment (&histogram[len / 16]);
      else
-	atomic_increment (&large);
+	catomic_increment (&large);
      /* Total number of calls of any of the functions.  */
-      atomic_increment (&calls_total);
+      catomic_increment (&calls_total);
 
      /* Check for failures.  */
     if (result == NULL)
-	atomic_increment (&failed[idx_mremap]);
+	catomic_increment (&failed[idx_mremap]);
     else
	{
	  /* Record whether the reduction/increase happened in place.  */
	  if (start == result)
-	    atomic_increment (&inplace_mremap);
+	    catomic_increment (&inplace_mremap);
	  /* Was the buffer increased?  */
	  if (old_len > len)
-	    atomic_increment (&decreasing_mremap);
+	    catomic_increment (&decreasing_mremap);
 
	  /* Update the allocation data and write out the records if
	     necessary.  Note the first parameter is NULL which means
@@ -733,19 +733,19 @@ munmap (void *start, size_t len)
   if (!not_me && trace_mmap)
     {
      /* Keep track of number of calls.  */
-      atomic_increment (&calls[idx_munmap]);
+      catomic_increment (&calls[idx_munmap]);
 
      if (__builtin_expect (result == 0, 1))
	{
	  /* Keep track of total memory freed using `free'.  */
-	  atomic_add (&total[idx_munmap], len);
+	  catomic_add (&total[idx_munmap], len);
 
	  /* Update the allocation data and write out the records if
	     necessary.  */
	  update_data (NULL, 0, len);
	}
      else
-	atomic_increment (&failed[idx_munmap]);
+	catomic_increment (&failed[idx_munmap]);
     }
 
  return result;

nptl/ChangeLog

@@ -1,3 +1,8 @@
+2006-10-11  Ulrich Drepper  <drepper@redhat.com>
+
+	* sysdeps/unix/sysv/linux/rtld-lowlevel.h: Use catomic_*
+	operations instead of atomic_*.
+
 2006-10-09  Ulrich Drepper  <drepper@redhat.com>
 
 	* sysdeps/unix/sysv/linux/rtld-lowlevel.h: New file.

nptl/sysdeps/unix/sysv/linux/rtld-lowlevel.h

@@ -62,9 +62,9 @@ typedef int __rtld_mrlock_t;
 	{								      \
 	  int newval = ((oldval & __RTLD_MRLOCK_RBITS)			      \
 			+ __RTLD_MRLOCK_INC);				      \
-	  int ret = atomic_compare_and_exchange_val_acq (&(lock),	      \
-							 newval,	      \
-							 oldval);	      \
+	  int ret = catomic_compare_and_exchange_val_acq (&(lock),	      \
+							  newval,	      \
+							  oldval);	      \
 	  if (__builtin_expect (ret == oldval, 1))			      \
 	    goto out;							      \
 	}								      \
@@ -72,7 +72,7 @@ typedef int __rtld_mrlock_t;
      }									      \
     if ((oldval & __RTLD_MRLOCK_RWAIT) == 0)				      \
       {									      \
-	atomic_or (&(lock), __RTLD_MRLOCK_RWAIT);			      \
+	catomic_or (&(lock), __RTLD_MRLOCK_RWAIT);			      \
 	oldval |= __RTLD_MRLOCK_RWAIT;					      \
       }									      \
     lll_futex_wait (lock, oldval);					      \
@@ -83,10 +83,10 @@ typedef int __rtld_mrlock_t;
 
 #define __rtld_mrlock_unlock(lock) \
   do {									      \
-    int oldval = atomic_exchange_and_add (&(lock), -__RTLD_MRLOCK_INC);      \
+    int oldval = catomic_exchange_and_add (&(lock), -__RTLD_MRLOCK_INC);     \
     if (__builtin_expect ((oldval					      \
			   & (__RTLD_MRLOCK_RBITS | __RTLD_MRLOCK_WWAIT))     \
-			  == __RTLD_MRLOCK_INC | __RTLD_MRLOCK_WWAIT, 0))     \
+			  == (__RTLD_MRLOCK_INC | __RTLD_MRLOCK_WWAIT), 0))   \
      /* We have to wake all threads since there might be some queued	      \
	 readers already.  */						      \
      lll_futex_wake (&(lock), 0x7fffffff);				      \
@@ -107,7 +107,7 @@ typedef int __rtld_mrlock_t;
 	    {								      \
 	      int newval = ((oldval & __RTLD_MRLOCK_RWAIT)		      \
			    + __RTLD_MRLOCK_WRITER);			      \
-	      int ret = atomic_compare_and_exchange_val_acq (&(lock),	      \
-							     newval,	      \
-							     oldval);	      \
+	      int ret = catomic_compare_and_exchange_val_acq (&(lock),	      \
+							      newval,	      \
+							      oldval);	      \
	      if (__builtin_expect (ret == oldval, 1))			      \
@@ -115,7 +115,7 @@ typedef int __rtld_mrlock_t;
 	    }								      \
 									      \
 	  atomic_delay ();						      \
 	}								      \
 									      \
-      atomic_or (&(lock), __RTLD_MRLOCK_WWAIT);				      \
+      catomic_or (&(lock), __RTLD_MRLOCK_WWAIT);			      \
       oldval |= __RTLD_MRLOCK_WWAIT;					      \
       lll_futex_wait (lock, oldval);					      \
     }									      \
@@ -125,7 +125,7 @@ typedef int __rtld_mrlock_t;
 
 #define __rtld_mrlock_done(lock) \
   do {									      \
-    int oldval = atomic_exchange_and_add (&(lock), -__RTLD_MRLOCK_WRITER);    \
+    int oldval = catomic_exchange_and_add (&(lock), -__RTLD_MRLOCK_WRITER);   \
     if (__builtin_expect ((oldval & __RTLD_MRLOCK_RWAIT) != 0, 0))	      \
       lll_futex_wake (&(lock), 0x7fffffff);				      \
   } while (0)

resolv/res_libc.c

@@ -33,7 +33,7 @@ extern unsigned long long int __res_initstamp attribute_hidden;
 #if __WORDSIZE == 64
 # define atomicinclock(lock) (void) 0
 # define atomicincunlock(lock) (void) 0
-# define atomicinc(var) atomic_increment (&(var))
+# define atomicinc(var) catomic_increment (&(var))
 #else
 __libc_lock_define_initialized (static, lock);
 # define atomicinclock(lock) __libc_lock_lock (lock)
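
res_libc.c only maps atomicinc to an atomic increment when __WORDSIZE == 64: __res_initstamp is a 64-bit counter, and on a 32-bit target a 64-bit increment is not a single instruction, so the file falls back to a lock.  A self-contained sketch of the same split (names hypothetical):

#include <pthread.h>
#include <stdint.h>

static uint64_t initstamp;	/* 64-bit generation counter.  */

#if UINTPTR_MAX == UINT64_MAX
/* 64-bit target: one atomic add covers the whole counter.  */
# define stamp_inc() ((void) __sync_fetch_and_add (&initstamp, 1))
#else
/* 32-bit target: the counter spans two words, so serialize instead.  */
static pthread_mutex_t stamp_lock = PTHREAD_MUTEX_INITIALIZER;
# define stamp_inc() \
  do { pthread_mutex_lock (&stamp_lock); \
       ++initstamp; \
       pthread_mutex_unlock (&stamp_lock); } while (0)
#endif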

stdlib/cxa_finalize.c

@@ -45,8 +45,8 @@ __cxa_finalize (void *d)
 	/* We don't want to run this cleanup more than once.  */
 	&& (cxafn = f->func.cxa.fn,
 	    cxaarg = f->func.cxa.arg,
-	    ! atomic_compare_and_exchange_bool_acq (&f->flavor, ef_free,
-						    ef_cxa)))
+	    ! catomic_compare_and_exchange_bool_acq (&f->flavor, ef_free,
+						     ef_cxa)))
       {
 	uint64_t check = __new_exitfn_called;

sysdeps/x86_64/bits/atomic.h

@@ -18,6 +18,7 @@
    02111-1307 USA.  */
 
 #include <stdint.h>
+#include <tls.h>	/* For tcbhead_t.  */
 
 
 typedef int8_t atomic8_t;
@@ -85,6 +86,51 @@ typedef uintmax_t uatomic_max_t;
      ret; })
 
+#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+  ({ __typeof (*mem) ret;						      \
+     __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
+		       "je 0f\n\t"					      \
+		       "lock\n"						      \
+		       "0:\tcmpxchgb %b2, %1"				      \
+		       : "=a" (ret), "=m" (*mem)			      \
+		       : "q" (newval), "m" (*mem), "0" (oldval),	      \
+			 "i" (offsetof (tcbhead_t, multiple_threads)));       \
+     ret; })
+
+#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+  ({ __typeof (*mem) ret;						      \
+     __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
+		       "je 0f\n\t"					      \
+		       "lock\n"						      \
+		       "0:\tcmpxchgw %w2, %1"				      \
+		       : "=a" (ret), "=m" (*mem)			      \
+		       : "q" (newval), "m" (*mem), "0" (oldval),	      \
+			 "i" (offsetof (tcbhead_t, multiple_threads)));       \
+     ret; })
+
+#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+  ({ __typeof (*mem) ret;						      \
+     __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
+		       "je 0f\n\t"					      \
+		       "lock\n"						      \
+		       "0:\tcmpxchgl %2, %1"				      \
+		       : "=a" (ret), "=m" (*mem)			      \
+		       : "q" (newval), "m" (*mem), "0" (oldval),	      \
+			 "i" (offsetof (tcbhead_t, multiple_threads)));       \
+     ret; })
+
+#define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+  ({ __typeof (*mem) ret;						      \
+     __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
+		       "je 0f\n\t"					      \
+		       "lock\n"						      \
+		       "0:\tcmpxchgq %q2, %1"				      \
+		       : "=a" (ret), "=m" (*mem)			      \
+		       : "q" (newval), "m" (*mem), "0" (oldval),	      \
+			 "i" (offsetof (tcbhead_t, multiple_threads)));       \
+     ret; })
+
 
 /* Note that we need no lock prefix.  */
 #define atomic_exchange_acq(mem, newvalue) \
   ({ __typeof (*mem) result;						      \
@@ -107,49 +153,76 @@ typedef uintmax_t uatomic_max_t;
      result; })
 
-#define atomic_exchange_and_add(mem, value) \
+#define __arch_exchange_and_add_body(lock, mem, value) \
   ({ __typeof (*mem) result;						      \
      if (sizeof (*mem) == 1)						      \
-       __asm __volatile (LOCK_PREFIX "xaddb %b0, %1"			      \
+       __asm __volatile (lock "xaddb %b0, %1"				      \
 			 : "=r" (result), "=m" (*mem)			      \
-			 : "0" (value), "m" (*mem));			      \
+			 : "0" (value), "m" (*mem),			      \
+			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
      else if (sizeof (*mem) == 2)					      \
-       __asm __volatile (LOCK_PREFIX "xaddw %w0, %1"			      \
+       __asm __volatile (lock "xaddw %w0, %1"				      \
 			 : "=r" (result), "=m" (*mem)			      \
-			 : "0" (value), "m" (*mem));			      \
+			 : "0" (value), "m" (*mem),			      \
+			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
      else if (sizeof (*mem) == 4)					      \
-       __asm __volatile (LOCK_PREFIX "xaddl %0, %1"			      \
+       __asm __volatile (lock "xaddl %0, %1"				      \
 			 : "=r" (result), "=m" (*mem)			      \
-			 : "0" (value), "m" (*mem));			      \
+			 : "0" (value), "m" (*mem),			      \
+			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
      else								      \
-       __asm __volatile (LOCK_PREFIX "xaddq %q0, %1"			      \
+       __asm __volatile (lock "xaddq %q0, %1"				      \
 			 : "=r" (result), "=m" (*mem)			      \
-			 : "0" ((long) (value)), "m" (*mem));		      \
+			 : "0" ((long) (value)), "m" (*mem),		      \
+			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
      result; })
 
+#define atomic_exchange_and_add(mem, value) \
+  __arch_exchange_and_add_body (LOCK_PREFIX, mem, value)
+
+#define __arch_exchange_and_add_cprefix \
+  "cmpl $0, %%fs:%P4\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_exchange_and_add(mem, value) \
+  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, mem, value)
+
+
+#define __arch_add_body(lock, pfx, mem, value) \
+  do {									      \
+    if (__builtin_constant_p (value) && (value) == 1)			      \
+      pfx##_increment (mem);						      \
+    else if (__builtin_constant_p (value) && (value) == -1)		      \
+      pfx##_decrement (mem);						      \
+    else if (sizeof (*mem) == 1)					      \
+      __asm __volatile (lock "addb %b1, %0"				      \
+			: "=m" (*mem)					      \
+			: "ir" (value), "m" (*mem),			      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else if (sizeof (*mem) == 2)					      \
+      __asm __volatile (lock "addw %w1, %0"				      \
+			: "=m" (*mem)					      \
+			: "ir" (value), "m" (*mem),			      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else if (sizeof (*mem) == 4)					      \
+      __asm __volatile (lock "addl %1, %0"				      \
+			: "=m" (*mem)					      \
+			: "ir" (value), "m" (*mem),			      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else								      \
+      __asm __volatile (lock "addq %q1, %0"				      \
+			: "=m" (*mem)					      \
+			: "ir" ((long) (value)), "m" (*mem),		      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
+  } while (0)
+
 #define atomic_add(mem, value) \
-  (void) ({ if (__builtin_constant_p (value) && (value) == 1)		      \
-	      atomic_increment (mem);					      \
-	    else if (__builtin_constant_p (value) && (value) == 1)	      \
-	      atomic_decrement (mem);					      \
-	    else if (sizeof (*mem) == 1)				      \
-	      __asm __volatile (LOCK_PREFIX "addb %b1, %0"		      \
-				: "=m" (*mem)				      \
-				: "ir" (value), "m" (*mem));		      \
-	    else if (sizeof (*mem) == 2)				      \
-	      __asm __volatile (LOCK_PREFIX "addw %w1, %0"		      \
-				: "=m" (*mem)				      \
-				: "ir" (value), "m" (*mem));		      \
-	    else if (sizeof (*mem) == 4)				      \
-	      __asm __volatile (LOCK_PREFIX "addl %1, %0"		      \
-				: "=m" (*mem)				      \
-				: "ir" (value), "m" (*mem));		      \
-	    else							      \
-	      __asm __volatile (LOCK_PREFIX "addq %q1, %0"		      \
-				: "=m" (*mem)				      \
-				: "ir" ((long) (value)), "m" (*mem));	      \
-	  })
+  __arch_add_body (LOCK_PREFIX, atomic, mem, value)
+
+#define __arch_add_cprefix \
+  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_add(mem, value) \
+  __arch_add_body (__arch_add_cprefix, catomic, mem, value)
 
 
 #define atomic_add_negative(mem, value) \
@@ -194,26 +267,38 @@ typedef uintmax_t uatomic_max_t;
      __result; })
 
-#define atomic_increment(mem) \
+#define __arch_increment_body(lock, mem) \
   do {									      \
     if (sizeof (*mem) == 1)						      \
-      __asm __volatile (LOCK_PREFIX "incb %b0"				      \
+      __asm __volatile (lock "incb %b0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
     else if (sizeof (*mem) == 2)					      \
-      __asm __volatile (LOCK_PREFIX "incw %w0"				      \
+      __asm __volatile (lock "incw %w0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)						      \
-      __asm __volatile (LOCK_PREFIX "incl %0"				      \
+      __asm __volatile (lock "incl %0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else									      \
-      __asm __volatile (LOCK_PREFIX "incq %q0"				      \
+      __asm __volatile (lock "incq %q0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
   } while (0)
 
+#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, mem)
+
+#define __arch_increment_cprefix \
+  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_increment(mem) \
+  __arch_increment_body (__arch_increment_cprefix, mem)
+
 
 #define atomic_increment_and_test(mem) \
   ({ unsigned char __result;						      \
@@ -236,26 +321,38 @@ typedef uintmax_t uatomic_max_t;
      __result; })
 
-#define atomic_decrement(mem) \
+#define __arch_decrement_body(lock, mem) \
   do {									      \
     if (sizeof (*mem) == 1)						      \
-      __asm __volatile (LOCK_PREFIX "decb %b0"				      \
+      __asm __volatile (lock "decb %b0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
     else if (sizeof (*mem) == 2)					      \
-      __asm __volatile (LOCK_PREFIX "decw %w0"				      \
+      __asm __volatile (lock "decw %w0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)						      \
-      __asm __volatile (LOCK_PREFIX "decl %0"				      \
+      __asm __volatile (lock "decl %0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
   else									      \
-      __asm __volatile (LOCK_PREFIX "decq %q0"				      \
+      __asm __volatile (lock "decq %q0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
   } while (0)
 
+#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, mem)
+
+#define __arch_decrement_cprefix \
+  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_decrement(mem) \
+  __arch_decrement_body (__arch_decrement_cprefix, mem)
+
 
 #define atomic_decrement_and_test(mem) \
   ({ unsigned char __result;						      \
@@ -348,22 +445,33 @@ typedef uintmax_t uatomic_max_t;
   } while (0)
 
-#define atomic_or(mem, mask) \
+#define __arch_or_body(lock, mem, mask) \
   do {									      \
     if (sizeof (*mem) == 1)						      \
-      __asm __volatile (LOCK_PREFIX "orb %1, %b0"			      \
+      __asm __volatile (lock "orb %1, %b0"				      \
 			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
+			: "ir" (mask), "m" (*mem),			      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 2)						      \
-      __asm __volatile (LOCK_PREFIX "orw %1, %w0"			      \
+      __asm __volatile (lock "orw %1, %w0"				      \
 			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
+			: "ir" (mask), "m" (*mem),			      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
   else if (sizeof (*mem) == 4)						      \
-      __asm __volatile (LOCK_PREFIX "orl %1, %0"			      \
+      __asm __volatile (lock "orl %1, %0"				      \
 			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
+			: "ir" (mask), "m" (*mem),			      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
  else									      \
-      __asm __volatile (LOCK_PREFIX "orq %1, %q0"			      \
+      __asm __volatile (lock "orq %1, %q0"				      \
 			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
+			: "ir" (mask), "m" (*mem),			      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
   } while (0)
+
+#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)
+
+#define __arch_or_cprefix \
+  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_or(mem, mask) __arch_or_body (__arch_or_cprefix, mem, mask)
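
Every catomic_* macro in this file uses the same trick: test the multiple_threads field of the thread control block (reached through %fs), and jump to a label placed between the standalone lock prefix byte and the instruction it modifies, so a single-threaded process executes the cheap unlocked encoding.  A minimal standalone replica of the pattern, with an ordinary global flag instead of glibc's TCB field:

/* Sketch only: `multiple_threads' stands in for the tcbhead_t member
   the real macros reach via "%%fs:%P<n>" and an offsetof immediate.  */
static int multiple_threads;

static inline int
cond_lock_xadd (int *mem, int value)
{
  int result = value;
  __asm__ __volatile__ ("cmpl $0, %2\n\t"    /* More than one thread?  */
			"je 0f\n\t"	     /* No: skip the lock prefix.  */
			"lock\n"	     /* Yes: fall through, locked.  */
			"0:\txaddl %0, %1"   /* *mem += result; result = old *mem.  */
			: "+r" (result), "+m" (*mem)
			: "m" (multiple_threads)
			: "cc");
  return result;
}

With one thread the je lands on the unprefixed xadd; once a second thread is created the flag must be set (glibc does this in pthread_create), and the very same code path becomes a fully locked xadd.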