mirror of https://sourceware.org/git/glibc.git

(charmap_read): Prepend the condition filename == NULL.

parent 68eefde7b6
commit 4ad1d0cfbf
@@ -63,7 +63,7 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 
   /* Check whether the mutex is locked and owned by this thread. */
   if (mutex->__m_kind != PTHREAD_MUTEX_TIMED_NP
-      && mutex->__m_kind != PTHREAD_MUTEX_FAST_NP
+      && mutex->__m_kind != PTHREAD_MUTEX_ADAPTIVE_NP
       && mutex->__m_owner != self)
     return EINVAL;
 
@@ -124,7 +124,7 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond,
 
   /* Check whether the mutex is locked and owned by this thread. */
   if (mutex->__m_kind != PTHREAD_MUTEX_TIMED_NP
-      && mutex->__m_kind != PTHREAD_MUTEX_FAST_NP
+      && mutex->__m_kind != PTHREAD_MUTEX_ADAPTIVE_NP
      && mutex->__m_owner != self)
     return EINVAL;
 
@@ -287,6 +287,9 @@ extern volatile td_thr_events_t __pthread_threads_events;
 /* Pointer to descriptor of thread with last event. */
 extern volatile pthread_descr __pthread_last_event;
 
+/* Flag which tells whether we are executing on SMP kernel. */
+extern int __pthread_smp_kernel;
+
 /* Return the handle corresponding to a thread id */
 
 static inline pthread_handle thread_handle(pthread_t id)
@@ -38,7 +38,7 @@ strong_alias (__pthread_mutex_init, pthread_mutex_init)
 
 int __pthread_mutex_destroy(pthread_mutex_t * mutex)
 {
-  if (mutex->__m_lock.__status != 0) return EBUSY;
+  if ((mutex->__m_lock.__status & 1) != 0) return EBUSY;
   return 0;
 }
 strong_alias (__pthread_mutex_destroy, pthread_mutex_destroy)
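At the public API level the visible effect of this check is that destroying a mutex whose lock bit is still set fails with EBUSY. A minimal stand-alone illustration (POSIX leaves destroying a locked mutex undefined; this only exercises the check in the hunk above — compile with -pthread):

    #include <pthread.h>
    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
      pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

      pthread_mutex_lock(&m);
      /* Lock bit set: the check above reports EBUSY.  */
      printf("destroy while locked: %d (EBUSY = %d)\n",
             pthread_mutex_destroy(&m), EBUSY);

      pthread_mutex_unlock(&m);
      /* Lock bit clear: destroy succeeds.  */
      printf("destroy after unlock: %d\n", pthread_mutex_destroy(&m));
      return 0;
    }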
@@ -49,7 +49,7 @@ int __pthread_mutex_trylock(pthread_mutex_t * mutex)
   int retcode;
 
   switch(mutex->__m_kind) {
-  case PTHREAD_MUTEX_FAST_NP:
+  case PTHREAD_MUTEX_ADAPTIVE_NP:
     retcode = __pthread_trylock(&mutex->__m_lock);
     return retcode;
   case PTHREAD_MUTEX_RECURSIVE_NP:
@@ -84,7 +84,7 @@ int __pthread_mutex_lock(pthread_mutex_t * mutex)
   pthread_descr self;
 
   switch(mutex->__m_kind) {
-  case PTHREAD_MUTEX_FAST_NP:
+  case PTHREAD_MUTEX_ADAPTIVE_NP:
     __pthread_lock(&mutex->__m_lock, NULL);
     return 0;
   case PTHREAD_MUTEX_RECURSIVE_NP:
@@ -122,7 +122,7 @@ int __pthread_mutex_timedlock (pthread_mutex_t *mutex,
     return EINVAL;
 
   switch(mutex->__m_kind) {
-  case PTHREAD_MUTEX_FAST_NP:
+  case PTHREAD_MUTEX_ADAPTIVE_NP:
     __pthread_lock(&mutex->__m_lock, NULL);
     return 0;
   case PTHREAD_MUTEX_RECURSIVE_NP:
@@ -158,7 +158,7 @@ strong_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)
 int __pthread_mutex_unlock(pthread_mutex_t * mutex)
 {
   switch (mutex->__m_kind) {
-  case PTHREAD_MUTEX_FAST_NP:
+  case PTHREAD_MUTEX_ADAPTIVE_NP:
     __pthread_unlock(&mutex->__m_lock);
     return 0;
   case PTHREAD_MUTEX_RECURSIVE_NP:
@@ -170,7 +170,7 @@ int __pthread_mutex_unlock(pthread_mutex_t * mutex)
     __pthread_unlock(&mutex->__m_lock);
     return 0;
   case PTHREAD_MUTEX_ERRORCHECK_NP:
-    if (mutex->__m_owner != thread_self() || mutex->__m_lock.__status == 0)
+    if (mutex->__m_owner != thread_self() || (mutex->__m_lock.__status & 1) == 0)
       return EPERM;
     mutex->__m_owner = NULL;
     __pthread_alt_unlock(&mutex->__m_lock);
@@ -199,7 +199,7 @@ strong_alias (__pthread_mutexattr_destroy, pthread_mutexattr_destroy)
 
 int __pthread_mutexattr_settype(pthread_mutexattr_t *attr, int kind)
 {
-  if (kind != PTHREAD_MUTEX_FAST_NP
+  if (kind != PTHREAD_MUTEX_ADAPTIVE_NP
       && kind != PTHREAD_MUTEX_RECURSIVE_NP
      && kind != PTHREAD_MUTEX_ERRORCHECK_NP
      && kind != PTHREAD_MUTEX_TIMED_NP)
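For callers, the renaming means PTHREAD_MUTEX_ADAPTIVE_NP is now the kind accepted here in place of PTHREAD_MUTEX_FAST_NP. A small usage sketch with the public API (the constant is a GNU extension, so _GNU_SOURCE is needed; compile with -pthread):

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
      pthread_mutexattr_t attr;
      pthread_mutex_t mutex;

      pthread_mutexattr_init(&attr);
      if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP) != 0)
        fprintf(stderr, "adaptive mutexes not supported\n");
      pthread_mutex_init(&mutex, &attr);

      pthread_mutex_lock(&mutex);
      pthread_mutex_unlock(&mutex);

      pthread_mutex_destroy(&mutex);
      pthread_mutexattr_destroy(&attr);
      return 0;
    }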
@@ -150,7 +150,7 @@ pthread_descr __pthread_main_thread = &__pthread_initial_thread;
 /* Limit between the stack of the initial thread (above) and the
    stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
 
-char *__pthread_initial_thread_bos = NULL;
+char *__pthread_initial_thread_bos;
 
 /* File descriptor for sending requests to the thread manager. */
 /* Initially -1, meaning that the thread manager is not running. */
@@ -163,13 +163,17 @@ int __pthread_manager_reader;
 
 /* Limits of the thread manager stack */
 
-char *__pthread_manager_thread_bos = NULL;
-char *__pthread_manager_thread_tos = NULL;
+char *__pthread_manager_thread_bos;
+char *__pthread_manager_thread_tos;
 
 /* For process-wide exit() */
 
-int __pthread_exit_requested = 0;
-int __pthread_exit_code = 0;
+int __pthread_exit_requested;
+int __pthread_exit_code;
+
+/* Nozero if the machine has more than one processor. */
+int __pthread_smp_kernel;
 
 #if !__ASSUME_REALTIME_SIGNALS
 /* Pointers that select new or old suspend/resume functions
@@ -212,7 +216,7 @@ static int current_rtmin = -1;
 static int current_rtmax = -1;
 int __pthread_sig_restart = SIGUSR1;
 int __pthread_sig_cancel = SIGUSR2;
-int __pthread_sig_debug = 0;
+int __pthread_sig_debug;
 #else
 static int current_rtmin;
 static int current_rtmax;
@@ -224,7 +228,7 @@ int __pthread_sig_debug = __SIGRTMIN + 2;
 #else
 int __pthread_sig_restart = SIGUSR1;
 int __pthread_sig_cancel = SIGUSR2;
-int __pthread_sig_debug = 0;
+int __pthread_sig_debug;
 #endif
 
 static int rtsigs_initialized;
@@ -399,6 +403,8 @@ static void pthread_initialize(void)
     __cxa_atexit((void (*) (void *)) pthread_exit_process, NULL, __dso_handle);
   else
     __on_exit (pthread_exit_process, NULL);
+  /* How many processors. */
+  __pthread_smp_kernel = sysconf (_SC_NPROCESSORS_ONLN) > 1;
 }
 
 void __pthread_initialize(void)
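The new line keys adaptive spinning off the number of processors that are online at startup. A stand-alone equivalent of that check, using the same sysconf call:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
      /* Treat the machine as SMP when more than one processor is online,
         exactly as the added pthread_initialize line does.  */
      int smp = sysconf(_SC_NPROCESSORS_ONLN) > 1;
      printf("SMP kernel assumed: %d\n", smp);
      return 0;
    }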
@@ -24,12 +24,20 @@
 #include "spinlock.h"
 #include "restart.h"
 
-/* The status field of a spinlock has the following meaning:
-     0: spinlock is free
-     1: spinlock is taken, no thread is waiting on it
-     ADDR: psinlock is taken, ADDR is address of thread descriptor for
-           first waiting thread, other waiting threads are linked via
-           their p_nextlock field.
+/* The status field of a spinlock is a pointer whose least significant
+   bit is a locked flag.
+
+   Thus the field values have the following meanings:
+
+   status == 0: spinlock is free
+   status == 1: spinlock is taken; no thread is waiting on it
+
+   (status & 1) == 1: spinlock is taken and (status & ~1L) is a
+                      pointer to the first waiting thread; other
+                      waiting threads are linked via the p_nextlock
+                      field.
+   (status & 1) == 0: same as above, but spinlock is not taken.
+
    The waiting list is not sorted by priority order.
    Actually, we always insert at top of list (sole insertion mode
    that can be performed without locking).
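A stand-alone sketch of the encoding the rewritten comment describes. The waiter struct below is only an illustration standing in for pthread_descr, which is sufficiently aligned that its low bit is always clear:

    #include <stdio.h>

    struct waiter {
      struct waiter *next;   /* plays the role of p_nextlock */
      int id;
    };

    int main(void)
    {
      struct waiter w = { NULL, 1 };
      long status;

      status = 0;                    /* free */
      printf("taken=%ld waiters=%p\n", status & 1, (void *) (status & ~1L));

      status = 1;                    /* taken, nobody waiting */
      printf("taken=%ld waiters=%p\n", status & 1, (void *) (status & ~1L));

      status = (long) &w | 1;        /* taken, w is the first waiter */
      printf("taken=%ld waiters=%p\n", status & 1, (void *) (status & ~1L));
      return 0;
    }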
@@ -38,29 +46,70 @@
    This is safe because there are no concurrent __pthread_unlock
    operations -- only the thread that locked the mutex can unlock it. */
 
 
 void internal_function __pthread_lock(struct _pthread_fastlock * lock,
                                       pthread_descr self)
 {
+#if defined HAS_COMPARE_AND_SWAP
   long oldstatus, newstatus;
-  int spurious_wakeup_count = 0;
+  int successful_seizure, spurious_wakeup_count = 0;
+  int spin_count = 0;
+#endif
+
+#if defined TEST_FOR_COMPARE_AND_SWAP
+  if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP
+  {
+    __pthread_acquire(&lock->__spinlock);
+    return 0;
+  }
+#endif
+
+#if defined HAS_COMPARE_AND_SWAP
+again:
+
+  /* On SMP, try spinning to get the lock. */
+
+  if (__pthread_smp_kernel) {
+    int max_count = lock->__spinlock * 2 + 10;
+
+    for (spin_count = 0; spin_count < max_count; spin_count++) {
+      if (((oldstatus = lock->__status) & 1) == 0) {
+        if(__compare_and_swap(&lock->__status, oldstatus, oldstatus | 1))
+        {
+          if (spin_count)
+            lock->__spinlock += (spin_count - lock->__spinlock) / 8;
+          return;
+        }
+      }
+    }
+
+    lock->__spinlock += (spin_count - lock->__spinlock) / 8;
+  }
+
+  /* No luck, try once more or suspend. */
+
   do {
     oldstatus = lock->__status;
-    if (oldstatus == 0) {
-      newstatus = 1;
+    successful_seizure = 0;
+
+    if ((oldstatus & 1) == 0) {
+      newstatus = oldstatus | 1;
+      successful_seizure = 1;
     } else {
       if (self == NULL)
         self = thread_self();
-      newstatus = (long) self;
+      newstatus = (long) self | 1;
     }
+
     if (self != NULL) {
-      THREAD_SETMEM(self, p_nextlock, (pthread_descr) oldstatus);
+      THREAD_SETMEM(self, p_nextlock, (pthread_descr) (oldstatus & ~1L));
       /* Make sure the store in p_nextlock completes before performing
          the compare-and-swap */
       MEMORY_BARRIER();
     }
-  } while(! compare_and_swap(&lock->__status, oldstatus, newstatus,
-                             &lock->__spinlock));
+  } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));
 
   /* Suspend with guard against spurious wakeup.
      This can happen in pthread_cond_timedwait_relative, when the thread
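The hunk above is the core of the change: on SMP the lock first spins up to an adaptive bound derived from past behaviour, then falls back to enqueueing itself on the status word and suspending. The sketch below reproduces only the spinning and spin-count-estimate part, using C11 atomics rather than the linuxthreads __compare_and_swap primitives, and yields instead of suspending; the type and function names are invented for the example:

    #include <stdatomic.h>
    #include <sched.h>

    struct adaptive_lock {
      atomic_long status;    /* low bit set = taken, as in the new scheme */
      int spin_estimate;     /* smoothed count of useful spins (the new role of __spinlock) */
    };

    static int try_set_lock_bit(struct adaptive_lock *lock)
    {
      long old = atomic_load_explicit(&lock->status, memory_order_relaxed);
      return (old & 1) == 0
             && atomic_compare_exchange_weak_explicit(&lock->status, &old, old | 1,
                                                      memory_order_acquire,
                                                      memory_order_relaxed);
    }

    static void adaptive_lock_acquire(struct adaptive_lock *lock)
    {
      int spin_count;
      int max_count = lock->spin_estimate * 2 + 10;   /* bound taken from the hunk */

      for (spin_count = 0; spin_count < max_count; spin_count++)
        if (try_set_lock_bit(lock)) {
          /* Exponentially smoothed update, mirroring
             lock->__spinlock += (spin_count - lock->__spinlock) / 8;  */
          lock->spin_estimate += (spin_count - lock->spin_estimate) / 8;
          return;
        }

      lock->spin_estimate += (spin_count - lock->spin_estimate) / 8;

      /* Slow path: linuxthreads enqueues the caller on the status word and
         suspends it; this sketch just yields and retries.  */
      while (!try_set_lock_bit(lock))
        sched_yield();
    }

    static struct adaptive_lock demo_lock;

    int main(void)
    {
      adaptive_lock_acquire(&demo_lock);
      /* Release: clear only the taken bit.  */
      atomic_fetch_and_explicit(&demo_lock.status, ~1L, memory_order_release);
      return 0;
    }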
@@ -68,7 +117,7 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock,
      locks the queue to remove itself. At that point it may still be on the
      queue, and may be resumed by a condition signal. */
 
-  if (oldstatus != 0) {
+  if (!successful_seizure) {
     for (;;) {
       suspend(self);
       if (self->p_nextlock != NULL) {
@@ -78,37 +127,50 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock,
       }
       break;
     }
+    goto again;
   }
 
   /* Put back any resumes we caught that don't belong to us. */
   while (spurious_wakeup_count--)
     restart(self);
+#endif
 }
 
 int __pthread_unlock(struct _pthread_fastlock * lock)
 {
+#if defined HAS_COMPARE_AND_SWAP
   long oldstatus;
   pthread_descr thr, * ptr, * maxptr;
   int maxprio;
+#endif
 
-again:
-  oldstatus = lock->__status;
-  if (oldstatus == 0 || oldstatus == 1) {
-    /* No threads are waiting for this lock. Please note that we also
-       enter this case if the lock is not taken at all. If this wouldn't
-       be done here we would crash further down. */
-    if (! compare_and_swap_with_release_semantics (&lock->__status,
-                                                   oldstatus, 0,
-                                                   &lock->__spinlock))
-      goto again;
+#if defined TEST_FOR_COMPARE_AND_SWAP
+  if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP
+  {
+    lock->__spinlock = 0;
+    WRITE_MEMORY_BARRIER();
     return 0;
   }
+#endif
+
+#if defined HAS_COMPARE_AND_SWAP
+again:
+  oldstatus = lock->__status;
+
+  while ((oldstatus = lock->__status) == 1) {
+    if (__compare_and_swap_with_release_semantics(&lock->__status,
+                                                  oldstatus, 0))
+      return 0;
+  }
 
   /* Find thread in waiting queue with maximal priority */
   ptr = (pthread_descr *) &lock->__status;
-  thr = (pthread_descr) oldstatus;
+  thr = (pthread_descr) (oldstatus & ~1L);
   maxprio = 0;
   maxptr = ptr;
-  while (thr != (pthread_descr) 1) {
+  while (thr != 0) {
     if (thr->p_priority >= maxprio) {
       maxptr = ptr;
       maxprio = thr->p_priority;
@@ -128,16 +190,25 @@ again:
   /* Remove max prio thread from waiting list. */
   if (maxptr == (pthread_descr *) &lock->__status) {
     /* If max prio thread is at head, remove it with compare-and-swap
-       to guard against concurrent lock operation */
-    thr = (pthread_descr) oldstatus;
-    if (! compare_and_swap_with_release_semantics
-            (&lock->__status, oldstatus, (long)(thr->p_nextlock),
-             &lock->__spinlock))
+       to guard against concurrent lock operation. This removal
+       also has the side effect of marking the lock as released
+       because the new status comes from thr->p_nextlock whose
+       least significant bit is clear. */
+    thr = (pthread_descr) (oldstatus & ~1L);
+    if (! __compare_and_swap_with_release_semantics
+            (&lock->__status, oldstatus, (long)(thr->p_nextlock)))
       goto again;
   } else {
-    /* No risk of concurrent access, remove max prio thread normally */
+    /* No risk of concurrent access, remove max prio thread normally.
+       But in this case we must also flip the least significant bit
+       of the status to mark the lock as released. */
     thr = *maxptr;
     *maxptr = thr->p_nextlock;
+
+    do {
+      oldstatus = lock->__status;
+    } while (!__compare_and_swap_with_release_semantics(&lock->__status,
+             oldstatus, oldstatus & ~1L));
   }
   /* Prevent reordering of store to *maxptr above and store to thr->p_nextlock
      below */
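Both release paths above boil down to clearing only the least significant bit of the status word, so that a waiter pointer stored in the upper bits survives the release. A minimal C11-atomics sketch of that step (an illustration, not the linuxthreads compare_and_swap_with_release_semantics itself):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Clear only the low bit; any waiter pointer in the upper bits stays.  */
    static void release_lock_bit(atomic_long *status)
    {
      long old = atomic_load_explicit(status, memory_order_relaxed);
      while (!atomic_compare_exchange_weak_explicit(status, &old, old & ~1L,
                                                    memory_order_release,
                                                    memory_order_relaxed))
        ;   /* a failed CAS reloads 'old' with the current value; retry */
    }

    int main(void)
    {
      /* Pretend 0x1000 is the first waiter's address; bit 0 marks "taken".  */
      atomic_long status = 0x1001;
      release_lock_bit(&status);
      printf("status after release: %#lx\n",
             (unsigned long) atomic_load(&status));   /* 0x1000 */
      return 0;
    }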
@@ -147,6 +218,7 @@ again:
   restart(thr);
 
   return 0;
+#endif
 }
 
 /*
@@ -81,6 +81,7 @@ static inline int compare_and_swap(long * ptr, long oldval, long newval,
 
 #ifndef HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
 #define compare_and_swap_with_release_semantics compare_and_swap
+#define __compare_and_swap_with_release_semantics __compare_and_swap
 #endif
 
 /* Internal locks */
@@ -97,13 +98,26 @@ static inline void __pthread_init_lock(struct _pthread_fastlock * lock)
 
 static inline int __pthread_trylock (struct _pthread_fastlock * lock)
 {
+#if defined HAS_COMPARE_AND_SWAP
   long oldstatus;
+#endif
 
+#if defined TEST_FOR_COMPARE_AND_SWAP
+  if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP
+  {
+    return (testandset(&lock->__spinlock) : EBUSY : 0)
+  }
+#endif
+
+#if defined HAS_COMPARE_AND_SWAP
   do {
     oldstatus = lock->__status;
     if (oldstatus != 0) return EBUSY;
   } while(! compare_and_swap(&lock->__status, 0, 1, &lock->__spinlock));
   return 0;
+#endif
 }
 
 /* Variation of internal lock used for pthread_mutex_t, supporting
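The new non-CAS branch expresses "test-and-set, then EBUSY or 0", though as committed it has a stray ':' where '?' belongs and a missing semicolon, so it would not compile where that branch is selected. A stand-alone version of the intended logic, sketched with a C11 atomic_flag rather than the linuxthreads testandset primitive:

    #include <stdatomic.h>
    #include <errno.h>
    #include <stdio.h>

    static atomic_flag spinlock = ATOMIC_FLAG_INIT;

    static int my_trylock(void)
    {
      /* test_and_set returns the previous value: nonzero means already held.  */
      return atomic_flag_test_and_set(&spinlock) ? EBUSY : 0;
    }

    int main(void)
    {
      printf("first attempt:  %d\n", my_trylock());   /* 0 */
      printf("second attempt: %d\n", my_trylock());   /* EBUSY */
      atomic_flag_clear(&spinlock);
      return 0;
    }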
|
@ -25,8 +25,9 @@
|
|||||||
/* Fast locks (not abstract because mutexes and conditions aren't abstract). */
|
/* Fast locks (not abstract because mutexes and conditions aren't abstract). */
|
||||||
struct _pthread_fastlock
|
struct _pthread_fastlock
|
||||||
{
|
{
|
||||||
long int __status; /* "Free" or "taken" or head of waiting list */
|
long int __status; /* "Free" or "taken" or head of waiting list */
|
||||||
int __spinlock; /* For compare-and-swap emulation */
|
int __spinlock; /* Used by compare_and_swap emulation. Also,
|
||||||
|
adaptive SMP lock stores spin count here. */
|
||||||
};
|
};
|
||||||
|
|
||||||
#ifndef _PTHREAD_DESCR_DEFINED
|
#ifndef _PTHREAD_DESCR_DEFINED
|
||||||
|
@@ -77,10 +77,10 @@ enum
 
 enum
 {
-  PTHREAD_MUTEX_FAST_NP,
+  PTHREAD_MUTEX_TIMED_NP,
   PTHREAD_MUTEX_RECURSIVE_NP,
   PTHREAD_MUTEX_ERRORCHECK_NP,
-  PTHREAD_MUTEX_TIMED_NP
+  PTHREAD_MUTEX_ADAPTIVE_NP
 #ifdef __USE_UNIX98
   ,
   PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_TIMED_NP,
@@ -159,7 +159,8 @@ charmap_read (const char *filename)
               == 1)
           || fscanf (fp, "%% alias %as", &name) == 1)
         {
-          if (strcasecmp (name, filename) == 0)
+          if (filename != NULL
+              && strcasecmp (name, filename) == 0)
             break;
 
           free (name);
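The charmap_read change simply guards the strcasecmp call so a NULL filename no longer gets dereferenced. The same guard in isolation (matches_name is a made-up helper for the example, not a glibc function):

    #include <stdio.h>
    #include <strings.h>

    static int matches_name(const char *name, const char *filename)
    {
      /* Check the pointer before handing it to strcasecmp.  */
      return filename != NULL && strcasecmp(name, filename) == 0;
    }

    int main(void)
    {
      printf("%d\n", matches_name("UTF-8", "utf-8"));  /* 1 */
      printf("%d\n", matches_name("UTF-8", NULL));     /* 0, no crash */
      return 0;
    }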