2000-05-23  Jakub Jelinek  <jakub@redhat.com>

	* sysdeps/i386/fpu/bits/mathinline.h (__sincos, __sincosf,
	__sincosl): Guard with __USE_GNU.
This commit is contained in:
Ulrich Drepper 2000-05-25 06:15:25 +00:00
parent db33f7d4ae
commit 0f5504179a
24 changed files with 731 additions and 117 deletions

View File

@ -1,3 +1,8 @@
2000-05-23 Jakub Jelinek <jakub@redhat.com>
* sysdeps/i386/fpu/bits/mathinline.h (__sincos, __sincosf,
__sincosl): Guard with __USE_GNU.
2000-05-24 Ulrich Drepper <drepper@redhat.com>
* csu/Makefile (routines): Add check_fds.

View File

@ -35,7 +35,7 @@ extra-libs-others := $(extra-libs)
libpthread-routines := attr cancel condvar join manager mutex ptfork \
ptlongjmp pthread signals specific errno lockfile \
semaphore spinlock wrapsyscall rwlock pt-machine \
oldsemaphore events getcpuclockid
oldsemaphore events getcpuclockid pspinlock
vpath %.c Examples
tests = ex1 ex2 ex3 ex4 ex5 ex6 ex7

View File

@ -58,14 +58,14 @@ int pthread_cancel(pthread_t thread)
__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread)) {
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
return ESRCH;
}
th = handle->h_descr;
if (th->p_canceled) {
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
return 0;
}
@ -85,7 +85,7 @@ int pthread_cancel(pthread_t thread)
th->p_woken_by_cancel = dorestart;
}
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
/* If the thread has suspended or is about to, then we unblock it by
issuing a restart, instead of a cancel signal. Otherwise we send

View File

@ -50,7 +50,7 @@ static int cond_extricate_func(void *obj, pthread_descr th)
__pthread_lock(&cond->__c_lock, self);
did_remove = remove_from_queue(&cond->__c_waiting, th);
__pthread_spin_unlock(&cond->__c_lock);
__pthread_unlock(&cond->__c_lock);
return did_remove;
}
@ -85,7 +85,7 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
enqueue(&cond->__c_waiting, self);
else
already_canceled = 1;
__pthread_spin_unlock(&cond->__c_lock);
__pthread_unlock(&cond->__c_lock);
if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);
@ -138,7 +138,7 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond,
enqueue(&cond->__c_waiting, self);
else
already_canceled = 1;
__pthread_spin_unlock(&cond->__c_lock);
__pthread_unlock(&cond->__c_lock);
if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);
@ -155,7 +155,7 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond,
__pthread_lock(&cond->__c_lock, self);
was_on_queue = remove_from_queue(&cond->__c_waiting, self);
__pthread_spin_unlock(&cond->__c_lock);
__pthread_unlock(&cond->__c_lock);
if (was_on_queue) {
__pthread_set_own_extricate_if(self, 0);
@ -196,7 +196,7 @@ int pthread_cond_signal(pthread_cond_t *cond)
__pthread_lock(&cond->__c_lock, NULL);
th = dequeue(&cond->__c_waiting);
__pthread_spin_unlock(&cond->__c_lock);
__pthread_unlock(&cond->__c_lock);
if (th != NULL) restart(th);
return 0;
}
@ -209,7 +209,7 @@ int pthread_cond_broadcast(pthread_cond_t *cond)
/* Copy the current state of the waiting queue and empty it */
tosignal = cond->__c_waiting;
cond->__c_waiting = NULL;
__pthread_spin_unlock(&cond->__c_lock);
__pthread_unlock(&cond->__c_lock);
/* Now signal each process in the queue */
while ((th = dequeue(&tosignal)) != NULL) restart(th);
return 0;

View File

@ -138,7 +138,7 @@ struct _pthread_descr_struct {
pthread_t p_tid; /* Thread identifier */
int p_pid; /* PID of Unix process */
int p_priority; /* Thread priority (== 0 if not realtime) */
pthread_spinlock_t * p_lock; /* Spinlock for synchronized accesses */
struct _pthread_fastlock * p_lock; /* Spinlock for synchronized accesses */
int p_signal; /* last signal received */
sigjmp_buf * p_signal_jmp; /* where to siglongjmp on a signal or NULL */
sigjmp_buf * p_cancel_jmp; /* where to siglongjmp on a cancel or NULL */
@ -189,7 +189,7 @@ struct _pthread_descr_struct {
typedef struct pthread_handle_struct * pthread_handle;
struct pthread_handle_struct {
pthread_spinlock_t h_lock; /* Fast lock for sychronized access */
struct _pthread_fastlock h_lock; /* Fast lock for sychronized access */
pthread_descr h_descr; /* Thread descriptor or NULL if invalid */
char * h_bottom; /* Lowest address in the stack thread */
};

View File

@ -62,7 +62,7 @@ void pthread_exit(void * retval)
THREAD_SETMEM(self, p_terminated, 1);
/* See if someone is joining on us */
joining = THREAD_GETMEM(self, p_joining);
__pthread_spin_unlock(THREAD_GETMEM(self, p_lock));
__pthread_unlock(THREAD_GETMEM(self, p_lock));
/* Restart joining thread if any */
if (joining != NULL) restart(joining);
/* If this is the initial thread, block until all threads have terminated.
@ -76,7 +76,7 @@ void pthread_exit(void * retval)
/* Main thread flushes stdio streams and runs atexit functions.
It also calls a handler within LinuxThreads which sends a process exit
request to the thread manager. */
exit(0);
exit(0);
}
/* Threads other than the main one terminate without flushing stdio streams
or running atexit functions. */
@ -97,7 +97,7 @@ static int join_extricate_func(void *obj, pthread_descr th)
jo = handle->h_descr;
did_remove = jo->p_joining != NULL;
jo->p_joining = NULL;
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
return did_remove;
}
@ -117,17 +117,17 @@ int pthread_join(pthread_t thread_id, void ** thread_return)
__pthread_lock(&handle->h_lock, self);
if (invalid_handle(handle, thread_id)) {
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
return ESRCH;
}
th = handle->h_descr;
if (th == self) {
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
return EDEADLK;
}
/* If detached or already joined, error */
if (th->p_detached || th->p_joining != NULL) {
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
return EINVAL;
}
/* If not terminated yet, suspend ourselves. */
@ -139,7 +139,7 @@ int pthread_join(pthread_t thread_id, void ** thread_return)
th->p_joining = self;
else
already_canceled = 1;
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);
@ -160,7 +160,7 @@ int pthread_join(pthread_t thread_id, void ** thread_return)
}
/* Get return value */
if (thread_return != NULL) *thread_return = th->p_retval;
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
/* Send notification to thread manager */
if (__pthread_manager_request >= 0) {
request.req_thread = self;
@ -181,24 +181,24 @@ int pthread_detach(pthread_t thread_id)
__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread_id)) {
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
return ESRCH;
}
th = handle->h_descr;
/* If already detached, error */
if (th->p_detached) {
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
return EINVAL;
}
/* If already joining, don't do anything. */
if (th->p_joining != NULL) {
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
return 0;
}
/* Mark as detached */
th->p_detached = 1;
terminated = th->p_terminated;
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
/* If already terminated, notify thread manager to reclaim resources */
if (terminated && __pthread_manager_request >= 0) {
request.req_thread = thread_self();

View File

@ -177,7 +177,7 @@ int __pthread_manager(void *arg)
__on_exit handler, which in turn will send REQ_PROCESS_EXIT
to the thread manager. In case you are wondering how the
manager terminates from its loop here. */
}
}
break;
case REQ_POST:
__new_sem_post(request.req_args.post);
@ -207,7 +207,7 @@ int __pthread_manager_event(void *arg)
/* Get the lock the manager will free once all is correctly set up. */
__pthread_lock (THREAD_GETMEM((&__pthread_manager_thread), p_lock), NULL);
/* Free it immediately. */
__pthread_spin_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock));
__pthread_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock));
return __pthread_manager(arg);
}
@ -273,7 +273,7 @@ static int pthread_start_thread_event(void *arg)
/* Get the lock the manager will free once all is correctly set up. */
__pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
/* Free it immediately. */
__pthread_spin_unlock (THREAD_GETMEM(self, p_lock));
__pthread_unlock (THREAD_GETMEM(self, p_lock));
/* Continue with the real function. */
return pthread_start_thread (arg);
@ -474,7 +474,7 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
__linuxthreads_create_event ();
/* Now restart the thread. */
__pthread_spin_unlock(new_thread->p_lock);
__pthread_unlock(new_thread->p_lock);
}
}
}
@ -523,7 +523,7 @@ static void pthread_free(pthread_descr th)
__pthread_lock(&handle->h_lock, NULL);
handle->h_descr = NULL;
handle->h_bottom = (char *)(-1L);
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
#ifdef FREE_THREAD
FREE_THREAD(th, th->p_nr);
#endif
@ -594,7 +594,7 @@ static void pthread_exited(pid_t pid)
}
}
detached = th->p_detached;
__pthread_spin_unlock(th->p_lock);
__pthread_unlock(th->p_lock);
if (detached)
pthread_free(th);
break;
@ -637,19 +637,19 @@ static void pthread_handle_free(pthread_t th_id)
if (invalid_handle(handle, th_id)) {
/* pthread_reap_children has deallocated the thread already,
nothing needs to be done */
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
return;
}
th = handle->h_descr;
if (th->p_exited) {
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
pthread_free(th);
} else {
/* The Unix process of the thread is still running.
Mark the thread as detached so that the thread manager will
deallocate its resources when the Unix process exits. */
th->p_detached = 1;
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
}
}

View File

@ -110,7 +110,7 @@ int __pthread_mutex_unlock(pthread_mutex_t * mutex)
{
switch (mutex->__m_kind) {
case PTHREAD_MUTEX_FAST_NP:
__pthread_spin_unlock(&mutex->__m_lock);
__pthread_unlock(&mutex->__m_lock);
return 0;
case PTHREAD_MUTEX_RECURSIVE_NP:
if (mutex->__m_count > 0) {
@ -118,13 +118,13 @@ int __pthread_mutex_unlock(pthread_mutex_t * mutex)
return 0;
}
mutex->__m_owner = NULL;
__pthread_spin_unlock(&mutex->__m_lock);
__pthread_unlock(&mutex->__m_lock);
return 0;
case PTHREAD_MUTEX_ERRORCHECK_NP:
if (mutex->__m_owner != thread_self() || mutex->__m_lock.__status == 0)
return EPERM;
mutex->__m_owner = NULL;
__pthread_spin_unlock(&mutex->__m_lock);
__pthread_unlock(&mutex->__m_lock);
return 0;
default:
return EINVAL;

View File

@ -460,7 +460,7 @@ int __pthread_initialize_manager(void)
__linuxthreads_create_event ();
/* Now restart the thread. */
__pthread_spin_unlock(__pthread_manager_thread.p_lock);
__pthread_unlock(__pthread_manager_thread.p_lock);
}
}
}
@ -587,16 +587,16 @@ int pthread_setschedparam(pthread_t thread, int policy,
__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread)) {
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
return ESRCH;
}
th = handle->h_descr;
if (__sched_setscheduler(th->p_pid, policy, param) == -1) {
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
return errno;
}
th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
if (__pthread_manager_request >= 0)
__pthread_manager_adjust_prio(th->p_priority);
return 0;
@ -610,11 +610,11 @@ int pthread_getschedparam(pthread_t thread, int *policy,
__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread)) {
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
return ESRCH;
}
pid = handle->h_descr->p_pid;
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
pol = __sched_getscheduler(pid);
if (pol == -1) return errno;
if (__sched_getparam(pid, param) == -1) return errno;
@ -788,7 +788,7 @@ void __pthread_set_own_extricate_if(pthread_descr self, pthread_extricate_if *pe
{
__pthread_lock(self->p_lock, self);
THREAD_SETMEM(self, p_extricate, peif);
__pthread_spin_unlock(self->p_lock);
__pthread_unlock(self->p_lock);
}
/* Primitives for controlling thread execution */

View File

@ -217,7 +217,7 @@ __pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
__pthread_lock (&rwlock->__rw_lock, NULL);
readers = rwlock->__rw_readers;
writer = rwlock->__rw_writer;
__pthread_spin_unlock (&rwlock->__rw_lock);
__pthread_unlock (&rwlock->__rw_lock);
if (readers > 0 || writer != NULL)
return EBUSY;
@ -247,12 +247,12 @@ __pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
break;
enqueue (&rwlock->__rw_read_waiting, self);
__pthread_spin_unlock (&rwlock->__rw_lock);
__pthread_unlock (&rwlock->__rw_lock);
suspend (self); /* This is not a cancellation point */
}
++rwlock->__rw_readers;
__pthread_spin_unlock (&rwlock->__rw_lock);
__pthread_unlock (&rwlock->__rw_lock);
if (have_lock_already || out_of_mem)
{
@ -291,7 +291,7 @@ __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
retval = 0;
}
__pthread_spin_unlock (&rwlock->__rw_lock);
__pthread_unlock (&rwlock->__rw_lock);
if (retval == 0)
{
@ -320,13 +320,13 @@ __pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
{
rwlock->__rw_writer = self;
__pthread_spin_unlock (&rwlock->__rw_lock);
__pthread_unlock (&rwlock->__rw_lock);
return 0;
}
/* Suspend ourselves, then try again */
enqueue (&rwlock->__rw_write_waiting, self);
__pthread_spin_unlock (&rwlock->__rw_lock);
__pthread_unlock (&rwlock->__rw_lock);
suspend (self); /* This is not a cancellation point */
}
}
@ -344,7 +344,7 @@ __pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
rwlock->__rw_writer = thread_self ();
result = 0;
}
__pthread_spin_unlock (&rwlock->__rw_lock);
__pthread_unlock (&rwlock->__rw_lock);
return result;
}
@ -363,7 +363,7 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
/* Unlocking a write lock. */
if (rwlock->__rw_writer != thread_self ())
{
__pthread_spin_unlock (&rwlock->__rw_lock);
__pthread_unlock (&rwlock->__rw_lock);
return EPERM;
}
rwlock->__rw_writer = NULL;
@ -375,14 +375,14 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
/* Restart all waiting readers. */
torestart = rwlock->__rw_read_waiting;
rwlock->__rw_read_waiting = NULL;
__pthread_spin_unlock (&rwlock->__rw_lock);
__pthread_unlock (&rwlock->__rw_lock);
while ((th = dequeue (&torestart)) != NULL)
restart (th);
}
else
{
/* Restart one waiting writer. */
__pthread_spin_unlock (&rwlock->__rw_lock);
__pthread_unlock (&rwlock->__rw_lock);
restart (th);
}
}
@ -391,7 +391,7 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
/* Unlocking a read lock. */
if (rwlock->__rw_readers == 0)
{
__pthread_spin_unlock (&rwlock->__rw_lock);
__pthread_unlock (&rwlock->__rw_lock);
return EPERM;
}
@ -402,7 +402,7 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
else
th = NULL;
__pthread_spin_unlock (&rwlock->__rw_lock);
__pthread_unlock (&rwlock->__rw_lock);
if (th != NULL)
restart (th);

View File

@ -33,7 +33,7 @@ int __new_sem_init(sem_t *sem, int pshared, unsigned int value)
errno = ENOSYS;
return -1;
}
__pthread_init_lock((pthread_spinlock_t *) &sem->__sem_lock);
__pthread_init_lock((struct _pthread_fastlock *) &sem->__sem_lock);
sem->__sem_value = value;
sem->__sem_waiting = NULL;
return 0;
@ -48,9 +48,9 @@ static int new_sem_extricate_func(void *obj, pthread_descr th)
sem_t *sem = obj;
int did_remove = 0;
__pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
__pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
did_remove = remove_from_queue(&sem->__sem_waiting, th);
__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
return did_remove;
}
@ -65,10 +65,10 @@ int __new_sem_wait(sem_t * sem)
extr.pu_object = sem;
extr.pu_extricate_func = new_sem_extricate_func;
__pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
__pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
if (sem->__sem_value > 0) {
sem->__sem_value--;
__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
return 0;
}
/* Register extrication interface */
@ -79,7 +79,7 @@ int __new_sem_wait(sem_t * sem)
enqueue(&sem->__sem_waiting, self);
else
already_canceled = 1;
__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);
@ -106,7 +106,7 @@ int __new_sem_trywait(sem_t * sem)
{
int retval;
__pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, NULL);
__pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, NULL);
if (sem->__sem_value == 0) {
errno = EAGAIN;
retval = -1;
@ -114,7 +114,7 @@ int __new_sem_trywait(sem_t * sem)
sem->__sem_value--;
retval = 0;
}
__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
return retval;
}
@ -125,19 +125,19 @@ int __new_sem_post(sem_t * sem)
struct pthread_request request;
if (THREAD_GETMEM(self, p_in_sighandler) == NULL) {
__pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
__pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
if (sem->__sem_waiting == NULL) {
if (sem->__sem_value >= SEM_VALUE_MAX) {
/* Overflow */
errno = ERANGE;
__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
return -1;
}
sem->__sem_value++;
__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
} else {
th = dequeue(&sem->__sem_waiting);
__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
restart(th);
}
} else {
@ -196,17 +196,17 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
pthread_extricate_if extr;
int already_canceled = 0;
__pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
__pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
if (sem->__sem_value > 0) {
--sem->__sem_value;
__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
return 0;
}
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) {
/* The standard requires that if the function would block and the
time value is illegal, the function returns with an error. */
__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
return EINVAL;
}
@ -222,7 +222,7 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
enqueue(&sem->__sem_waiting, self);
else
already_canceled = 1;
__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);
@ -235,9 +235,9 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
/* __pthread_lock will queue back any spurious restarts that
may happen to it. */
__pthread_lock((pthread_spinlock_t *)&sem->__sem_lock, self);
__pthread_lock((struct _pthread_fastlock *)&sem->__sem_lock, self);
was_on_queue = remove_from_queue(&sem->__sem_waiting, self);
__pthread_spin_unlock((pthread_spinlock_t *)&sem->__sem_lock);
__pthread_unlock((struct _pthread_fastlock *)&sem->__sem_lock);
if (was_on_queue) {
__pthread_set_own_extricate_if(self, 0);

View File

@ -57,11 +57,11 @@ int pthread_kill(pthread_t thread, int signo)
__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread)) {
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
return ESRCH;
}
pid = handle->h_descr->p_pid;
__pthread_spin_unlock(&handle->h_lock);
__pthread_unlock(&handle->h_lock);
if (kill(pid, signo) == -1)
return errno;
else

View File

@ -36,7 +36,7 @@
This is safe because there are no concurrent __pthread_unlock
operations -- only the thread that locked the mutex can unlock it. */
void internal_function __pthread_lock(pthread_spinlock_t * lock,
void internal_function __pthread_lock(struct _pthread_fastlock * lock,
pthread_descr self)
{
long oldstatus, newstatus;
@ -83,14 +83,8 @@ void internal_function __pthread_lock(pthread_spinlock_t * lock,
while (spurious_wakeup_count--)
restart(self);
}
int __pthread_spin_lock(pthread_spinlock_t * lock)
{
__pthread_lock (lock, NULL);
return 0;
}
weak_alias (__pthread_spin_lock, pthread_spin_lock)
int __pthread_spin_unlock(pthread_spinlock_t * lock)
int __pthread_unlock(struct _pthread_fastlock * lock)
{
long oldstatus;
pthread_descr thr, * ptr, * maxptr;
@ -151,32 +145,8 @@ again:
return 0;
}
weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
int __pthread_spin_trylock (pthread_spinlock_t *lock)
{
return __pthread_trylock (lock);
}
weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
int __pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{
if (pshared != 0)
return ENOSYS;
__pthread_init_lock (lock);
return 0;
}
weak_alias (__pthread_spin_init, pthread_spin_init)
int __pthread_spin_destroy(pthread_spinlock_t *lock)
{
/* Nothing to do. */
return 0;
}
weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
/* Compare-and-swap emulation with a spinlock */
#ifdef TEST_FOR_COMPARE_AND_SWAP

View File

@ -50,17 +50,17 @@ static inline int compare_and_swap(long * ptr, long oldval, long newval,
/* Internal locks */
extern void internal_function __pthread_lock(pthread_spinlock_t * lock,
extern void internal_function __pthread_lock(struct _pthread_fastlock * lock,
pthread_descr self);
extern int __pthread_spin_unlock(pthread_spinlock_t *lock);
extern int __pthread_unlock(struct _pthread_fastlock *lock);
static inline void __pthread_init_lock(pthread_spinlock_t * lock)
static inline void __pthread_init_lock(struct _pthread_fastlock * lock)
{
lock->__status = 0;
lock->__spinlock = 0;
}
static inline int __pthread_trylock (pthread_spinlock_t * lock)
static inline int __pthread_trylock (struct _pthread_fastlock * lock)
{
long oldstatus;

View File

@ -0,0 +1,109 @@
/* POSIX spinlock implementation. Alpha version.
Copyright (C) 2000 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <errno.h>
#include <pthread.h>
/* This implementation is similar to the one used in the Linux kernel.
But the kernel version uses byte instructions for the memory access. This is
faster but unusable here. The problem is that only 128
threads/processes could use the spinlock at the same time. If (by
a design error in the program) a thread/process would hold the
spinlock for a time long enough to accumulate 128 waiting
processes, the next one will find a positive value in the spinlock
and assume it is unlocked. We cannot accept that. */
/* Acquire LOCK, spinning until it is free.  Always returns 0.
   Lock word convention on Alpha: bit 0 clear = free, set = held.  */
int
__pthread_spin_lock (pthread_spinlock_t *lock)
{
unsigned int tmp;
/* LL/SC acquire loop.  The out-of-line wait loop (subsection 2) spins
   with plain loads until bit 0 clears, then retries the locked
   sequence; the trailing `mb' orders the acquisition before later
   memory accesses.
   Fix: the memory constraints must name the lock word `*lock', not
   the pointer variable `lock' -- the original constraints claimed the
   asm rewrote the pointer itself and never touched the pointed-to
   word (the trylock below already uses `*lock').  `tmp' is written
   before the memory input is dead, so it needs the `&' earlyclobber,
   again matching trylock.  */
asm volatile
("1: ldl_l %0,%1\n"
" blbs %0,2f\n"
" or %0,1,%0\n"
" stl_c %0,%1\n"
" beq %0,2f\n"
" mb\n"
".subsection 2\n"
"2: ldl %0,%1\n"
" blbs %0,2b\n"
" br 1b\n"
".previous"
: "=&r" (tmp), "=m" (*lock)
: "m" (*lock));
return 0;
}
weak_alias (__pthread_spin_lock, pthread_spin_lock)
/* Try to acquire LOCK without blocking.
   Returns 0 on success, EBUSY if the lock is already held.  */
int
__pthread_spin_trylock (pthread_spinlock_t *lock)
{
unsigned long int oldval;
unsigned long int temp;
/* LL/SC attempt: load-locked the lock word, extract bit 0 into
   OLDVAL; if it is set the lock is held and we give up (branch 2f).
   Otherwise set the bit and store-conditionally; only an SC failure
   (branch 3f) retries.  The `mb' orders the acquisition.  */
asm volatile
("1: ldl_l %0,%1\n"
" and %0,%3,%2\n"
" bne %2,2f\n"
" xor %0,%3,%0\n"
" stl_c %0,%1\n"
" beq %0,3f\n"
" mb\n"
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r" (temp), "=m" (*lock), "=&r" (oldval)
: "Ir" (1UL), "m" (*lock));
/* OLDVAL holds the tested bit: 0 means we took the lock.  */
return oldval == 0 ? 0 : EBUSY;
}
weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
/* Release LOCK.  The `mb' makes the critical section's memory
   operations visible before the lock word is cleared; the store of 0
   marks the lock free.  Returns 0 (the value of the assignment).
   NOTE(review): the barrier asm carries no "memory" clobber, so it is
   not a compiler-level barrier -- confirm callers do not rely on
   that.  */
int
__pthread_spin_unlock (pthread_spinlock_t *lock)
{
asm volatile ("mb");
return *lock = 0;
}
weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
/* Put LOCK into the released state (0 means free here).  PSHARED is
   ignored: a busy-wait lock works for every process that can map the
   lock word, so process-shared use needs no extra setup.  */
int
__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
{
  lock[0] = 0;
  return 0;
}
weak_alias (__pthread_spin_init, pthread_spin_init)
/* Tear down LOCK.  A spinlock holds no resources, so this is a no-op
   that reports success.  */
int
__pthread_spin_destroy (pthread_spinlock_t *lock)
{
  (void) lock;
  return 0;
}
weak_alias (__pthread_spin_destroy, pthread_spin_destroy)

View File

@ -0,0 +1,81 @@
/* POSIX spinlock implementation. Arm version.
Copyright (C) 2000 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <errno.h>
#include <pthread.h>
/* Acquire LOCK, busy-waiting until it is free.  Always returns 0.
   Lock word convention: 0 = free, nonzero = held.  */
int
__pthread_spin_lock (pthread_spinlock_t *lock)
{
unsigned int val;
/* Atomically swap 1 into the lock word; keep trying while the
   previous value shows somebody else already held it.  */
do
asm volatile ("swp %0, %1, [%2]"
: "=r" (val)
: "0" (1), "r" (lock)
: "memory");
while (val != 0);
return 0;
}
weak_alias (__pthread_spin_lock, pthread_spin_lock)
/* Single atomic swap attempt on LOCK: returns 0 if the previous value
   was 0 (lock was free, we now hold it), EBUSY otherwise.  */
int
__pthread_spin_trylock (pthread_spinlock_t *lock)
{
unsigned int val;
asm volatile ("swp %0, %1, [%2]"
: "=r" (val)
: "0" (1), "r" (lock)
: "memory");
return val ? EBUSY : 0;
}
weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
/* Release LOCK by storing 0, the unlocked value; always returns 0.  */
int
__pthread_spin_unlock (pthread_spinlock_t *lock)
{
  lock[0] = 0;
  return 0;
}
weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
/* Put LOCK into the released state (0 = free).  PSHARED is ignored:
   since we busy-wait, any process that can map the lock word can use
   the spinlock.  */
int
__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
{
  lock[0] = 0;
  return 0;
}
weak_alias (__pthread_spin_init, pthread_spin_init)
/* Tear down LOCK.  A spinlock holds no resources, so this is a no-op
   that reports success.  */
int
__pthread_spin_destroy (pthread_spinlock_t *lock)
{
  (void) lock;
  return 0;
}
weak_alias (__pthread_spin_destroy, pthread_spin_destroy)

View File

@ -0,0 +1,97 @@
/* POSIX spinlock implementation. x86 version.
Copyright (C) 2000 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <errno.h>
#include <pthread.h>
/* This implementation is similar to the one used in the Linux kernel.
But the kernel version uses byte instructions for the memory access. This is
faster but unusable here. The problem is that only 128
threads/processes could use the spinlock at the same time. If (by
a design error in the program) a thread/process would hold the
spinlock for a time long enough to accumulate 128 waiting
processes, the next one will find a positive value in the spinlock
and assume it is unlocked. We cannot accept that. */
/* Acquire LOCK.  Lock word convention on x86: 1 = free; the atomic
   `lock; decl' takes it to 0 (held), and the sign flag reveals whether
   it was already held.  The wait loop lives in a separate section
   (.text.spinlock) and spins with `rep; nop' until the word turns
   positive again, then retries the decrement.  Always returns 0.  */
int
__pthread_spin_lock (pthread_spinlock_t *lock)
{
asm volatile
("\n"
"1:\n\t"
"lock; decl %0\n\t"
"js 2f\n\t"
".section .text.spinlock,\"ax\"\n"
"2:\n\t"
"cmpl $0,%0\n\t"
"rep; nop\n\t"
"jle 2b\n\t"
"jmp 1b\n\t"
".previous"
: "=m" (*lock));
return 0;
}
weak_alias (__pthread_spin_lock, pthread_spin_lock)
/* Try to acquire LOCK: atomically exchange 0 into the lock word.  A
   previous value > 0 (i.e. 1, free) means we took it; 0 or negative
   means it was held.  Returns 0 on success, EBUSY otherwise.
   NOTE(review): the xchg has no "memory" clobber -- confirm callers
   do not need it as a compiler barrier.  */
int
__pthread_spin_trylock (pthread_spinlock_t *lock)
{
int oldval;
asm volatile
("xchgl %0,%1"
: "=r" (oldval), "=m" (*lock)
: "0" (0));
return oldval > 0 ? 0 : EBUSY;
}
weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
/* Release LOCK by storing 1, the free value, back into the lock word.
   Always returns 0.  */
int
__pthread_spin_unlock (pthread_spinlock_t *lock)
{
asm volatile
("movl $1,%0"
: "=m" (*lock));
return 0;
}
weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
/* Put LOCK into the released state -- 1 means free in this x86
   implementation (the lock routine decrements it).  PSHARED is
   ignored: since we busy-wait, any process that can map the lock word
   can use the spinlock.  */
int
__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
{
  lock[0] = 1;
  return 0;
}
weak_alias (__pthread_spin_init, pthread_spin_init)
/* Tear down LOCK.  A spinlock holds no resources, so this is a no-op
   that reports success.  */
int
__pthread_spin_destroy (pthread_spinlock_t *lock)
{
  (void) lock;
  return 0;
}
weak_alias (__pthread_spin_destroy, pthread_spin_destroy)

View File

@ -0,0 +1,81 @@
/* POSIX spinlock implementation. M68k version.
Copyright (C) 2000 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <errno.h>
#include <pthread.h>
/* Acquire LOCK, busy-waiting until it is free.  Always returns 0.
   `tas' atomically tests and sets the lock byte; `sne' copies the
   "was already set" condition into VAL, so we loop while somebody
   else held the lock.  */
int
__pthread_spin_lock (pthread_spinlock_t *lock)
{
unsigned int val;
do
asm volatile ("tas %1; sne %0"
: "=dm" (val), "=m" (*lock)
: "m" (*lock)
: "cc");
while (val);
return 0;
}
weak_alias (__pthread_spin_lock, pthread_spin_lock)
/* Single test-and-set attempt on LOCK: returns 0 if the lock byte was
   clear (we now hold it), EBUSY if it was already set.  */
int
__pthread_spin_trylock (pthread_spinlock_t *lock)
{
unsigned int val;
asm volatile ("tas %1; sne %0"
: "=dm" (val), "=m" (*lock)
: "m" (*lock)
: "cc");
return val ? EBUSY : 0;
}
weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
/* Release LOCK by storing 0, the unlocked value; always returns 0.  */
int
__pthread_spin_unlock (pthread_spinlock_t *lock)
{
  lock[0] = 0;
  return 0;
}
weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
/* Put LOCK into the released state (0 = free).  PSHARED is ignored:
   since we busy-wait, any process that can map the lock word can use
   the spinlock.  */
int
__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
{
  lock[0] = 0;
  return 0;
}
weak_alias (__pthread_spin_init, pthread_spin_init)
/* Tear down LOCK.  A spinlock holds no resources, so this is a no-op
   that reports success.  */
int
__pthread_spin_destroy (pthread_spinlock_t *lock)
{
  (void) lock;
  return 0;
}
weak_alias (__pthread_spin_destroy, pthread_spin_destroy)

View File

@ -0,0 +1,66 @@
/* POSIX spinlock implementation. MIPS version.
Copyright (C) 2000 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <errno.h>
#include <pthread.h>
/* NOTE(review): this file is a template -- each XXX below marks where the
   MIPS-specific atomic code (presumably ll/sc based -- TODO confirm) has
   yet to be written.  As it stands the file will not compile.  */
/* Acquire LOCK, spinning until it is free; should always return 0.  */
int
__pthread_spin_lock (pthread_spinlock_t *lock)
{
XXX
}
weak_alias (__pthread_spin_lock, pthread_spin_lock)
/* Try once to acquire LOCK; should return 0 on success, EBUSY if the
   lock is already held.  */
int
__pthread_spin_trylock (pthread_spinlock_t *lock)
{
XXX
}
weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
/* Release LOCK; should always return 0.  */
int
__pthread_spin_unlock (pthread_spinlock_t *lock)
{
XXX
}
weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
/* Initialize LOCK to the unlocked state.  */
int
__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
{
/* We can ignore the `pshared' parameter. Since we are busy-waiting
all processes which can access the memory location `lock' points
to can use the spinlock. */
XXX
return 0;
}
weak_alias (__pthread_spin_init, pthread_spin_init)
/* Destroy LOCK; the spinlock holds no resources beyond the lock word.  */
int
__pthread_spin_destroy (pthread_spinlock_t *lock)
{
/* Nothing to do. */
return 0;
}
weak_alias (__pthread_spin_destroy, pthread_spin_destroy)

View File

@ -0,0 +1,66 @@
/* POSIX spinlock implementation. PowerPC version.
Copyright (C) 2000 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <errno.h>
#include <pthread.h>
/* NOTE(review): this file is a template -- each XXX below marks where the
   PowerPC-specific atomic code (presumably lwarx/stwcx. based -- TODO
   confirm) has yet to be written.  As it stands the file will not
   compile.  */
/* Acquire LOCK, spinning until it is free; should always return 0.  */
int
__pthread_spin_lock (pthread_spinlock_t *lock)
{
XXX
}
weak_alias (__pthread_spin_lock, pthread_spin_lock)
/* Try once to acquire LOCK; should return 0 on success, EBUSY if the
   lock is already held.  */
int
__pthread_spin_trylock (pthread_spinlock_t *lock)
{
XXX
}
weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
/* Release LOCK; should always return 0.  */
int
__pthread_spin_unlock (pthread_spinlock_t *lock)
{
XXX
}
weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
/* Initialize LOCK to the unlocked state.  */
int
__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
{
/* We can ignore the `pshared' parameter. Since we are busy-waiting
all processes which can access the memory location `lock' points
to can use the spinlock. */
XXX
return 0;
}
weak_alias (__pthread_spin_init, pthread_spin_init)
/* Destroy LOCK; the spinlock holds no resources beyond the lock word.  */
int
__pthread_spin_destroy (pthread_spinlock_t *lock)
{
/* Nothing to do. */
return 0;
}
weak_alias (__pthread_spin_destroy, pthread_spin_destroy)

View File

@ -23,11 +23,11 @@
#include <bits/sched.h>
/* Fast locks (not abstract because mutexes and conditions aren't abstract). */
typedef struct
struct _pthread_fastlock
{
long int __status; /* "Free" or "taken" or head of waiting list */
int __spinlock; /* For compare-and-swap emulation */
} pthread_spinlock_t;
};
#ifndef _PTHREAD_DESCR_DEFINED
/* Thread descriptors */
@ -54,7 +54,7 @@ typedef struct
/* Conditions (not abstract because of PTHREAD_COND_INITIALIZER */
typedef struct
{
pthread_spinlock_t __c_lock; /* Protect against concurrent access */
struct _pthread_fastlock __c_lock; /* Protect against concurrent access */
_pthread_descr __c_waiting; /* Threads waiting on this condition */
} pthread_cond_t;
@ -78,7 +78,7 @@ typedef struct
int __m_count; /* Depth of recursive locking */
_pthread_descr __m_owner; /* Owner thread (if recursive or errcheck) */
int __m_kind; /* Mutex kind: fast, recursive or errcheck */
pthread_spinlock_t __m_lock; /* Underlying fast lock */
struct _pthread_fastlock __m_lock; /* Underlying fast lock */
} pthread_mutex_t;
@ -97,7 +97,7 @@ typedef int pthread_once_t;
/* Read-write locks. */
typedef struct _pthread_rwlock_t
{
pthread_spinlock_t __rw_lock; /* Lock to guarantee mutual exclusion */
struct _pthread_fastlock __rw_lock; /* Lock to guarantee mutual exclusion */
int __rw_readers; /* Number of readers */
_pthread_descr __rw_writer; /* Identity of writer, or NULL if none */
_pthread_descr __rw_read_waiting; /* Threads waiting for reading */
@ -115,6 +115,11 @@ typedef struct
} pthread_rwlockattr_t;
#endif
#ifdef __USE_XOPEN2K
/* POSIX spinlock data type. */
typedef volatile int pthread_spinlock_t;
#endif
/* Thread identifiers */
typedef unsigned long int pthread_t;

View File

@ -0,0 +1,66 @@
/* POSIX spinlock implementation. SPARC32 version.
Copyright (C) 2000 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <errno.h>
#include <pthread.h>
/* NOTE(review): this file is a template -- each XXX below marks where the
   SPARC32-specific atomic code (presumably ldstub based -- TODO confirm)
   has yet to be written.  As it stands the file will not compile.  */
/* Acquire LOCK, spinning until it is free; should always return 0.  */
int
__pthread_spin_lock (pthread_spinlock_t *lock)
{
XXX
}
weak_alias (__pthread_spin_lock, pthread_spin_lock)
/* Try once to acquire LOCK; should return 0 on success, EBUSY if the
   lock is already held.  */
int
__pthread_spin_trylock (pthread_spinlock_t *lock)
{
XXX
}
weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
/* Release LOCK; should always return 0.  */
int
__pthread_spin_unlock (pthread_spinlock_t *lock)
{
XXX
}
weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
/* Initialize LOCK to the unlocked state.  */
int
__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
{
/* We can ignore the `pshared' parameter. Since we are busy-waiting
all processes which can access the memory location `lock' points
to can use the spinlock. */
XXX
return 0;
}
weak_alias (__pthread_spin_init, pthread_spin_init)
/* Destroy LOCK; the spinlock holds no resources beyond the lock word.  */
int
__pthread_spin_destroy (pthread_spinlock_t *lock)
{
/* Nothing to do. */
return 0;
}
weak_alias (__pthread_spin_destroy, pthread_spin_destroy)

View File

@ -0,0 +1,66 @@
/* POSIX spinlock implementation. SPARC64 version.
Copyright (C) 2000 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <errno.h>
#include <pthread.h>
/* NOTE(review): this file is a template -- each XXX below marks where the
   SPARC64-specific atomic code (presumably cas/ldstub based -- TODO
   confirm) has yet to be written.  As it stands the file will not
   compile.  */
/* Acquire LOCK, spinning until it is free; should always return 0.  */
int
__pthread_spin_lock (pthread_spinlock_t *lock)
{
XXX
}
weak_alias (__pthread_spin_lock, pthread_spin_lock)
/* Try once to acquire LOCK; should return 0 on success, EBUSY if the
   lock is already held.  */
int
__pthread_spin_trylock (pthread_spinlock_t *lock)
{
XXX
}
weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
/* Release LOCK; should always return 0.  */
int
__pthread_spin_unlock (pthread_spinlock_t *lock)
{
XXX
}
weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
/* Initialize LOCK to the unlocked state.  */
int
__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
{
/* We can ignore the `pshared' parameter. Since we are busy-waiting
all processes which can access the memory location `lock' points
to can use the spinlock. */
XXX
return 0;
}
weak_alias (__pthread_spin_init, pthread_spin_init)
/* Destroy LOCK; the spinlock holds no resources beyond the lock word.  */
int
__pthread_spin_destroy (pthread_spinlock_t *lock)
{
/* Nothing to do. */
return 0;
}
weak_alias (__pthread_spin_destroy, pthread_spin_destroy)

View File

@ -305,7 +305,8 @@ __inline_mathcode (__pow2, __x, \
: "=t" (__value) : "0" (__value), "u" (__exponent)); \
return __value)
#define __sincos_code \
#ifdef __USE_GNU
# define __sincos_code \
register long double __cosr; \
register long double __sinr; \
__asm __volatile__ \
@ -344,6 +345,7 @@ __sincosl (long double __x, long double *__sinx, long double *__cosx)
{
__sincos_code;
}
#endif
/* Optimized inline implementation, sometimes with reduced precision