Finish user stack support. Change locking code to be safe in situations with different priorities.
1998-06-25 19:27  Ulrich Drepper  <drepper@cygnus.com>

	* attr.c: Finish user stack support.  Change locking code to be safe
	  in situations with different priorities.
	* cancel.c: Likewise.
	* condvar.c: Likewise.
	* internals.h: Likewise.
	* join.c: Likewise.
	* manager.c: Likewise.
	* mutex.c: Likewise.
	* pthread.c: Likewise.
	* ptlongjmp.c: Likewise.
	* queue.h: Likewise.
	* rwlock.c: Likewise.
	* semaphore.c: Likewise.
	* semaphore.h: Likewise.
	* signals.c: Likewise.
	* spinlock.c: Likewise.
	* spinlock.h: Likewise.
	Patches by Xavier Leroy.

1998-06-25  Ulrich Drepper  <drepper@cygnus.com>

	* sysdeps/pthread/pthread.h: Make [sg]et_stacksize and
	  [sg]et_stackaddr prototypes always available.
	* sysdeps/unix/sysv/linux/bits/posix_opt.h: Define
	  _POSIX_THREAD_ATTR_STACKSIZE and _POSIX_THREAD_ATTR_STACKADDR.
parent d47aac39, commit 3387a425e6
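The diffs below replace the old int-based spinlocks (acquire/release on an h_spinlock field) with a "fastlock" (__pthread_lock/__pthread_unlock on a struct _pthread_fastlock) whose waiters are queued by priority instead of spinning. The new spinlock.c/spinlock.h are changed by this commit but not shown on this page; the following is a minimal, hypothetical sketch of the idea only, not the actual LinuxThreads code. A plain spin loop can livelock when a high-priority thread spins while the low-priority lock holder never gets scheduled; here a blocked thread atomically pushes itself onto a waiter list stored in the lock word and suspends, and the unlocker wakes a waiter.

/* Schematic sketch only (assumed status encoding: 0 = free, 1 = taken
   with no waiters, any other value = head of the waiter list). */
struct fastlock { long status; };
struct waiter { struct waiter *next; };     /* stands in for pthread_descr */

extern void suspend(struct waiter *self);   /* assumed primitive: block self */
extern void restart(struct waiter *th);     /* assumed primitive: wake th */

static void fastlock_acquire(struct fastlock *lock, struct waiter *self)
{
  long oldval, newval;
  do {
    oldval = __atomic_load_n(&lock->status, __ATOMIC_RELAXED);
    if (oldval == 0)
      newval = 1;                           /* lock is free: take it */
    else {
      self->next = (struct waiter *) oldval; /* taken: enqueue ourselves */
      newval = (long) self;
    }
  } while (!__atomic_compare_exchange_n(&lock->status, &oldval, newval, 0,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
  if (oldval != 0)
    suspend(self);   /* the unlocker hands the lock over and restarts us */
}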
Makeconfig
@@ -564,8 +564,9 @@ endif # $(+cflags) == ""
 libio-include = -I$(..)libio
 
 # These are the variables that the implicit compilation rules use.
-CPPFLAGS = $(+includes) $(defines) -include $(..)include/libc-symbols.h \
-	   $(sysdep-CPPFLAGS) $(CPPFLAGS-$(suffix $@)) $(CPPFLAGS-$(<F)) \
+CPPFLAGS = $($(subdir)-CPPFLAGS) $(+includes) $(defines) \
+	   -include $(..)include/libc-symbols.h $(sysdep-CPPFLAGS) \
+	   $($(subdir)-CPPFLAGS) $(CPPFLAGS-$(suffix $@)) $(CPPFLAGS-$(<F)) \
 	   $(CPPFLAGS-$(@F))
 override CFLAGS = $(filter-out %frame-pointer,$(+cflags)) $(sysdep-CFLAGS) \
 	   $(CFLAGS-$(suffix $@)) $(CFLAGS-$(<F)) $(CFLAGS-$(@F))
libc.map
@@ -87,7 +87,8 @@ GLIBC_2.0 {
     __ffs;
     __close; __connect; __fcntl; __lseek; __open; __read; __send; __wait;
     __ieee_get_fp_control; __ieee_set_fp_control;
-    __dgettext;
+    __dgettext; __sigaction;
 
     # libio
     _IO_adjust_column; _IO_clearerr; _IO_default_doallocate;
     _IO_default_finish; _IO_default_pbackfail; _IO_default_uflow;
ChangeLog
@@ -1,3 +1,32 @@
+1998-06-25 19:27  Ulrich Drepper  <drepper@cygnus.com>
+
+	* attr.c: Finish user stack support.  Change locking code to be safe
+	  in situations with different priorities.
+	* cancel.c: Likewise.
+	* condvar.c: Likewise.
+	* internals.h: Likewise.
+	* join.c: Likewise.
+	* manager.c: Likewise.
+	* mutex.c: Likewise.
+	* pthread.c: Likewise.
+	* ptlongjmp.c: Likewise.
+	* queue.h: Likewise.
+	* rwlock.c: Likewise.
+	* semaphore.c: Likewise.
+	* semaphore.h: Likewise.
+	* signals.c: Likewise.
+	* spinlock.c: Likewise.
+	* spinlock.h: Likewise.
+	Patches by Xavier Leroy.
+
+1998-06-25  Ulrich Drepper  <drepper@cygnus.com>
+
+	* sysdeps/pthread/pthread.h: Make [sg]et_stacksize and
+	  [sg]et_stackaddr prototypes always available.
+
+	* sysdeps/unix/sysv/linux/bits/posix_opt.h: Define
+	  _POSIX_THREAD_ATTR_STACKSIZE and _POSIX_THREAD_ATTR_STACKADDR.
+
 1998-06-24  Ulrich Drepper  <drepper@cygnus.com>
 
 	* manager.c (pthread_free): Undo patch from 980430.
attr.c
@@ -19,7 +19,6 @@
 #include "pthread.h"
 #include "internals.h"
-
 
 int __pthread_attr_init_2_1(pthread_attr_t *attr)
 {
   size_t ps = __getpagesize ();
@@ -185,11 +184,8 @@ weak_alias (__pthread_attr_getstackaddr, pthread_attr_getstackaddr)
 
 int __pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
 {
-  size_t ps = __getpagesize ();
-
-  /* We don't accept value smaller than PTHREAD_STACK_MIN or bigger than
-     2MB - pagesize. */
-  if (stacksize < PTHREAD_STACK_MIN || stacksize > STACK_SIZE - ps)
+  /* We don't accept value smaller than PTHREAD_STACK_MIN. */
+  if (stacksize < PTHREAD_STACK_MIN)
     return EINVAL;
 
   attr->stacksize = stacksize;
cancel.c
@@ -53,14 +53,14 @@ int pthread_cancel(pthread_t thread)
   pthread_handle handle = thread_handle(thread);
   int pid;
 
-  acquire(&handle->h_spinlock);
+  __pthread_lock(&handle->h_lock);
   if (invalid_handle(handle, thread)) {
-    release(&handle->h_spinlock);
+    __pthread_unlock(&handle->h_lock);
     return ESRCH;
   }
   handle->h_descr->p_canceled = 1;
   pid = handle->h_descr->p_pid;
-  release(&handle->h_spinlock);
+  __pthread_unlock(&handle->h_lock);
   kill(pid, PTHREAD_SIG_CANCEL);
   return 0;
 }
condvar.c
@@ -25,42 +25,36 @@
 #include "queue.h"
 #include "restart.h"
 
-static void remove_from_queue(pthread_queue * q, pthread_descr th);
-
 int pthread_cond_init(pthread_cond_t *cond,
                       const pthread_condattr_t *cond_attr)
 {
-  cond->c_spinlock = 0;
-  queue_init(&cond->c_waiting);
+  __pthread_init_lock(&cond->c_lock);
+  cond->c_waiting = NULL;
   return 0;
 }
 
 int pthread_cond_destroy(pthread_cond_t *cond)
 {
-  pthread_descr head;
-
-  acquire(&cond->c_spinlock);
-  head = cond->c_waiting.head;
-  release(&cond->c_spinlock);
-  if (head != NULL) return EBUSY;
+  if (cond->c_waiting != NULL) return EBUSY;
   return 0;
 }
 
 int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 {
   volatile pthread_descr self = thread_self();
-  acquire(&cond->c_spinlock);
 
+  __pthread_lock(&cond->c_lock);
   enqueue(&cond->c_waiting, self);
-  release(&cond->c_spinlock);
+  __pthread_unlock(&cond->c_lock);
   pthread_mutex_unlock(mutex);
   suspend_with_cancellation(self);
   pthread_mutex_lock(mutex);
   /* This is a cancellation point */
   if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
     /* Remove ourselves from the waiting queue if we're still on it */
-    acquire(&cond->c_spinlock);
+    __pthread_lock(&cond->c_lock);
     remove_from_queue(&cond->c_waiting, self);
-    release(&cond->c_spinlock);
+    __pthread_unlock(&cond->c_lock);
     pthread_exit(PTHREAD_CANCELED);
   }
   return 0;
@@ -77,15 +71,14 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond,
   sigjmp_buf jmpbuf;
 
   /* Wait on the condition */
-  acquire(&cond->c_spinlock);
+  __pthread_lock(&cond->c_lock);
   enqueue(&cond->c_waiting, self);
-  release(&cond->c_spinlock);
+  __pthread_unlock(&cond->c_lock);
   pthread_mutex_unlock(mutex);
-  /* Set up a longjmp handler for the restart signal */
-  /* No need to save the signal mask, since PTHREAD_SIG_RESTART will be
-     blocked when doing the siglongjmp, and we'll just leave it blocked. */
-  if (sigsetjmp(jmpbuf, 0) == 0) {
+  /* Set up a longjmp handler for the restart and cancel signals */
+  if (sigsetjmp(jmpbuf, 1) == 0) {
     self->p_signal_jmp = &jmpbuf;
+    self->p_cancel_jmp = &jmpbuf;
     self->p_signal = 0;
     /* Check for cancellation */
     if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
@@ -104,28 +97,28 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond,
       retsleep = -1;
   }
   self->p_signal_jmp = NULL;
+  self->p_cancel_jmp = NULL;
   /* Here, either the condition was signaled (self->p_signal != 0)
     or we got canceled (self->p_canceled != 0)
     or the timeout occurred (retsleep == 0)
     or another interrupt occurred (retsleep == -1) */
-  /* Re-acquire the spinlock */
-  acquire(&cond->c_spinlock);
   /* This is a cancellation point */
   if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
+    __pthread_lock(&cond->c_lock);
     remove_from_queue(&cond->c_waiting, self);
-    release(&cond->c_spinlock);
+    __pthread_unlock(&cond->c_lock);
     pthread_mutex_lock(mutex);
     pthread_exit(PTHREAD_CANCELED);
   }
   /* If not signaled: also remove ourselves and return an error code */
   if (self->p_signal == 0) {
+    __pthread_lock(&cond->c_lock);
     remove_from_queue(&cond->c_waiting, self);
-    release(&cond->c_spinlock);
+    __pthread_unlock(&cond->c_lock);
     pthread_mutex_lock(mutex);
     return retsleep == 0 ? ETIMEDOUT : EINTR;
   }
   /* Otherwise, return normally */
-  release(&cond->c_spinlock);
   pthread_mutex_lock(mutex);
   return 0;
 }
@@ -151,23 +144,22 @@ int pthread_cond_signal(pthread_cond_t *cond)
 {
   pthread_descr th;
 
-  acquire(&cond->c_spinlock);
+  __pthread_lock(&cond->c_lock);
   th = dequeue(&cond->c_waiting);
-  release(&cond->c_spinlock);
+  __pthread_unlock(&cond->c_lock);
   if (th != NULL) restart(th);
   return 0;
 }
 
 int pthread_cond_broadcast(pthread_cond_t *cond)
 {
-  pthread_queue tosignal;
-  pthread_descr th;
+  pthread_descr tosignal, th;
 
-  acquire(&cond->c_spinlock);
+  __pthread_lock(&cond->c_lock);
   /* Copy the current state of the waiting queue and empty it */
   tosignal = cond->c_waiting;
-  queue_init(&cond->c_waiting);
-  release(&cond->c_spinlock);
+  cond->c_waiting = NULL;
+  __pthread_unlock(&cond->c_lock);
   /* Now signal each process in the queue */
   while ((th = dequeue(&tosignal)) != NULL) restart(th);
   return 0;
@@ -182,26 +174,3 @@ int pthread_condattr_destroy(pthread_condattr_t *attr)
 {
   return 0;
 }
-
-/* Auxiliary function on queues */
-
-static void remove_from_queue(pthread_queue * q, pthread_descr th)
-{
-  pthread_descr t;
-
-  if (q->head == NULL) return;
-  if (q->head == th) {
-    q->head = th->p_nextwaiting;
-    if (q->head == NULL) q->tail = NULL;
-    th->p_nextwaiting = NULL;
-    return;
-  }
-  for (t = q->head; t->p_nextwaiting != NULL; t = t->p_nextwaiting) {
-    if (t->p_nextwaiting == th) {
-      t->p_nextwaiting = th->p_nextwaiting;
-      if (th->p_nextwaiting == NULL) q->tail = t;
-      th->p_nextwaiting = NULL;
-      return;
-    }
-  }
-}
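A detail worth noting in the condvar change above: pthread_cond_timedwait_relative now calls sigsetjmp with a second argument of 1 instead of 0. The removed comment explains that saving the mask used to be unnecessary because only PTHREAD_SIG_RESTART could siglongjmp there and it stayed blocked; once the cancel signal can also jump back, the saved mask must be restored. The following standalone program (a hypothetical demo, not part of the patch) shows the savemask=1 pattern in isolation:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static sigjmp_buf jmpbuf;

static void handler(int sig) { siglongjmp(jmpbuf, 1); }

int main(void)
{
  signal(SIGALRM, handler);
  /* Second argument 1: the signal mask in effect at sigsetjmp time is
     saved and restored by siglongjmp, so a mask changed inside the
     handler cannot leak into the code after the jump. */
  if (sigsetjmp(jmpbuf, 1) == 0) {
    alarm(1);
    pause();                       /* handler jumps back with value 1 */
  } else {
    puts("returned via siglongjmp, signal mask restored");
  }
  return 0;
}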
internals.h
@@ -63,7 +63,7 @@ struct _pthread_descr_struct {
   pthread_t p_tid;             /* Thread identifier */
   int p_pid;                   /* PID of Unix process */
   int p_priority;              /* Thread priority (== 0 if not realtime) */
-  int * p_spinlock;            /* Spinlock for synchronized accesses */
+  struct _pthread_fastlock * p_lock; /* Spinlock for synchronized accesses */
   int p_signal;                /* last signal received */
   sigjmp_buf * p_signal_jmp;   /* where to siglongjmp on a signal or NULL */
   sigjmp_buf * p_cancel_jmp;   /* where to siglongjmp on a cancel or NULL */
@@ -81,6 +81,8 @@ struct _pthread_descr_struct {
   int p_errno;                 /* error returned by last system call */
   int * p_h_errnop;            /* pointer to used h_errno variable */
   int p_h_errno;               /* error returned by last netdb function */
+  char * p_in_sighandler;      /* stack address of sighandler, or NULL */
+  char p_sigwaiting;           /* true if a sigwait() is in progress */
   struct pthread_start_args p_start_args; /* arguments for thread creation */
   void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE]; /* thread-specific data */
   void * p_libc_specific[_LIBC_TSD_KEY_N]; /* thread-specific data for libc */
@@ -94,8 +96,9 @@ struct _pthread_descr_struct {
 typedef struct pthread_handle_struct * pthread_handle;
 
 struct pthread_handle_struct {
-  int h_spinlock;              /* Spinlock for sychronized access */
+  struct _pthread_fastlock h_lock; /* Fast lock for sychronized access */
   pthread_descr h_descr;       /* Thread descriptor or NULL if invalid */
+  char * h_bottom;             /* Lowest address in the stack thread */
 };
 
 /* The type of messages sent to the thread manager thread */
@@ -103,7 +106,8 @@ struct pthread_handle_struct {
 struct pthread_request {
   pthread_descr req_thread;    /* Thread doing the request */
   enum {                       /* Request kind */
-    REQ_CREATE, REQ_FREE, REQ_PROCESS_EXIT, REQ_MAIN_THREAD_EXIT
+    REQ_CREATE, REQ_FREE, REQ_PROCESS_EXIT, REQ_MAIN_THREAD_EXIT,
+    REQ_POST, REQ_DEBUG
   } req_kind;
   union {                      /* Arguments for request */
     struct {                   /* For REQ_CREATE: */
@@ -118,6 +122,7 @@ struct pthread_request {
     struct {                   /* For REQ_PROCESS_EXIT: */
       int code;                /* exit status */
     } exit;
+    void * post;               /* For REQ_POST: the semaphore */
   } req_args;
 };
 
@@ -160,6 +165,11 @@ extern pthread_descr __pthread_main_thread;
 
 extern char *__pthread_initial_thread_bos;
 
+/* Indicate whether at least one thread has a user-defined stack (if 1),
+   or all threads have stacks supplied by LinuxThreads (if 0). */
+
+extern int __pthread_nonstandard_stacks;
+
 /* File descriptor for sending requests to the thread manager.
    Initially -1, meaning that pthread_initialize must be called. */
 
@@ -178,6 +188,10 @@ extern char *__pthread_manager_thread_tos;
 
 extern int __pthread_exit_requested, __pthread_exit_code;
 
+/* Set to 1 by gdb if we're debugging */
+
+extern volatile int __pthread_threads_debug;
+
 /* Return the handle corresponding to a thread id */
 
 static inline pthread_handle thread_handle(pthread_t id)
@@ -233,6 +247,8 @@ static inline int invalid_handle(pthread_handle h, pthread_t id)
 
 /* Recover thread descriptor for the current thread */
 
+extern pthread_descr __pthread_find_self (void) __attribute__ ((const));
+
 static inline pthread_descr thread_self (void) __attribute__ ((const));
 static inline pthread_descr thread_self (void)
 {
@@ -245,6 +261,8 @@ static inline pthread_descr thread_self (void)
   else if (sp >= __pthread_manager_thread_bos
            && sp < __pthread_manager_thread_tos)
     return &__pthread_manager_thread;
+  else if (__pthread_nonstandard_stacks)
+    return __pthread_find_self();
   else
     return (pthread_descr)(((unsigned long)sp | (STACK_SIZE-1))+1) - 1;
 #endif
@@ -282,8 +300,8 @@ static inline pthread_descr thread_self (void)
 
 void __pthread_destroy_specifics(void);
 void __pthread_perform_cleanup(void);
-void __pthread_sighandler(int sig);
-void __pthread_message(char * fmt, long arg, ...);
+int __pthread_initialize_manager(void);
+void __pthread_message(char * fmt, ...);
 int __pthread_manager(void *reqfd);
 void __pthread_manager_sighandler(int sig);
 void __pthread_reset_main_thread(void);
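The thread_self() fallback above locates the thread descriptor purely from the stack pointer: each LinuxThreads-allocated stack lives in a STACK_SIZE-aligned region with the descriptor at the top, so rounding sp up to the next boundary finds it. User-supplied stacks break this invariant, which is exactly why this commit adds the __pthread_nonstandard_stacks flag and the linear-search __pthread_find_self(). A small worked example of the arithmetic (the numbers are illustrative only):

#include <stdio.h>

#define STACK_SIZE 0x200000UL   /* 2 MB regions, matching the old check in attr.c */

int main(void)
{
  unsigned long sp = 0x40234567UL;          /* hypothetical stack pointer */
  /* (sp | (STACK_SIZE-1)) + 1 rounds sp up to the next STACK_SIZE
     boundary; the descriptor sits immediately below that boundary,
     hence the final "- 1" on the pthread_descr pointer in the header. */
  unsigned long top = (sp | (STACK_SIZE - 1)) + 1;
  printf("sp = %#lx, region top = %#lx, descriptor just below top\n", sp, top);
  return 0;
}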
join.c
@@ -35,13 +35,13 @@ void pthread_exit(void * retval)
   __pthread_perform_cleanup();
   __pthread_destroy_specifics();
   /* Store return value */
-  acquire(self->p_spinlock);
+  __pthread_lock(self->p_lock);
   self->p_retval = retval;
   /* Say that we've terminated */
   self->p_terminated = 1;
   /* See if someone is joining on us */
   joining = self->p_joining;
-  release(self->p_spinlock);
+  __pthread_unlock(self->p_lock);
   /* Restart joining thread if any */
   if (joining != NULL) restart(joining);
   /* If this is the initial thread, block until all threads have terminated.
@@ -65,36 +65,36 @@ int pthread_join(pthread_t thread_id, void ** thread_return)
   pthread_handle handle = thread_handle(thread_id);
   pthread_descr th;
 
-  acquire(&handle->h_spinlock);
+  __pthread_lock(&handle->h_lock);
   if (invalid_handle(handle, thread_id)) {
-    release(&handle->h_spinlock);
+    __pthread_unlock(&handle->h_lock);
     return ESRCH;
   }
   th = handle->h_descr;
   if (th == self) {
-    release(&handle->h_spinlock);
+    __pthread_unlock(&handle->h_lock);
     return EDEADLK;
   }
   /* If detached or already joined, error */
   if (th->p_detached || th->p_joining != NULL) {
-    release(&handle->h_spinlock);
+    __pthread_unlock(&handle->h_lock);
     return EINVAL;
   }
   /* If not terminated yet, suspend ourselves. */
   if (! th->p_terminated) {
     th->p_joining = self;
-    release(&handle->h_spinlock);
+    __pthread_unlock(&handle->h_lock);
     suspend_with_cancellation(self);
     /* This is a cancellation point */
     if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
       th->p_joining = NULL;
       pthread_exit(PTHREAD_CANCELED);
     }
-    acquire(&handle->h_spinlock);
+    __pthread_lock(&handle->h_lock);
   }
   /* Get return value */
   if (thread_return != NULL) *thread_return = th->p_retval;
-  release(&handle->h_spinlock);
+  __pthread_unlock(&handle->h_lock);
   /* Send notification to thread manager */
   if (__pthread_manager_request >= 0) {
     request.req_thread = self;
@@ -113,26 +113,26 @@ int pthread_detach(pthread_t thread_id)
   pthread_handle handle = thread_handle(thread_id);
   pthread_descr th;
 
-  acquire(&handle->h_spinlock);
+  __pthread_lock(&handle->h_lock);
   if (invalid_handle(handle, thread_id)) {
-    release(&handle->h_spinlock);
+    __pthread_unlock(&handle->h_lock);
     return ESRCH;
   }
   th = handle->h_descr;
   /* If already detached, error */
   if (th->p_detached) {
-    release(&handle->h_spinlock);
+    __pthread_unlock(&handle->h_lock);
     return EINVAL;
   }
   /* If already joining, don't do anything. */
   if (th->p_joining != NULL) {
-    release(&handle->h_spinlock);
+    __pthread_unlock(&handle->h_lock);
     return 0;
   }
   /* Mark as detached */
   th->p_detached = 1;
   terminated = th->p_terminated;
-  release(&handle->h_spinlock);
+  __pthread_unlock(&handle->h_lock);
   /* If already terminated, notify thread manager to reclaim resources */
   if (terminated && __pthread_manager_request >= 0) {
     request.req_thread = thread_self();
manager.c
@@ -31,15 +31,22 @@
 #include "internals.h"
+#include "spinlock.h"
 #include "restart.h"
+#include "semaphore.h"
 
 /* Array of active threads. Entry 0 is reserved for the initial thread. */
 struct pthread_handle_struct __pthread_handles[PTHREAD_THREADS_MAX] =
-{ { 0, &__pthread_initial_thread, 0}, /* All NULLs */ };
+{ { LOCK_INITIALIZER, &__pthread_initial_thread, 0}, /* All NULLs */ };
 
-int __pthread_nonstandard_stacks;
+/* Indicate whether at least one thread has a user-defined stack (if 1),
+   or if all threads have stacks supplied by LinuxThreads (if 0). */
+int __pthread_nonstandard_stacks = 0;
 
 /* Number of active entries in __pthread_handles (used by gdb) */
 volatile int __pthread_handles_num = 1;
 
+/* Whether to use debugger additional actions for thread creation
+   (set to 1 by gdb) */
+volatile int __pthread_threads_debug = 0;
+
 /* Mapping from stack segment to thread descriptor. */
 /* Stack segment numbers are also indices into the __pthread_handles array. */
@@ -93,12 +100,18 @@ int __pthread_manager(void *arg)
   /* Set the error variable. */
   __pthread_manager_thread.p_errnop = &__pthread_manager_thread.p_errno;
   __pthread_manager_thread.p_h_errnop = &__pthread_manager_thread.p_h_errno;
-  /* Block all signals except PTHREAD_SIG_RESTART */
+  /* Block all signals except PTHREAD_SIG_RESTART, PTHREAD_SIG_CANCEL
+     and SIGTRAP */
   sigfillset(&mask);
   sigdelset(&mask, PTHREAD_SIG_RESTART);
+  sigdelset(&mask, PTHREAD_SIG_CANCEL); /* for debugging new threads */
+  sigdelset(&mask, SIGTRAP);            /* for debugging purposes */
   sigprocmask(SIG_SETMASK, &mask, NULL);
+  /* Raise our priority to match that of main thread */
+  __pthread_manager_adjust_prio(__pthread_main_thread->p_priority);
+  /* Synchronize debugging of the thread manager */
+  n = __libc_read(reqfd, (char *)&request, sizeof(request));
+  ASSERT(n == sizeof(request) && request.req_kind == REQ_DEBUG);
   /* Enter server loop */
   while(1) {
     FD_ZERO(&readfds);
@@ -146,6 +159,14 @@ int __pthread_manager(void *arg)
         return 0;
       }
       break;
+    case REQ_POST:
+      sem_post(request.req_args.post);
+      break;
+    case REQ_DEBUG:
+      /* Make gdb aware of new thread */
+      if (__pthread_threads_debug) raise(PTHREAD_SIG_CANCEL);
+      restart(request.req_thread);
+      break;
     }
   }
 }
@@ -156,6 +177,7 @@ int __pthread_manager(void *arg)
 static int pthread_start_thread(void *arg)
 {
   pthread_descr self = (pthread_descr) arg;
+  struct pthread_request request;
   void * outcome;
   /* Initialize special thread_self processing, if any. */
 #ifdef INIT_THREAD_SELF
@@ -171,6 +193,14 @@ static int pthread_start_thread(void *arg)
   if (self->p_start_args.schedpolicy >= 0)
     __sched_setscheduler(self->p_pid, self->p_start_args.schedpolicy,
                          &self->p_start_args.schedparam);
+  /* Make gdb aware of new thread */
+  if (__pthread_threads_debug) {
+    request.req_thread = self;
+    request.req_kind = REQ_DEBUG;
+    __libc_write(__pthread_manager_request,
+                 (char *) &request, sizeof(request));
+    suspend(self);
+  }
   /* Run the thread code */
   outcome = self->p_start_args.start_routine(self->p_start_args.arg);
   /* Exit with the given return value */
@@ -188,6 +218,7 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
   char * new_thread_bottom;
   pthread_t new_thread_id;
   void *guardaddr = NULL;
+  size_t guardsize = 0;
 
   /* Find a free stack segment for the current stack */
   for (sseg = 1; ; sseg++)
@@ -211,13 +242,16 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
          and allocate it if necessary. */
       if (attr == NULL || attr->guardsize != 0)
         {
+          guardsize = attr ? attr->guardsize : __getpagesize ();
           guardaddr = mmap ((caddr_t)((char *)(new_thread+1)
                                       - STACK_SIZE),
-                            attr ? attr->guardsize : __getpagesize (),
-                            0, MAP_FIXED, -1, 0);
+                            guardsize, 0, MAP_FIXED, -1, 0);
           if (guardaddr == MAP_FAILED)
-            /* We don't make this an error. */
-            guardaddr = NULL;
+            {
+              /* We don't make this an error. */
+              guardaddr = NULL;
+              guardsize = 0;
+            }
         }
       break;
     }
@@ -238,7 +272,7 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
   new_thread->p_nextwaiting = NULL;
   new_thread->p_tid = new_thread_id;
   new_thread->p_priority = 0;
-  new_thread->p_spinlock = &(__pthread_handles[sseg].h_spinlock);
+  new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
   new_thread->p_signal = 0;
   new_thread->p_signal_jmp = NULL;
   new_thread->p_cancel_jmp = NULL;
@@ -255,16 +289,15 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
   new_thread->p_errno = 0;
   new_thread->p_h_errnop = &new_thread->p_h_errno;
   new_thread->p_h_errno = 0;
+  new_thread->p_in_sighandler = NULL;
+  new_thread->p_sigwaiting = 0;
   new_thread->p_guardaddr = guardaddr;
-  new_thread->p_guardsize = (guardaddr == NULL
-                             ? 0
-                             : (attr == NULL
-                                ? __getpagesize () : attr->guardsize));
+  new_thread->p_guardsize = guardsize;
+  new_thread->p_userstack = attr != NULL && attr->stackaddr_set;
   memset (new_thread->p_specific, '\0',
           PTHREAD_KEY_1STLEVEL_SIZE * sizeof (new_thread->p_specific[0]));
   /* Initialize the thread handle */
-  __pthread_handles[sseg].h_spinlock = 0; /* should already be 0 */
+  __pthread_init_lock(&__pthread_handles[sseg].h_lock);
   __pthread_handles[sseg].h_descr = new_thread;
+  __pthread_handles[sseg].h_bottom = new_thread_bottom;
   /* Determine scheduling parameters for the thread */
@@ -305,6 +338,8 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
       munmap(new_thread->p_guardaddr, new_thread->p_guardsize);
   }
   __pthread_handles[sseg].h_descr = NULL;
+  __pthread_handles[sseg].h_bottom = NULL;
+  __pthread_handles_num--;
   return errno;
 }
 /* Insert new thread in doubly linked list of active threads */
@@ -330,10 +365,12 @@ static void pthread_free(pthread_descr th)
   ASSERT(th->p_exited);
   /* Make the handle invalid */
   handle = thread_handle(th->p_tid);
-  acquire(&handle->h_spinlock);
+  __pthread_lock(&handle->h_lock);
   handle->h_descr = NULL;
+  handle->h_bottom = (char *)(-1L);
-  release(&handle->h_spinlock);
+  __pthread_unlock(&handle->h_lock);
   /* One fewer threads in __pthread_handles */
   __pthread_handles_num--;
   /* If initial thread, nothing to free */
   if (th == &__pthread_initial_thread) return;
+  if (!th->p_userstack)
@@ -360,10 +397,10 @@ static void pthread_exited(pid_t pid)
       th->p_nextlive->p_prevlive = th->p_prevlive;
       th->p_prevlive->p_nextlive = th->p_nextlive;
       /* Mark thread as exited, and if detached, free its resources */
-      acquire(th->p_spinlock);
+      __pthread_lock(th->p_lock);
       th->p_exited = 1;
       detached = th->p_detached;
-      release(th->p_spinlock);
+      __pthread_unlock(th->p_lock);
       if (detached) pthread_free(th);
       break;
     }
@@ -409,16 +446,16 @@ static void pthread_handle_free(pthread_descr th)
   } while (t != __pthread_main_thread);
   if (t != th) return;
 
-  acquire(th->p_spinlock);
+  __pthread_lock(th->p_lock);
   if (th->p_exited) {
-    release(th->p_spinlock);
+    __pthread_unlock(th->p_lock);
     pthread_free(th);
   } else {
    /* The Unix process of the thread is still running.
      Mark the thread as detached so that the thread manager will
      deallocate its resources when the Unix process exits. */
    th->p_detached = 1;
-    release(th->p_spinlock);
+    __pthread_unlock(th->p_lock);
   }
 }
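The REQ_POST and REQ_DEBUG cases added to the manager loop above both work through the same request pipe: a fixed-size struct pthread_request is written by the client thread and read by the manager inside its select() loop. This is what lets sem_post be called from a signal handler, where taking the semaphore's lock directly would be unsafe. A simplified sketch of the sender side (the names below are stand-ins, not the actual LinuxThreads declarations):

#include <unistd.h>

enum req_kind { REQ_CREATE, REQ_FREE, REQ_PROCESS_EXIT, REQ_MAIN_THREAD_EXIT,
                REQ_POST, REQ_DEBUG };

struct request {
  void *thread;                 /* requesting thread, unused for REQ_POST */
  enum req_kind kind;
  union { void *post; } args;   /* REQ_POST carries the semaphore address */
};

/* What sem_post does when called inside a signal handler: delegate the
   post operation to the manager thread via the pipe.  A single write of
   at most PIPE_BUF bytes to a pipe is atomic, so requests never interleave. */
static void send_post(int manager_fd, void *sem)
{
  struct request req = { 0 };
  req.kind = REQ_POST;
  req.args.post = sem;
  write(manager_fd, &req, sizeof req);
}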
mutex.c
@@ -26,23 +26,18 @@
 int __pthread_mutex_init(pthread_mutex_t * mutex,
                          const pthread_mutexattr_t * mutex_attr)
 {
-  mutex->m_spinlock = 0;
-  mutex->m_count = 0;
-  mutex->m_owner = NULL;
+  __pthread_init_lock(&mutex->m_lock);
   mutex->m_kind =
     mutex_attr == NULL ? PTHREAD_MUTEX_FAST_NP : mutex_attr->mutexkind;
-  queue_init(&mutex->m_waiting);
+  mutex->m_count = 0;
+  mutex->m_owner = NULL;
   return 0;
 }
 weak_alias (__pthread_mutex_init, pthread_mutex_init)
 
 int __pthread_mutex_destroy(pthread_mutex_t * mutex)
 {
-  int count;
-  acquire(&mutex->m_spinlock);
-  count = mutex->m_count;
-  release(&mutex->m_spinlock);
-  if (count > 0) return EBUSY;
+  if (mutex->m_lock.status != 0) return EBUSY;
   return 0;
 }
 weak_alias (__pthread_mutex_destroy, pthread_mutex_destroy)
@@ -50,40 +45,33 @@ weak_alias (__pthread_mutex_destroy, pthread_mutex_destroy)
 int __pthread_mutex_trylock(pthread_mutex_t * mutex)
 {
   pthread_descr self;
+  int retcode;
 
-  acquire(&mutex->m_spinlock);
   switch(mutex->m_kind) {
   case PTHREAD_MUTEX_FAST_NP:
-    if (mutex->m_count == 0) {
-      mutex->m_count = 1;
-      release(&mutex->m_spinlock);
-      return 0;
-    }
-    break;
+    retcode = __pthread_trylock(&mutex->m_lock);
+    return retcode;
   case PTHREAD_MUTEX_RECURSIVE_NP:
     self = thread_self();
-    if (mutex->m_count == 0 || mutex->m_owner == self) {
+    if (mutex->m_owner == self) {
       mutex->m_count++;
-      mutex->m_owner = self;
-      release(&mutex->m_spinlock);
      return 0;
    }
-    break;
+    retcode = __pthread_trylock(&mutex->m_lock);
+    if (retcode == 0) {
+      mutex->m_owner = self;
+      mutex->m_count = 0;
+    }
+    return retcode;
   case PTHREAD_MUTEX_ERRORCHECK_NP:
-    self = thread_self();
-    if (mutex->m_count == 0) {
-      mutex->m_count = 1;
-      mutex->m_owner = self;
-      release(&mutex->m_spinlock);
-      return 0;
+    retcode = __pthread_trylock(&mutex->m_lock);
+    if (retcode == 0) {
+      mutex->m_owner = thread_self();
    }
-    break;
+    return retcode;
   default:
-    release(&mutex->m_spinlock);
     return EINVAL;
   }
-  release(&mutex->m_spinlock);
-  return EBUSY;
 }
 weak_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
@@ -91,87 +79,55 @@ int __pthread_mutex_lock(pthread_mutex_t * mutex)
 {
   pthread_descr self;
 
-  acquire(&mutex->m_spinlock);
   switch(mutex->m_kind) {
   case PTHREAD_MUTEX_FAST_NP:
-    if (mutex->m_count == 0) {
-      mutex->m_count = 1;
-      release(&mutex->m_spinlock);
-      return 0;
-    }
-    self = thread_self();
-    break;
+    __pthread_lock(&mutex->m_lock);
+    return 0;
   case PTHREAD_MUTEX_RECURSIVE_NP:
     self = thread_self();
-    if (mutex->m_count == 0 || mutex->m_owner == self) {
+    if (mutex->m_owner == self) {
       mutex->m_count++;
-      mutex->m_owner = self;
-      release(&mutex->m_spinlock);
      return 0;
    }
-    break;
+    __pthread_lock(&mutex->m_lock);
+    mutex->m_owner = self;
+    mutex->m_count = 0;
+    return 0;
   case PTHREAD_MUTEX_ERRORCHECK_NP:
     self = thread_self();
-    if (mutex->m_count == 0) {
-      mutex->m_count = 1;
-      mutex->m_owner = self;
-      release(&mutex->m_spinlock);
-      return 0;
-    } else if (mutex->m_owner == self) {
-      release(&mutex->m_spinlock);
-      return EDEADLK;
-    }
-    break;
+    if (mutex->m_owner == self) return EDEADLK;
+    __pthread_lock(&mutex->m_lock);
+    mutex->m_owner = self;
+    return 0;
   default:
-    release(&mutex->m_spinlock);
     return EINVAL;
   }
-  /* Suspend ourselves */
-  enqueue(&mutex->m_waiting, self);
-  release(&mutex->m_spinlock);
-  suspend(self); /* This is not a cancellation point */
-  /* Now we own the mutex */
-  ASSERT(mutex->m_count == 1);
-  mutex->m_owner = self; /* for recursive and errorcheck mutexes */
-  return 0;
 }
 weak_alias (__pthread_mutex_lock, pthread_mutex_lock)
 
 int __pthread_mutex_unlock(pthread_mutex_t * mutex)
 {
-  pthread_descr th;
-
-  acquire(&mutex->m_spinlock);
   switch (mutex->m_kind) {
   case PTHREAD_MUTEX_FAST_NP:
-    break;
+    __pthread_unlock(&mutex->m_lock);
+    return 0;
   case PTHREAD_MUTEX_RECURSIVE_NP:
-    if (mutex->m_count >= 2) {
+    if (mutex->m_count > 0) {
      mutex->m_count--;
-      release(&mutex->m_spinlock);
      return 0;
    }
-    break;
+    mutex->m_owner = NULL;
+    __pthread_unlock(&mutex->m_lock);
+    return 0;
   case PTHREAD_MUTEX_ERRORCHECK_NP:
-    if (mutex->m_count == 0 || mutex->m_owner != thread_self()) {
-      release(&mutex->m_spinlock);
+    if (mutex->m_owner != thread_self() || mutex->m_lock.status == 0)
      return EPERM;
-    }
-    break;
+    mutex->m_owner = NULL;
+    __pthread_unlock(&mutex->m_lock);
+    return 0;
   default:
-    release(&mutex->m_spinlock);
     return EINVAL;
   }
-  th = dequeue(&mutex->m_waiting);
-  /* If no waiters, unlock the mutex */
-  if (th == NULL) mutex->m_count = 0;
-  release(&mutex->m_spinlock);
-  /* If there is a waiter, restart it with the mutex still locked */
-  if (th != NULL) {
-    mutex->m_owner = NULL; /* we no longer own the mutex */
-    restart(th);
-  }
-  return 0;
 }
 weak_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
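The observable semantics implied by the new mutex code can be checked from the outside. The following hypothetical test (not part of the patch, and using today's portable attribute API rather than the 1998 pthread_mutexattr_setkind_np interface) exercises the two error paths visible above: relocking an error-checking mutex returns EDEADLK via the m_owner check, and unlocking one that is not held fails the m_lock.status test with EPERM.

#include <pthread.h>
#include <errno.h>
#include <assert.h>

int main(void)
{
  pthread_mutexattr_t at;
  pthread_mutex_t m;

  pthread_mutexattr_init(&at);
  pthread_mutexattr_settype(&at, PTHREAD_MUTEX_ERRORCHECK);
  pthread_mutex_init(&m, &at);

  assert(pthread_mutex_lock(&m) == 0);
  assert(pthread_mutex_lock(&m) == EDEADLK);  /* relock caught via m_owner */
  assert(pthread_mutex_unlock(&m) == 0);
  assert(pthread_mutex_unlock(&m) == EPERM);  /* not locked: status == 0 */

  pthread_mutex_destroy(&m);
  pthread_mutexattr_destroy(&at);
  return 0;
}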
pthread.c
@@ -35,7 +35,7 @@ struct _pthread_descr_struct __pthread_initial_thread = {
   PTHREAD_THREADS_MAX, /* pthread_t p_tid */
   0,                   /* int p_pid */
   0,                   /* int p_priority */
-  &__pthread_handles[0].h_spinlock, /* int * p_spinlock */
+  &__pthread_handles[0].h_lock, /* struct _pthread_fastlock * p_lock */
   0,                   /* int p_signal */
   NULL,                /* sigjmp_buf * p_signal_buf */
   NULL,                /* sigjmp_buf * p_cancel_buf */
@@ -53,6 +53,8 @@ struct _pthread_descr_struct __pthread_initial_thread = {
   0,                   /* int p_errno */
   NULL,                /* int *p_h_errnop */
   0,                   /* int p_h_errno */
+  NULL,                /* char * p_in_sighandler */
+  0,                   /* char p_sigwaiting */
   PTHREAD_START_ARGS_INITIALIZER, /* struct pthread_start_args p_start_args */
   {NULL}               /* void * p_specific[PTHREAD_KEYS_MAX] */
 };
@@ -68,7 +70,7 @@ struct _pthread_descr_struct __pthread_manager_thread = {
   0,                   /* int p_tid */
   0,                   /* int p_pid */
   0,                   /* int p_priority */
-  NULL,                /* int * p_spinlock */
+  NULL,                /* struct _pthread_fastlock * p_lock */
   0,                   /* int p_signal */
   NULL,                /* sigjmp_buf * p_signal_buf */
   NULL,                /* sigjmp_buf * p_cancel_buf */
@@ -86,6 +88,8 @@ struct _pthread_descr_struct __pthread_manager_thread = {
   0,                   /* int p_errno */
   NULL,                /* int *p_h_errnop */
   0,                   /* int p_h_errno */
+  NULL,                /* char * p_in_sighandler */
+  0,                   /* char p_sigwaiting */
   PTHREAD_START_ARGS_INITIALIZER, /* struct pthread_start_args p_start_args */
   {NULL}               /* void * p_specific[PTHREAD_KEYS_MAX] */
 };
@@ -119,6 +123,15 @@ char *__pthread_manager_thread_tos = NULL;
 int __pthread_exit_requested = 0;
 int __pthread_exit_code = 0;
 
+/* Communicate relevant LinuxThreads constants to gdb */
+
+const int __pthread_threads_max = PTHREAD_THREADS_MAX;
+const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
+const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct,
+                                              h_descr);
+const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
+                                            p_pid);
+
 /* Signal numbers used for the communication. */
 int __pthread_sig_restart;
 int __pthread_sig_cancel;
@@ -131,6 +144,7 @@ extern int _h_errno;
 
 static void pthread_exit_process(int retcode, void *arg);
 static void pthread_handle_sigcancel(int sig);
+static void pthread_handle_sigrestart(int sig);
 
 /* Initialize the pthread library.
    Initialization is split in two functions:
@@ -148,6 +162,10 @@ static void pthread_initialize(void)
 
   /* If already done (e.g. by a constructor called earlier!), bail out */
   if (__pthread_initial_thread_bos != NULL) return;
+#ifdef TEST_FOR_COMPARE_AND_SWAP
+  /* Test if compare-and-swap is available */
+  __pthread_has_cas = compare_and_swap_is_available();
+#endif
   /* For the initial stack, reserve at least STACK_SIZE bytes of stack
      below the current stack address, and align that on a
      STACK_SIZE boundary. */
@@ -178,14 +196,14 @@ static void pthread_initialize(void)
   /* Setup signal handlers for the initial thread.
      Since signal handlers are shared between threads, these settings
     will be inherited by all other threads. */
-  sa.sa_handler = __pthread_sighandler;
+  sa.sa_handler = pthread_handle_sigrestart;
   sigemptyset(&sa.sa_mask);
   sa.sa_flags = SA_RESTART; /* does not matter for regular threads, but
                               better for the thread manager */
-  sigaction(PTHREAD_SIG_RESTART, &sa, NULL);
+  __sigaction(PTHREAD_SIG_RESTART, &sa, NULL);
   sa.sa_handler = pthread_handle_sigcancel;
   sa.sa_flags = 0;
-  sigaction(PTHREAD_SIG_CANCEL, &sa, NULL);
+  __sigaction(PTHREAD_SIG_CANCEL, &sa, NULL);
 
   /* Initially, block PTHREAD_SIG_RESTART. Will be unblocked on demand. */
   sigemptyset(&mask);
@@ -197,10 +215,11 @@ static void pthread_initialize(void)
   __on_exit(pthread_exit_process, NULL);
 }
 
-static int pthread_initialize_manager(void)
+int __pthread_initialize_manager(void)
 {
   int manager_pipe[2];
   int pid;
+  struct pthread_request request;
 
   /* If basic initialization not done yet (e.g. we're called from a
      constructor run before our constructor), do it now */
@@ -228,6 +247,11 @@ static int pthread_initialize_manager(void)
   __pthread_manager_request = manager_pipe[1]; /* writing end */
   __pthread_manager_reader = manager_pipe[0]; /* reading end */
   __pthread_manager_thread.p_pid = pid;
+  /* Make gdb aware of new thread manager */
+  if (__pthread_threads_debug) raise(PTHREAD_SIG_CANCEL);
+  /* Synchronize debugging of the thread manager */
+  request.req_kind = REQ_DEBUG;
+  __libc_write(__pthread_manager_request, (char *) &request, sizeof(request));
   return 0;
 }
 
@@ -239,7 +263,7 @@ int __pthread_create_2_1(pthread_t *thread, const pthread_attr_t *attr,
   pthread_descr self = thread_self();
   struct pthread_request request;
   if (__pthread_manager_request < 0) {
-    if (pthread_initialize_manager() < 0) return EAGAIN;
+    if (__pthread_initialize_manager() < 0) return EAGAIN;
   }
   request.req_thread = self;
   request.req_kind = REQ_CREATE;
@@ -296,6 +320,24 @@ int pthread_equal(pthread_t thread1, pthread_t thread2)
   return thread1 == thread2;
 }
 
+/* Helper function for thread_self in the case of user-provided stacks */
+
+#ifndef THREAD_SELF
+
+pthread_descr __pthread_find_self()
+{
+  char * sp = CURRENT_STACK_FRAME;
+  pthread_handle h;
+
+  /* __pthread_handles[0] is the initial thread, handled specially in
+     thread_self(), so start at 1 */
+  h = __pthread_handles + 1;
+  while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;
+  return h->h_descr;
+}
+
+#endif
+
 /* Thread scheduling */
 
 int pthread_setschedparam(pthread_t thread, int policy,
@@ -304,18 +346,18 @@ int pthread_setschedparam(pthread_t thread, int policy,
   pthread_handle handle = thread_handle(thread);
   pthread_descr th;
 
-  acquire(&handle->h_spinlock);
+  __pthread_lock(&handle->h_lock);
   if (invalid_handle(handle, thread)) {
-    release(&handle->h_spinlock);
+    __pthread_unlock(&handle->h_lock);
     return ESRCH;
   }
   th = handle->h_descr;
   if (__sched_setscheduler(th->p_pid, policy, param) == -1) {
-    release(&handle->h_spinlock);
+    __pthread_unlock(&handle->h_lock);
     return errno;
   }
   th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
-  release(&handle->h_spinlock);
+  __pthread_unlock(&handle->h_lock);
   if (__pthread_manager_request >= 0)
     __pthread_manager_adjust_prio(th->p_priority);
   return 0;
@@ -327,13 +369,13 @@ int pthread_getschedparam(pthread_t thread, int *policy,
   pthread_handle handle = thread_handle(thread);
   int pid, pol;
 
-  acquire(&handle->h_spinlock);
+  __pthread_lock(&handle->h_lock);
   if (invalid_handle(handle, thread)) {
-    release(&handle->h_spinlock);
+    __pthread_unlock(&handle->h_lock);
     return ESRCH;
   }
   pid = handle->h_descr->p_pid;
-  release(&handle->h_spinlock);
+  __pthread_unlock(&handle->h_lock);
   pol = __sched_getscheduler(pid);
   if (pol == -1) return errno;
   if (__sched_getparam(pid, param) == -1) return errno;
@@ -364,11 +406,11 @@ static void pthread_exit_process(int retcode, void *arg)
 
 /* The handler for the RESTART signal just records the signal received
    in the thread descriptor, and optionally performs a siglongjmp
-   (for pthread_cond_timedwait). Also used in sigwait.
+   (for pthread_cond_timedwait).
    For the thread manager thread, redirect the signal to
    __pthread_manager_sighandler. */
 
-void __pthread_sighandler(int sig)
+static void pthread_handle_sigrestart(int sig)
 {
   pthread_descr self = thread_self();
   if (self == &__pthread_manager_thread) {
@@ -380,13 +422,24 @@ void __pthread_sighandler(int sig)
 }
 
 /* The handler for the CANCEL signal checks for cancellation
-   (in asynchronous mode) and for process-wide exit and exec requests. */
+   (in asynchronous mode), for process-wide exit and exec requests.
+   For the thread manager thread, we ignore the signal.
+   The debugging strategy is as follows:
+   On reception of a REQ_DEBUG request (sent by new threads created to
+   the thread manager under debugging mode), the thread manager throws
+   PTHREAD_SIG_CANCEL to itself. The debugger (if active) intercepts
+   this signal, takes into account new threads and continue execution
+   of the thread manager by propagating the signal because it doesn't
+   know what it is specifically done for. In the current implementation,
+   the thread manager simply discards it. */
 
 static void pthread_handle_sigcancel(int sig)
 {
   pthread_descr self = thread_self();
   sigjmp_buf * jmpbuf;
 
+  if (self == &__pthread_manager_thread)
+    return;
   if (__pthread_exit_requested) {
     /* Main thread should accumulate times for thread manager and its
        children, so that timings for main thread account for all threads. */
@@ -469,7 +522,7 @@ weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
 #ifdef DEBUG
 #include <stdarg.h>
 
-void __pthread_message(char * fmt, long arg)
+void __pthread_message(char * fmt, ...)
 {
   char buffer[1024];
   va_list args;
ptlongjmp.c
@@ -37,6 +37,8 @@ static void pthread_cleanup_upto(__jmp_buf target)
        c = c->prev)
     c->routine(c->arg);
   self->p_cleanup = c;
+  if (self->p_in_sighandler && _JMPBUF_UNWINDS(target, self->p_in_sighandler))
+    self->p_in_sighandler = NULL;
 }
 
 void siglongjmp(sigjmp_buf env, int val)
queue.h
@@ -14,49 +14,42 @@
 
 /* Waiting queues */
 
-typedef struct _pthread_queue pthread_queue;
-
-static inline void queue_init(pthread_queue * q)
-{
-  q->head = q->tail = NULL;
-}
-
-static inline void enqueue(pthread_queue * q, pthread_descr th)
+/* Waiting queues are represented by lists of thread descriptors
+   linked through their p_nextwaiting field.  The lists are kept
+   sorted by decreasing priority, and then decreasing waiting time. */
+
+static inline void enqueue(pthread_descr * q, pthread_descr th)
 {
-  int prio;
-  pthread_descr * elt;
-
+  int prio = th->p_priority;
   ASSERT(th->p_nextwaiting == NULL);
-  if (q->tail == NULL) {
-    q->head = th;
-    q->tail = th;
-    return;
-  }
-  prio = th->p_priority;
-  if (prio > 0) {
-    /* Insert in queue according to priority order */
-    for (elt = &(q->head); *elt != NULL; elt = &((*elt)->p_nextwaiting)) {
-      if (prio > (*elt)->p_priority) {
-        th->p_nextwaiting = *elt;
-        *elt = th;
-        return;
-      }
+  for (; *q != NULL; q = &((*q)->p_nextwaiting)) {
+    if (prio > (*q)->p_priority) {
+      th->p_nextwaiting = *q;
+      *q = th;
+      return;
     }
   }
-  /* Priority is no greater than any thread in the queue.
-     Insert at end of queue */
-  q->tail->p_nextwaiting = th;
-  q->tail = th;
+  *q = th;
 }
 
-static inline pthread_descr dequeue(pthread_queue * q)
+static inline pthread_descr dequeue(pthread_descr * q)
 {
   pthread_descr th;
-  th = q->head;
+  th = *q;
   if (th != NULL) {
-    q->head = th->p_nextwaiting;
-    if (q->head == NULL) q->tail = NULL;
+    *q = th->p_nextwaiting;
     th->p_nextwaiting = NULL;
   }
   return th;
 }
+
+static inline void remove_from_queue(pthread_descr * q, pthread_descr th)
+{
+  for (; *q != NULL; q = &((*q)->p_nextwaiting)) {
+    if (*q == th) {
+      *q = th->p_nextwaiting;
+      th->p_nextwaiting = NULL;
+      return;
+    }
+  }
+}
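The new queue discipline above is what makes the locking code "safe in situations with different priorities": waiters are kept sorted by decreasing p_priority, and because the insertion scan uses a strict '>', threads of equal priority stay in FIFO order. The following standalone demo reuses the patch's enqueue/dequeue logic with a toy descriptor type to show the wake-up order:

#include <stdio.h>
#include <stddef.h>

typedef struct descr { int p_priority; struct descr *p_nextwaiting; } *pthread_descr;

static void enqueue(pthread_descr *q, pthread_descr th)
{
  int prio = th->p_priority;
  for (; *q != NULL; q = &((*q)->p_nextwaiting)) {
    if (prio > (*q)->p_priority) {   /* strict '>': equals stay FIFO */
      th->p_nextwaiting = *q;
      *q = th;
      return;
    }
  }
  *q = th;                           /* lowest priority: append at tail */
}

static pthread_descr dequeue(pthread_descr *q)
{
  pthread_descr th = *q;
  if (th != NULL) {
    *q = th->p_nextwaiting;
    th->p_nextwaiting = NULL;
  }
  return th;
}

int main(void)
{
  struct descr a = { 0, NULL }, b = { 5, NULL }, c = { 0, NULL };
  pthread_descr q = NULL, th;

  enqueue(&q, &a);   /* priority 0 */
  enqueue(&q, &b);   /* priority 5: jumps ahead of a */
  enqueue(&q, &c);   /* priority 0: queued after a */
  while ((th = dequeue(&q)) != NULL)
    printf("woke descriptor with priority %d\n", th->p_priority);
  /* prints 5, then 0, then 0 */
  return 0;
}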
rwlock.c
@@ -30,12 +30,11 @@ int
 pthread_rwlock_init (pthread_rwlock_t *rwlock,
                      const pthread_rwlockattr_t *attr)
 {
-  rwlock->rw_spinlock = 0;
+  __pthread_init_lock(&rwlock->rw_lock);
   rwlock->rw_readers = 0;
   rwlock->rw_writer = NULL;
 
-  queue_init(&rwlock->rw_read_waiting);
-  queue_init(&rwlock->rw_write_waiting);
+  rwlock->rw_read_waiting = NULL;
+  rwlock->rw_write_waiting = NULL;
 
   if (attr == NULL)
     {
@@ -58,10 +57,10 @@ pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
   int readers;
   _pthread_descr writer;
 
-  acquire (&rwlock->rw_spinlock);
+  __pthread_lock (&rwlock->rw_lock);
   readers = rwlock->rw_readers;
   writer = rwlock->rw_writer;
-  release (&rwlock->rw_spinlock);
+  __pthread_unlock (&rwlock->rw_lock);
 
   if (readers > 0 || writer != NULL)
     return EBUSY;
@@ -77,7 +76,7 @@ pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
 
   while (1)
     {
-      acquire (&rwlock->rw_spinlock);
+      __pthread_lock (&rwlock->rw_lock);
       if (rwlock->rw_writer == NULL
           || (rwlock->rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP
               && rwlock->rw_readers != 0))
@@ -87,12 +86,12 @@ pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
       /* Suspend ourselves, then try again */
       self = thread_self ();
       enqueue (&rwlock->rw_read_waiting, self);
-      release (&rwlock->rw_spinlock);
+      __pthread_unlock (&rwlock->rw_lock);
       suspend (self); /* This is not a cancellation point */
     }
 
   ++rwlock->rw_readers;
-  release (&rwlock->rw_spinlock);
+  __pthread_unlock (&rwlock->rw_lock);
 
   return 0;
 }
@@ -103,7 +102,7 @@ pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
 {
   int result = EBUSY;
 
-  acquire (&rwlock->rw_spinlock);
+  __pthread_lock (&rwlock->rw_lock);
   if (rwlock->rw_writer == NULL
       || (rwlock->rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP
          && rwlock->rw_readers != 0))
@@ -111,7 +110,7 @@ pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
       ++rwlock->rw_readers;
       result = 0;
     }
-  release (&rwlock->rw_spinlock);
+  __pthread_unlock (&rwlock->rw_lock);
 
   return result;
 }
@@ -124,17 +123,17 @@ pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
 
   while(1)
     {
-      acquire (&rwlock->rw_spinlock);
+      __pthread_lock (&rwlock->rw_lock);
      if (rwlock->rw_readers == 0 && rwlock->rw_writer == NULL)
        {
          rwlock->rw_writer = self;
-          release (&rwlock->rw_spinlock);
+          __pthread_unlock (&rwlock->rw_lock);
          return 0;
        }
 
      /* Suspend ourselves, then try again */
      enqueue (&rwlock->rw_write_waiting, self);
-      release (&rwlock->rw_spinlock);
+      __pthread_unlock (&rwlock->rw_lock);
      suspend (self); /* This is not a cancellation point */
    }
 }
@@ -145,13 +144,13 @@ pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
 {
   int result = EBUSY;
 
-  acquire (&rwlock->rw_spinlock);
+  __pthread_lock (&rwlock->rw_lock);
   if (rwlock->rw_readers == 0 && rwlock->rw_writer == NULL)
     {
       rwlock->rw_writer = thread_self ();
       result = 0;
     }
-  release (&rwlock->rw_spinlock);
+  __pthread_unlock (&rwlock->rw_lock);
 
   return result;
 }
@@ -160,16 +159,16 @@ pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
 int
 pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
 {
-  struct _pthread_queue torestart;
+  pthread_descr torestart;
   pthread_descr th;
 
-  acquire (&rwlock->rw_spinlock);
+  __pthread_lock (&rwlock->rw_lock);
   if (rwlock->rw_writer != NULL)
     {
       /* Unlocking a write lock. */
      if (rwlock->rw_writer != thread_self ())
        {
-          release (&rwlock->rw_spinlock);
+          __pthread_unlock (&rwlock->rw_lock);
          return EPERM;
        }
      rwlock->rw_writer = NULL;
@@ -179,15 +178,15 @@ pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
        {
          /* Restart all waiting readers. */
          torestart = rwlock->rw_read_waiting;
-          queue_init (&rwlock->rw_read_waiting);
-          release (&rwlock->rw_spinlock);
+          rwlock->rw_read_waiting = NULL;
+          __pthread_unlock (&rwlock->rw_lock);
          while ((th = dequeue (&torestart)) != NULL)
            restart (th);
        }
      else
        {
          /* Restart one waiting writer. */
-          release (&rwlock->rw_spinlock);
+          __pthread_unlock (&rwlock->rw_lock);
          restart (th);
        }
    }
@@ -196,7 +195,7 @@ pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
      /* Unlocking a read lock. */
      if (rwlock->rw_readers == 0)
        {
-          release (&rwlock->rw_spinlock);
+          __pthread_unlock (&rwlock->rw_lock);
          return EPERM;
        }
 
@@ -207,7 +206,7 @@ pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
      else
        th = NULL;
 
-      release (&rwlock->rw_spinlock);
+      __pthread_unlock (&rwlock->rw_lock);
      if (th != NULL)
        restart (th);
    }
@ -17,69 +17,13 @@
|
||||
#include "pthread.h"
|
||||
#include "semaphore.h"
|
||||
#include "internals.h"
|
||||
#include "spinlock.h"
|
||||
#include "restart.h"
|
||||
|
||||
|
||||
#if !defined HAS_COMPARE_AND_SWAP && !defined TEST_FOR_COMPARE_AND_SWAP
|
||||
/* If we have no atomic compare and swap, fake it using an extra spinlock. */
|
||||
|
||||
#include "spinlock.h"
|
||||
static inline int sem_compare_and_swap(sem_t *sem, long oldval, long newval)
|
||||
{
|
||||
int ret;
|
||||
acquire(&sem->sem_spinlock);
|
||||
ret = (sem->sem_status == oldval);
|
||||
if (ret) sem->sem_status = newval;
|
||||
release(&sem->sem_spinlock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#elif defined TEST_FOR_COMPARE_AND_SWAP
|
||||
|
||||
#include "spinlock.h"
|
||||
static int has_compare_and_swap = -1; /* to be determined at run-time */
|
||||
|
||||
static inline int sem_compare_and_swap(sem_t *sem, long oldval, long newval)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (has_compare_and_swap == 1)
|
||||
return __compare_and_swap(&sem->sem_status, oldval, newval);
|
||||
|
||||
acquire(&sem->sem_spinlock);
|
||||
ret = (sem->sem_status == oldval);
|
||||
if (ret) sem->sem_status = newval;
|
||||
release(&sem->sem_spinlock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#else
|
||||
/* But if we do have an atomic compare and swap, use it! */
|
||||
|
||||
static inline int sem_compare_and_swap(sem_t *sem, long oldval, long newval)
|
||||
{
|
||||
return __compare_and_swap(&sem->sem_status, oldval, newval);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/* The state of a semaphore is represented by a long int encoding
|
||||
either the semaphore count if >= 0 and no thread is waiting on it,
|
||||
or the head of the list of threads waiting for the semaphore.
|
||||
To distinguish the two cases, we encode the semaphore count N
|
||||
as 2N+1, so that it has the lowest bit set.
|
||||
|
||||
A sequence of sem_wait operations on a semaphore initialized to N
|
||||
result in the following successive states:
|
||||
2N+1, 2N-1, ..., 3, 1, &first_waiting_thread, &second_waiting_thread, ...
|
||||
*/
|
||||
|
||||
static void sem_restart_list(pthread_descr waiting);
|
||||
#include "queue.h"
|
||||
|
||||
int sem_init(sem_t *sem, int pshared, unsigned int value)
|
||||
{
|
||||
if ((long)value > SEM_VALUE_MAX) {
|
||||
if (value > SEM_VALUE_MAX) {
|
||||
errno = EINVAL;
|
||||
return -1;
|
||||
}
|
||||
@ -87,150 +31,104 @@ int sem_init(sem_t *sem, int pshared, unsigned int value)
|
||||
errno = ENOSYS;
|
||||
return -1;
|
||||
}
|
||||
#ifdef TEST_FOR_COMPARE_AND_SWAP
|
||||
if (has_compare_and_swap == -1) {
|
||||
has_compare_and_swap = compare_and_swap_is_available();
|
||||
}
|
||||
#endif
|
||||
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
|
||||
sem->sem_spinlock = 0;
|
||||
#endif
|
||||
sem->sem_status = ((long)value << 1) + 1;
|
||||
  __pthread_init_lock((struct _pthread_fastlock *) &sem->sem_lock);
  sem->sem_value = value;
  sem->sem_waiting = NULL;
  return 0;
}

int sem_wait(sem_t * sem)
{
  long oldstatus, newstatus;
  volatile pthread_descr self = thread_self();
  pthread_descr * th;
  volatile pthread_descr self;

  while (1) {
    do {
      oldstatus = sem->sem_status;
      if ((oldstatus & 1) && (oldstatus != 1))
        newstatus = oldstatus - 2;
      else {
        newstatus = (long) self;
        self->p_nextwaiting = (pthread_descr) oldstatus;
      }
    }
    while (! sem_compare_and_swap(sem, oldstatus, newstatus));
    if (newstatus & 1)
      /* We got the semaphore. */
      return 0;
    /* Wait for sem_post or cancellation */
    suspend_with_cancellation(self);
    /* This is a cancellation point */
    if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
      /* Remove ourselves from the waiting list if we're still on it */
      /* First check if we're at the head of the list. */
      do {
        oldstatus = sem->sem_status;
        if (oldstatus != (long) self) break;
        newstatus = (long) self->p_nextwaiting;
      }
      while (! sem_compare_and_swap(sem, oldstatus, newstatus));
      /* Now, check if we're somewhere in the list.
         There's a race condition with sem_post here, but it does not matter:
         the net result is that at the time pthread_exit is called,
         self is no longer reachable from sem->sem_status. */
      if (oldstatus != (long) self && (oldstatus & 1) == 0) {
        for (th = &(((pthread_descr) oldstatus)->p_nextwaiting);
             *th != NULL && *th != (pthread_descr) 1;
             th = &((*th)->p_nextwaiting)) {
          if (*th == self) {
            *th = self->p_nextwaiting;
            break;
          }
        }
      }
      pthread_exit(PTHREAD_CANCELED);
    }
  __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock);
  if (sem->sem_value > 0) {
    sem->sem_value--;
    __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock);
    return 0;
  }
  self = thread_self();
  enqueue(&sem->sem_waiting, self);
  /* Wait for sem_post or cancellation */
  __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock);
  suspend_with_cancellation(self);
  /* This is a cancellation point */
  if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
    /* Remove ourselves from the waiting list if we're still on it */
    __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock);
    remove_from_queue(&sem->sem_waiting, self);
    __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock);
    pthread_exit(PTHREAD_CANCELED);
  }
  /* We got the semaphore */
  return 0;
}

int sem_trywait(sem_t * sem)
{
  long oldstatus, newstatus;
  int retval;

  do {
    oldstatus = sem->sem_status;
    if ((oldstatus & 1) == 0 || (oldstatus == 1)) {
      errno = EAGAIN;
      return -1;
    }
    newstatus = oldstatus - 2;
  __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock);
  if (sem->sem_value == 0) {
    errno = EAGAIN;
    retval = -1;
  } else {
    sem->sem_value--;
    retval = 0;
  }
  while (! sem_compare_and_swap(sem, oldstatus, newstatus));
  return 0;
  return retval;
}

int sem_post(sem_t * sem)
{
  long oldstatus, newstatus;
  pthread_descr self = thread_self();
  pthread_descr th;
  struct pthread_request request;

  do {
    oldstatus = sem->sem_status;
    if ((oldstatus & 1) == 0)
      newstatus = 3;
    else {
      if (oldstatus >= SEM_VALUE_MAX) {
  if (self->p_in_sighandler == NULL) {
    __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock);
    if (sem->sem_waiting == NULL) {
      if (sem->sem_value >= SEM_VALUE_MAX) {
        /* Overflow */
        errno = ERANGE;
        __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock);
        return -1;
      }
      newstatus = oldstatus + 2;
      sem->sem_value++;
      __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock);
    } else {
      th = dequeue(&sem->sem_waiting);
      __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock);
      restart(th);
    }
  } else {
    /* If we're in signal handler, delegate post operation to
       the thread manager. */
    if (__pthread_manager_request < 0) {
      if (__pthread_initialize_manager() < 0) {
        errno = EAGAIN;
        return -1;
      }
    }
    request.req_kind = REQ_POST;
    request.req_args.post = sem;
    __libc_write(__pthread_manager_request,
                 (char *) &request, sizeof(request));
  }
  while (! sem_compare_and_swap(sem, oldstatus, newstatus));
  if ((oldstatus & 1) == 0)
    sem_restart_list((pthread_descr) oldstatus);
  return 0;
}

int sem_getvalue(sem_t * sem, int * sval)
{
  long status = sem->sem_status;
  if (status & 1)
    *sval = (int)((unsigned long) status >> 1);
  else
    *sval = 0;
  *sval = sem->sem_value;
  return 0;
}

int sem_destroy(sem_t * sem)
{
  if ((sem->sem_status & 1) == 0) {
  if (sem->sem_waiting != NULL) {
    errno = EBUSY;
    return -1;
  }
  return 0;
}

/* Auxiliary function for restarting all threads on a waiting list,
   in priority order. */

static void sem_restart_list(pthread_descr waiting)
{
  pthread_descr th, towake, *p;

  /* Sort list of waiting threads by decreasing priority (insertion sort) */
  towake = NULL;
  while (waiting != (pthread_descr) 1) {
    th = waiting;
    waiting = waiting->p_nextwaiting;
    p = &towake;
    while (*p != NULL && th->p_priority < (*p)->p_priority)
      p = &((*p)->p_nextwaiting);
    th->p_nextwaiting = *p;
    *p = th;
  }
  /* Wake up threads in priority order */
  while (towake != NULL) {
    th = towake;
    towake = towake->p_nextwaiting;
    th->p_nextwaiting = NULL;
    restart(th);
  }
}
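The hunks above are the LinuxThreads semaphore.c rewrite: the old lock-free sem_status scheme (compare-and-swap on a tagged word) appears interleaved with its replacement, which protects sem_value and the sem_waiting queue with a priority-safe fastlock. For orientation only, not part of the patch, here is a minimal sketch of the POSIX API these functions implement; NITEMS, producer and the buffer are invented for the example:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define NITEMS 8

static sem_t items;            /* counts filled slots in the buffer */
static int buffer[NITEMS];

static void *producer(void *arg)
{
  int i;
  for (i = 0; i < NITEMS; i++) {
    buffer[i] = i * i;
    sem_post(&items);          /* bump sem_value or wake one waiter */
  }
  return arg;
}

int main(void)
{
  pthread_t tid;
  int i, value;

  sem_init(&items, 0, 0);      /* pshared = 0: process-private only */
  pthread_create(&tid, NULL, producer, NULL);
  for (i = 0; i < NITEMS; i++) {
    sem_wait(&items);          /* blocks while sem_value == 0 */
    printf("consumed %d\n", buffer[i]);
  }
  pthread_join(tid, NULL);
  sem_getvalue(&items, &value);  /* should report 0 here */
  sem_destroy(&items);           /* fails with EBUSY if threads wait */
  return 0;
}

Slot i is written before post i and read only after wait i returns, so the consumer never observes an unfilled slot; in the new implementation each sem_post either increments sem_value or restarts one queued waiter.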
@ -21,8 +21,11 @@

#define SEM_VALUE_MAX INT_MAX

/* Get the semaphore structure definition. */
#include <bits/semaphore.h>
typedef struct {
  struct { long status; int spinlock; } sem_lock;
  int sem_value;
  _pthread_descr sem_waiting;
} sem_t;

__BEGIN_DECLS
@ -53,83 +53,88 @@ int pthread_kill(pthread_t thread, int signo)
  pthread_handle handle = thread_handle(thread);
  int pid;

  acquire(&handle->h_spinlock);
  __pthread_lock(&handle->h_lock);
  if (invalid_handle(handle, thread)) {
    release(&handle->h_spinlock);
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  pid = handle->h_descr->p_pid;
  release(&handle->h_spinlock);
  __pthread_unlock(&handle->h_lock);
  if (kill(pid, signo) == -1)
    return errno;
  else
    return 0;
}

/* The set of signals on which some thread is doing a sigwait */
static sigset_t sigwaited;
static pthread_mutex_t sigwaited_mut = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sigwaited_changed = PTHREAD_COND_INITIALIZER;
/* User-provided signal handlers */
static __sighandler_t sighandler[NSIG];

/* The wrapper around user-provided signal handlers */
static void pthread_sighandler(int signo)
{
  pthread_descr self = thread_self();
  char * in_sighandler;
  /* If we're in a sigwait operation, just record the signal received
     and return without calling the user's handler */
  if (self->p_sigwaiting) {
    self->p_sigwaiting = 0;
    self->p_signal = signo;
    return;
  }
  /* Record that we're in a signal handler and call the user's
     handler function */
  in_sighandler = self->p_in_sighandler;
  if (in_sighandler == NULL) self->p_in_sighandler = CURRENT_STACK_FRAME;
  sighandler[signo](signo);
  if (in_sighandler == NULL) self->p_in_sighandler = NULL;
}

int sigaction(int sig, const struct sigaction * act,
              struct sigaction * oact)
{
  struct sigaction newact;

  if (sig == PTHREAD_SIG_RESTART || sig == PTHREAD_SIG_CANCEL)
    return EINVAL;
  newact = *act;
  if (act->sa_handler != SIG_IGN && act->sa_handler != SIG_DFL)
    newact.sa_handler = pthread_sighandler;
  if (__sigaction(sig, &newact, oact) == -1)
    return -1;
  if (oact != NULL) oact->sa_handler = sighandler[sig];
  sighandler[sig] = act->sa_handler;
  return 0;
}

int sigwait(const sigset_t * set, int * sig)
{
  volatile pthread_descr self = thread_self();
  sigset_t mask;
  int s;
  struct sigaction action, saved_signals[NSIG];
  sigjmp_buf jmpbuf;

  pthread_mutex_lock(&sigwaited_mut);
  /* Make sure no other thread is waiting on our signals */
test_again:
  for (s = 1; s < NSIG; s++) {
    if (sigismember(set, s) && sigismember(&sigwaited, s)) {
      pthread_cond_wait(&sigwaited_changed, &sigwaited_mut);
      goto test_again;
    }
  }
  /* Get ready to block all signals except those in set
     and the cancellation signal */
  sigfillset(&mask);
  sigdelset(&mask, PTHREAD_SIG_CANCEL);
  /* Signals in set are assumed blocked on entrance */
  /* Install our signal handler on all signals in set,
     and unblock them in mask.
     Also mark those signals as being sigwaited on */
  for (s = 1; s < NSIG; s++) {
    if (sigismember(set, s) && s != PTHREAD_SIG_CANCEL) {
  for (s = 1; s <= NSIG; s++) {
    if (sigismember(set, s) && s != PTHREAD_SIG_CANCEL)
      sigdelset(&mask, s);
      action.sa_handler = __pthread_sighandler;
      sigemptyset(&action.sa_mask);
      action.sa_flags = 0;
      sigaction(s, &action, &(saved_signals[s]));
      sigaddset(&sigwaited, s);
    }
  }
  pthread_mutex_unlock(&sigwaited_mut);

  /* Test for cancellation */
  if (sigsetjmp(jmpbuf, 1) == 0) {
    self->p_cancel_jmp = &jmpbuf;
    if (! (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE)) {
      /* Reset the signal count */
      self->p_signal = 0;
      /* Say we're in sigwait */
      self->p_sigwaiting = 1;
      /* Unblock the signals and wait for them */
      sigsuspend(&mask);
    }
  }
  self->p_cancel_jmp = NULL;
  /* The signals are now reblocked. Restore the sighandlers. */
  pthread_mutex_lock(&sigwaited_mut);
  for (s = 1; s < NSIG; s++) {
    if (sigismember(set, s) && s != PTHREAD_SIG_CANCEL) {
      sigaction(s, &(saved_signals[s]), NULL);
      sigdelset(&sigwaited, s);
    }
  }
  pthread_cond_broadcast(&sigwaited_changed);
  pthread_mutex_unlock(&sigwaited_mut);
  /* Check for cancellation */
  /* The signals are now reblocked. Check for cancellation */
  pthread_testcancel();
  /* We should have self->p_signal != 0 and equal to the signal received */
  *sig = self->p_signal;
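The sigwait in signals.c above installs a temporary wrapper handler for every awaited signal, parks the thread in sigsuspend, and restores the saved handlers afterwards. A hedged usage sketch follows (the thread function and the choice of SIGUSR1 are invented): the awaited signal must be blocked in every thread before sigwait runs, which is why the mask is set before pthread_create.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *signal_thread(void *arg)
{
  sigset_t watched;
  int sig = 0;

  (void) arg;
  sigemptyset(&watched);
  sigaddset(&watched, SIGUSR1);
  sigwait(&watched, &sig);        /* blocks until SIGUSR1 arrives */
  printf("received signal %d\n", sig);
  return NULL;
}

int main(void)
{
  sigset_t watched;
  pthread_t tid;

  sigemptyset(&watched);
  sigaddset(&watched, SIGUSR1);
  /* Block the signal first so it is never delivered asynchronously;
     child threads inherit this mask. */
  pthread_sigmask(SIG_BLOCK, &watched, NULL);
  pthread_create(&tid, NULL, signal_thread, NULL);
  pthread_kill(tid, SIGUSR1);     /* the same entry point shown above */
  pthread_join(tid, NULL);
  return 0;
}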
@ -12,15 +12,130 @@
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU Library General Public License for more details. */

/* Spin locks */
/* Internal locks */

#include <sched.h>
#include <time.h>
#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
#include "restart.h"

/* This function is called if the inlined test-and-set in acquire() failed */
/* The status field of a fastlock has the following meaning:
     0: fastlock is free
     1: fastlock is taken, no thread is waiting on it
     ADDR: fastlock is taken, ADDR is address of thread descriptor for
           first waiting thread, other waiting threads are linked via
           their p_nextwaiting field.
   The waiting list is not sorted by priority order.
   Actually, we always insert at top of list (sole insertion mode
   that can be performed without locking).
   For __pthread_unlock, we perform a linear search in the list
   to find the highest-priority, oldest waiting thread.
   This is safe because there are no concurrent __pthread_unlock
   operations -- only the thread that locked the mutex can unlock it. */

void __pthread_lock(struct _pthread_fastlock * lock)
{
  long oldstatus, newstatus;
  pthread_descr self = NULL;

  do {
    oldstatus = lock->status;
    if (oldstatus == 0) {
      newstatus = 1;
    } else {
      self = thread_self();
      self->p_nextwaiting = (pthread_descr) oldstatus;
      newstatus = (long) self;
    }
  } while(! compare_and_swap(&lock->status, oldstatus, newstatus,
                             &lock->spinlock));
  if (oldstatus != 0) suspend(self);
}

int __pthread_trylock(struct _pthread_fastlock * lock)
{
  long oldstatus;

  do {
    oldstatus = lock->status;
    if (oldstatus != 0) return EBUSY;
  } while(! compare_and_swap(&lock->status, 0, 1, &lock->spinlock));
  return 0;
}

void __pthread_unlock(struct _pthread_fastlock * lock)
{
  long oldstatus;
  pthread_descr thr, * ptr, * maxptr;
  int maxprio;

again:
  oldstatus = lock->status;
  if (oldstatus == 1) {
    /* No threads are waiting for this lock */
    if (! compare_and_swap(&lock->status, 1, 0, &lock->spinlock)) goto again;
    return;
  }
  /* Find thread in waiting queue with maximal priority */
  ptr = (pthread_descr *) &lock->status;
  thr = (pthread_descr) oldstatus;
  maxprio = 0;
  maxptr = ptr;
  while (thr != (pthread_descr) 1) {
    if (thr->p_priority >= maxprio) {
      maxptr = ptr;
      maxprio = thr->p_priority;
    }
    ptr = &(thr->p_nextwaiting);
    thr = *ptr;
  }
  /* Remove max prio thread from waiting list. */
  if (maxptr == (pthread_descr *) &lock->status) {
    /* If max prio thread is at head, remove it with compare-and-swap
       to guard against concurrent lock operation */
    thr = (pthread_descr) oldstatus;
    if (! compare_and_swap(&lock->status,
                           oldstatus, (long)(thr->p_nextwaiting),
                           &lock->spinlock))
      goto again;
  } else {
    /* No risk of concurrent access, remove max prio thread normally */
    thr = *maxptr;
    *maxptr = thr->p_nextwaiting;
  }
  /* Wake up the selected waiting thread */
  thr->p_nextwaiting = NULL;
  restart(thr);
}

/* Compare-and-swap emulation with a spinlock */

#ifdef TEST_FOR_COMPARE_AND_SWAP
int __pthread_has_cas = 0;
#endif

#ifndef HAS_COMPARE_AND_SWAP

static void __pthread_acquire(int * spinlock);

int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
                               int * spinlock)
{
  int res;
  if (testandset(spinlock)) __pthread_acquire(spinlock);
  if (*ptr == oldval) {
    *ptr = newval; res = 1;
  } else {
    res = 0;
  }
  *spinlock = 0;
  return res;
}

/* This function is called if the inlined test-and-set
   in __pthread_compare_and_swap() failed */

/* The retry strategy is as follows:
   - We test and set the spinlock MAX_SPIN_COUNT times, calling
@ -40,7 +155,7 @@
   - When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
     sched_yield(), then sleeping again if needed. */

void __pthread_acquire(int * spinlock)
static void __pthread_acquire(int * spinlock)
{
  int cnt = 0;
  struct timespec tm;
@ -57,3 +172,5 @@ void __pthread_acquire(int * spinlock)
  }
}
}

#endif
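On machines that lack an atomic compare-and-swap, the __pthread_compare_and_swap in spinlock.c above makes the read-test-store sequence atomic by bracketing it with a one-word spinlock, falling back to __pthread_acquire's spin/yield/sleep strategy under contention. Here is a stand-alone sketch of the same idea, with GCC's __sync_lock_test_and_set builtin standing in for the per-architecture testandset() macro (an assumption made for portability, not what the patch itself uses):

#include <sched.h>

/* Emulated CAS: the spinlock word serializes all updates to *ptr. */
static int emulated_compare_and_swap(long *ptr, long oldval, long newval,
                                     int *spinlock)
{
  int res;

  while (__sync_lock_test_and_set(spinlock, 1))   /* acquire spinlock */
    sched_yield();                                /* crude back-off */
  if (*ptr == oldval) {
    *ptr = newval;
    res = 1;
  } else {
    res = 0;
  }
  __sync_lock_release(spinlock);                  /* store 0, release */
  return res;
}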
@ -1,7 +1,6 @@
/* Linuxthreads - a simple clone()-based implementation of Posix */
/* threads for Linux. */
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) and */
/* Richard Henderson (rth@tamu.edu) */
/* Copyright (C) 1998 Xavier Leroy (Xavier.Leroy@inria.fr) */
/* */
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Library General Public License */
@ -13,20 +12,52 @@
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU Library General Public License for more details. */

/* Spin locks */
/* Internal locks */

extern void __pthread_acquire(int * spinlock);
extern void __pthread_lock(struct _pthread_fastlock * lock);
extern int __pthread_trylock(struct _pthread_fastlock * lock);
extern void __pthread_unlock(struct _pthread_fastlock * lock);

static inline void acquire(int * spinlock)
static inline void __pthread_init_lock(struct _pthread_fastlock * lock)
{
  if (testandset(spinlock)) __pthread_acquire(spinlock);
  lock->status = 0;
  lock->spinlock = 0;
}

static inline void release(int * spinlock)
#define LOCK_INITIALIZER {0, 0}

#if defined(TEST_FOR_COMPARE_AND_SWAP)

extern int __pthread_has_cas;
extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
                                      int * spinlock);

static inline int compare_and_swap(long * ptr, long oldval, long newval,
                                   int * spinlock)
{
#ifndef RELEASE
  *spinlock = 0;
  if (__pthread_has_cas)
    return __compare_and_swap(ptr, oldval, newval);
  else
    return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
}

#elif defined(HAS_COMPARE_AND_SWAP)

static inline int compare_and_swap(long * ptr, long oldval, long newval,
                                   int * spinlock)
{
  return __compare_and_swap(ptr, oldval, newval);
}

#else
  RELEASE(spinlock);
#endif

extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
                                      int * spinlock);

static inline int compare_and_swap(long * ptr, long oldval, long newval,
                                   int * spinlock)
{
  return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
}

#endif
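With the spinlock.h declarations above, the rest of the library sees a small fastlock API: initialize with __pthread_init_lock or LOCK_INITIALIZER, then bracket critical sections with __pthread_lock and __pthread_unlock, exactly as pthread_kill does with h_lock earlier in this patch. A minimal internal-usage sketch (counter and counter_lock are invented names, and the internal headers are assumed to be in scope):

static struct _pthread_fastlock counter_lock = LOCK_INITIALIZER;
static int counter;

static int bump_counter(void)
{
  int v;

  __pthread_lock(&counter_lock);    /* may enqueue us and suspend */
  v = ++counter;                    /* critical section */
  __pthread_unlock(&counter_lock);  /* restarts the best waiter */
  return v;
}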
@ -1,6 +1,6 @@
/* Machine-dependent pthreads configuration and inline functions.
   i386 version.
   Copyright (C) 1996, 1997 Free Software Foundation, Inc.
   Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Richard Henderson <rth@tamu.edu>.

@ -45,7 +45,9 @@ register char * stack_pointer __asm__ ("%esp");
   We test dynamically whether it's available or not. */

#define HAS_COMPARE_AND_SWAP
#define TEST_FOR_COMPARE_AND_SWAP
#ifndef __i686__
# define TEST_FOR_COMPARE_AND_SWAP
#endif

extern inline int
__compare_and_swap (long int *p, long int oldval, long int newval)
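The i386 hunk ends at the __compare_and_swap signature; its body is not shown here. On i386 such a routine is conventionally built on the lock cmpxchgl instruction, along the lines of the following reconstruction (a sketch, not a quotation of the file):

extern inline int
__compare_and_swap (long int *p, long int oldval, long int newval)
{
  char ret;
  long int readval;

  /* cmpxchgl compares %eax (oldval) with *p; if they are equal it
     stores newval into *p and sets ZF, which sete copies to ret. */
  __asm__ __volatile__ ("lock; cmpxchgl %3, %1; sete %0"
                        : "=q" (ret), "=m" (*p), "=a" (readval)
                        : "r" (newval), "m" (*p), "a" (oldval)
                        : "memory");
  return ret;
}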
@ -43,21 +43,24 @@ typedef unsigned long int pthread_t;
/* Thread descriptors */
typedef struct _pthread_descr_struct *_pthread_descr;

/* Waiting queues (not abstract because mutexes and conditions aren't). */
struct _pthread_queue
/* Fast locks (not abstract because mutexes and conditions aren't abstract). */
struct _pthread_fastlock
{
  _pthread_descr head;   /* First element, or NULL if queue empty. */
  _pthread_descr tail;   /* Last element, or NULL if queue empty. */
  long status;           /* "Free" or "taken" or head of waiting list */
  int spinlock;          /* For compare-and-swap emulation */
};

/* Mutexes (not abstract because of PTHREAD_MUTEX_INITIALIZER). */
/* (The layout is unnatural to maintain binary compatibility
   with earlier releases of LinuxThreads.) */

typedef struct
{
  int m_spinlock;             /* Spin lock to guarantee mutual exclusion. */
  int m_count;                /* 0 if free, > 0 if taken. */
  _pthread_descr m_owner;     /* Owner of mutex (for recursive mutexes) */
  int m_kind;                 /* Kind of mutex */
  struct _pthread_queue m_waiting; /* Threads waiting on this mutex. */
  int m_reserved;             /* Reserved for future use */
  int m_count;                /* Depth of recursive locking */
  _pthread_descr m_owner;     /* Owner thread (if recursive or errcheck) */
  int m_kind;                 /* Mutex kind: fast, recursive or errcheck */
  struct _pthread_fastlock m_lock; /* Underlying fast lock */
} pthread_mutex_t;

#define PTHREAD_MUTEX_INITIALIZER \
@ -68,21 +71,21 @@ typedef struct
/* Conditions (not abstract because of PTHREAD_COND_INITIALIZER) */
typedef struct
{
  int c_spinlock;                  /* Spin lock to protect the queue. */
  struct _pthread_queue c_waiting; /* Threads waiting on this condition. */
  struct _pthread_fastlock c_lock; /* Protect against concurrent access */
  _pthread_descr c_waiting;        /* Threads waiting on this condition */
} pthread_cond_t;

#define PTHREAD_COND_INITIALIZER {0, {0, 0}}
#define PTHREAD_COND_INITIALIZER {{0, 0}, 0}

#ifdef __USE_UNIX98
/* Read-write locks. */
typedef struct
{
  int rw_spinlock;                  /* Spin lock to guarantee mutual exclusion */
  struct _pthread_fastlock rw_lock; /* Lock to guarantee mutual exclusion */
  int rw_readers;                   /* Number of readers */
  _pthread_descr rw_writer;         /* Identity of writer, or NULL if none */
  struct _pthread_queue rw_read_waiting;  /* Threads waiting for reading */
  struct _pthread_queue rw_write_waiting; /* Threads waiting for writing */
  _pthread_descr rw_read_waiting;   /* Threads waiting for reading */
  _pthread_descr rw_write_waiting;  /* Threads waiting for writing */
  int rw_kind;                      /* Reader/Writer preference selection */
  int rw_pshared;                   /* Shared between processes or not */
} pthread_rwlock_t;
@ -231,7 +234,8 @@ extern int pthread_detach __P ((pthread_t __th));
/* Functions for handling attributes. */

/* Initialize thread attribute *ATTR with default attributes
   (detachstate is PTHREAD_JOINABLE, scheduling policy is SCHED_OTHER). */
   (detachstate is PTHREAD_JOINABLE, scheduling policy is SCHED_OTHER,
   no user-provided stack). */
extern int pthread_attr_init __P ((pthread_attr_t *__attr));

/* Destroy thread attribute *ATTR. */
@ -288,6 +292,7 @@ extern int __pthread_attr_getguardsize __P ((__const pthread_attr_t *__attr,
                                             size_t *__guardsize));
extern int pthread_attr_getguardsize __P ((__const pthread_attr_t *__attr,
                                           size_t *__guardsize));
#endif

/* Set the starting address of the stack of the thread to be created.
   Depending on whether the stack grows up or down the value must either
@ -317,7 +322,6 @@ extern int __pthread_attr_getstacksize __P ((__const pthread_attr_t *__attr,
                                             size_t *__stacksize));
extern int pthread_attr_getstacksize __P ((__const pthread_attr_t *__attr,
                                           size_t *__stacksize));
#endif

/* Functions for scheduling control. */
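These pthread.h hunks make the stack-address and stack-size prototypes unconditional, matching the ChangeLog entry above. A hedged example of the two calls (STACK_SIZE and worker are invented; per the comment above, the address must be chosen according to the stack growth direction, and on i386 the stack grows down, so the sketch passes the high end of the region):

#include <pthread.h>
#include <stdlib.h>

#define STACK_SIZE (1024 * 1024)

static void *worker(void *arg)
{
  return arg;
}

int main(void)
{
  pthread_attr_t attr;
  pthread_t tid;
  char *base = malloc(STACK_SIZE);

  pthread_attr_init(&attr);
  pthread_attr_setstacksize(&attr, STACK_SIZE);
  /* Top of the region on a downward-growing stack. */
  pthread_attr_setstackaddr(&attr, base + STACK_SIZE);
  pthread_create(&tid, &attr, worker, NULL);
  pthread_join(tid, NULL);
  free(base);
  return 0;
}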
@ -1,5 +1,5 @@
/* Define POSIX options for Linux.
   Copyright (C) 1996, 1997 Free Software Foundation, Inc.
   Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
@ -82,6 +82,12 @@
/* We provide priority scheduling for threads. */
#define _POSIX_THREAD_PRIORITY_SCHEDULING 1

/* We support user-defined stack sizes. */
#define _POSIX_THREAD_ATTR_STACKSIZE 1

/* We support user-defined stacks. */
#define _POSIX_THREAD_ATTR_STACKADDR 1

/* We support POSIX.1b semaphores, but only the non-shared form for now. */
/*#define _POSIX_SEMAPHORES 1 XXX We are not quite there now. */
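The posix_opt.h additions advertise the new capabilities so portable code can probe for them at compile time rather than assume they exist. A small hypothetical guard (set_stack_size is an invented helper):

#include <unistd.h>    /* pulls in the _POSIX_* option macros on Linux */
#include <pthread.h>

/* Only touch the stack-size attribute when the platform advertises
   support for it; otherwise keep the implementation default. */
static int set_stack_size(pthread_attr_t *attr, size_t size)
{
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  return pthread_attr_setstacksize(attr, size);
#else
  (void) attr;
  (void) size;
  return 0;
#endif
}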
@ -1,6 +1,6 @@
/* Definitions of constants and data structure for POSIX 1003.1b-1993
   scheduling interface.
   Copyright (C) 1996, 1997 Free Software Foundation, Inc.
   Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
@ -38,10 +38,11 @@ struct sched_param
/* Cloning flags. */
# define CSIGNAL       0x000000ff /* Signal mask to be sent at exit. */
# define CLONE_VM      0x00000100 /* Set if VM shared between processes. */
# define CLONE_FS      0x00000200 /* Set if fs info shared between processes.*/
# define CLONE_FILES   0x00000400 /* Set if open files shared between processes*/
# define CLONE_FS      0x00000200 /* Set if fs info shared between processes. */
# define CLONE_FILES   0x00000400 /* Set if open files shared between processes. */
# define CLONE_SIGHAND 0x00000800 /* Set if signal handlers shared. */
# define CLONE_PID     0x00001000 /* Set if pid shared. */
# define CLONE_PTRACE  0x00002000 /* Set if tracing continues on the child. */
#endif