diff --git a/linuxthreads/ChangeLog b/linuxthreads/ChangeLog
index 9ed86c5a91..58947167da 100644
--- a/linuxthreads/ChangeLog
+++ b/linuxthreads/ChangeLog
@@ -1,5 +1,20 @@
 2000-08-04  Ulrich Drepper
 
+	* internals.h: Declare __pthread_max_stacksize.
+	* pthread.c (__pthread_max_stacksize): New variable.
+	(__pthread_initialize_manager): Determine __pthread_max_stacksize
+	value.
+	* manager.c (thread_segment): Always return NULL if FLOATING_STACKS.
+	(pthread_allocate_stack): Allow kernel to choose stack address if
+	FLOATING_STACKS.  This also handles variable-sized stacks.
+	Always allocate stack and guardpage together.  Use mprotect to
+	change guardpage access.
+	* sysdeps/i386/useldt.h: Define FLOATING_STACKS and
+	ARCH_STACK_MAX_SIZE.
+
+	* attr.c (__pthread_attr_setstacksize): Also test value against
+	upper limit.
+
 	* manager.c (__pthread_nonstandard_stacks): Define only if THREAD_SELF
 	is not defined.
 	(pthread_allocate_stack): Always initialize guardaddr to a correct
diff --git a/linuxthreads/attr.c b/linuxthreads/attr.c
index 966cea1203..90ab019837 100644
--- a/linuxthreads/attr.c
+++ b/linuxthreads/attr.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include <sys/resource.h>
 #include "pthread.h"
 #include "internals.h"
 #include
@@ -184,6 +185,30 @@ weak_alias (__pthread_attr_getstackaddr, pthread_attr_getstackaddr)
 
 int __pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
 {
+#ifdef FLOATING_STACKS
+  /* We have to check against the maximum allowed stack size.  This is no
+     problem if the manager is already started and we determined it.  If
+     this hasn't happened, we have to find the limit ourselves.  */
+  if (__pthread_max_stacksize == 0)
+    {
+      struct rlimit limit;
+
+      getrlimit(RLIMIT_STACK, &limit);
+# ifdef NEED_SEPARATE_REGISTER_STACK
+      __pthread_max_stacksize = limit.rlim_max / 2;
+# else
+      __pthread_max_stacksize = limit.rlim_max;
+# endif
+    }
+
+  if (stacksize > __pthread_max_stacksize)
+    return EINVAL;
+#else
+  /* We have a fixed size limit.  */
+  if (stacksize > STACK_SIZE)
+    return EINVAL;
+#endif
+
   /* We don't accept values smaller than PTHREAD_STACK_MIN.  */
   if (stacksize < PTHREAD_STACK_MIN)
     return EINVAL;
diff --git a/linuxthreads/internals.h b/linuxthreads/internals.h
index fa6bf6b9cd..f5915529b2 100644
--- a/linuxthreads/internals.h
+++ b/linuxthreads/internals.h
@@ -272,6 +272,11 @@ extern int __pthread_manager_reader;
 extern char *__pthread_manager_thread_bos;
 extern char *__pthread_manager_thread_tos;
 
+#ifdef FLOATING_STACKS
+/* Maximum stack size.  */
+extern size_t __pthread_max_stacksize;
+#endif
+
 /* Pending request for a process-wide exit */
 extern int __pthread_exit_requested, __pthread_exit_code;
diff --git a/linuxthreads/manager.c b/linuxthreads/manager.c
index b76203c505..1510551374 100644
--- a/linuxthreads/manager.c
+++ b/linuxthreads/manager.c
@@ -64,25 +64,29 @@ volatile pthread_descr __pthread_last_event;
 /* Stack segment numbers are also indices into the __pthread_handles array. */
 /* Stack segment number 0 is reserved for the initial thread. */
 
+#if FLOATING_STACKS
+# define thread_segment(seq) NULL
+#else
 static inline pthread_descr thread_segment(int seg)
 {
   return (pthread_descr)(THREAD_STACK_START_ADDRESS - (seg - 1) * STACK_SIZE)
          - 1;
 }
+#endif
 
 /* Flag set in signal handler to record child termination */
 
-static volatile int terminated_children = 0;
+static volatile int terminated_children;
 
 /* Flag set when the initial thread is blocked on pthread_exit waiting
    for all other threads to terminate */
 
-static int main_thread_exiting = 0;
+static int main_thread_exiting;
 
 /* Counter used to generate unique thread identifier.
    Thread identifier is pthread_threads_counter + segment. */
 
-static pthread_t pthread_threads_counter = 0;
+static pthread_t pthread_threads_counter;
 
 #ifdef NEED_SEPARATE_REGISTER_STACK
 /* Signal masks for the manager.  These have to be global only when clone2
@@ -338,6 +342,7 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
       void *map_addr;
 
       /* Allocate space for stack and thread descriptor at default address */
+#ifdef NEED_SEPARATE_REGISTER_STACK
       if (attr != NULL)
         {
           guardsize = page_roundup (attr->__guardsize, granularity);
@@ -350,7 +355,7 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
           guardsize = granularity;
           stacksize = STACK_SIZE - granularity;
         }
-#ifdef NEED_SEPARATE_REGISTER_STACK
+
       new_thread = default_new_thread;
       new_thread_bottom = (char *) (new_thread + 1) - stacksize - guardsize;
       /* Includes guard area, unlike the normal case.  Use the bottom
@@ -361,6 +366,8 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
         in the same region.  The cost is that we might be able to map
         slightly fewer stacks.  */
 
+      /* XXX Fix for floating stacks with variable sizes.  */
+
      /* First the main stack: */
      if (mmap((caddr_t)((char *)(new_thread + 1) - stacksize / 2),
               stacksize / 2, PROT_READ | PROT_WRITE | PROT_EXEC,
@@ -382,37 +389,63 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
       guardaddr = new_thread_bottom + stacksize/2;
       /* We leave the guard area in the middle unmapped.  */
 #else /* !NEED_SEPARATE_REGISTER_STACK */
+# if FLOATING_STACKS
+      if (attr != NULL)
+        {
+          guardsize = page_roundup (attr->__guardsize, granularity);
+          stacksize = __pthread_max_stacksize - guardsize;
+          stacksize = MIN (stacksize,
+                           page_roundup (attr->__stacksize, granularity));
+        }
+      else
+        {
+          guardsize = granularity;
+          stacksize = __pthread_max_stacksize - guardsize;
+        }
+
+      map_addr = mmap(NULL, stacksize + guardsize,
+                      PROT_READ | PROT_WRITE | PROT_EXEC,
+                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+      if (map_addr == MAP_FAILED)
+        /* No more memory available.  */
+        return -1;
+
+      guardaddr = map_addr;
+      if (guardsize > 0)
+        mprotect (guardaddr, guardsize, PROT_NONE);
+
+      new_thread_bottom = (char *) map_addr + guardsize;
+      new_thread = ((pthread_descr) (new_thread_bottom + stacksize)) - 1;
+# else
+      if (attr != NULL)
+        {
+          guardsize = page_roundup (attr->__guardsize, granularity);
+          stacksize = STACK_SIZE - guardsize;
+          stacksize = MIN (stacksize,
+                           page_roundup (attr->__stacksize, granularity));
+        }
+      else
+        {
+          guardsize = granularity;
+          stacksize = STACK_SIZE - granularity;
+        }
+
       new_thread = default_new_thread;
       new_thread_bottom = (char *) (new_thread + 1) - stacksize;
-      map_addr = mmap((caddr_t)((char *)(new_thread + 1) - stacksize),
-                      stacksize, PROT_READ | PROT_WRITE | PROT_EXEC,
+      map_addr = mmap((caddr_t)((char *)(new_thread + 1) - stacksize - guardsize),
+                      stacksize + guardsize,
+                      PROT_READ | PROT_WRITE | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
       if (map_addr == MAP_FAILED)
           /* Bad luck, this segment is already mapped. */
           return -1;
-      /* We manage to get a stack.  Now see whether we need a guard
-         and allocate it if necessary.  Notice that the default
-         attributes (stack_size = STACK_SIZE - pagesize and guardsize
-         = pagesize) do not need a guard page, since the RLIMIT_STACK
-         soft limit prevents stacks from running into one another. */
-      if (stacksize == STACK_SIZE - pagesize)
-        {
-          /* We don't need a guard page. */
-          guardaddr = new_thread_bottom;
-          guardsize = 0;
-        }
-      else
-        {
-          /* Put a bad page at the bottom of the stack */
-          guardaddr = (void *)new_thread_bottom - guardsize;
-          if (mmap ((caddr_t) guardaddr, guardsize, 0, MAP_FIXED, -1, 0)
-              == MAP_FAILED)
-            {
-              /* We don't make this an error. */
-              guardaddr = new_thread_bottom;
-              guardsize = 0;
-            }
-        }
+
+      /* We manage to get a stack.  Protect the guard area pages if
+         necessary.  */
+      guardaddr = map_addr;
+      if (guardsize > 0)
+        mprotect (guardaddr, guardsize, PROT_NONE);
+# endif
 #endif /* !NEED_SEPARATE_REGISTER_STACK */
     }
   /* Clear the thread data structure. */
diff --git a/linuxthreads/pthread.c b/linuxthreads/pthread.c
index d13923a821..514ba5b71f 100644
--- a/linuxthreads/pthread.c
+++ b/linuxthreads/pthread.c
@@ -182,6 +182,9 @@ char *__pthread_manager_thread_tos;
 int __pthread_exit_requested;
 int __pthread_exit_code;
 
+/* Maximum stack size.  */
+size_t __pthread_max_stacksize;
+
 /* Nonzero if the machine has more than one processor.  */
 int __pthread_smp_kernel;
@@ -455,20 +458,32 @@ int __pthread_initialize_manager(void)
   struct rlimit limit;
   int max_stack;
 
+  getrlimit(RLIMIT_STACK, &limit);
+#ifdef FLOATING_STACKS
+  if (limit.rlim_cur == RLIM_INFINITY)
+    limit.rlim_cur = ARCH_STACK_MAX_SIZE;
+# ifdef NEED_SEPARATE_REGISTER_STACK
+  max_stack = limit.rlim_cur / 2;
+# else
+  max_stack = limit.rlim_cur;
+# endif
+
+  __pthread_max_stacksize = max_stack;
+#else
   /* Play with the stack size limit to make sure that no stack ever grows
      beyond STACK_SIZE minus one page (to act as a guard page). */
-  getrlimit(RLIMIT_STACK, &limit);
-#ifdef NEED_SEPARATE_REGISTER_STACK
+# ifdef NEED_SEPARATE_REGISTER_STACK
   /* STACK_SIZE bytes hold both the main stack and register backing
      store.  The rlimit value applies to each individually. */
-  max_stack = STACK_SIZE/2 - __getpagesize();
-#else
+  max_stack = STACK_SIZE/2 - __getpagesize ();
+# else
   max_stack = STACK_SIZE - __getpagesize();
-#endif
+# endif
   if (limit.rlim_cur > max_stack) {
     limit.rlim_cur = max_stack;
     setrlimit(RLIMIT_STACK, &limit);
   }
+#endif
   /* If basic initialization not done yet (e.g. we're called from a
      constructor run before our constructor), do it now */
   if (__pthread_initial_thread_bos == NULL) pthread_initialize();
diff --git a/linuxthreads/sysdeps/i386/useldt.h b/linuxthreads/sysdeps/i386/useldt.h
index 10cc3d41a3..ebf520cc39 100644
--- a/linuxthreads/sysdeps/i386/useldt.h
+++ b/linuxthreads/sysdeps/i386/useldt.h
@@ -169,3 +169,9 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
      member))); \
   } \
 })
+
+/* We want the OS to assign stack addresses.  */
+#define FLOATING_STACKS 1
+
+/* Maximum size of the stack if the rlimit is unlimited.  */
+#define ARCH_STACK_MAX_SIZE 8*1024*1024
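
The core of the FLOATING_STACKS path above is a two-step allocation: map the stack and its guard area in a single anonymous mmap with a NULL address hint, so the kernel chooses the placement and per-thread stack sizes become possible, then mprotect the guard pages at the low end to PROT_NONE. The standalone sketch below illustrates that pattern outside of libpthread; the names (floating_stack_alloc, struct stack_area) and the RLIMIT_STACK capping in main are illustrative assumptions, not code from the patch.

/* Standalone sketch (not part of the patch): allocate a "floating" stack
   the way pthread_allocate_stack does under FLOATING_STACKS.  */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <unistd.h>

struct stack_area {
  void *guard;       /* lowest address: guardsize bytes, PROT_NONE */
  void *bottom;      /* usable stack starts here */
  void *top;         /* highest usable address (descriptor would sit here) */
  size_t stacksize;
  size_t guardsize;
};

/* Round SIZE up to a multiple of the page size, as page_roundup does.  */
static size_t page_roundup(size_t size, size_t pagesize)
{
  return (size + pagesize - 1) & ~(pagesize - 1);
}

static int floating_stack_alloc(size_t req_stacksize, size_t req_guardsize,
                                struct stack_area *out)
{
  size_t pagesize = (size_t) sysconf(_SC_PAGESIZE);
  size_t guardsize = page_roundup(req_guardsize, pagesize);
  size_t stacksize = page_roundup(req_stacksize, pagesize);

  /* Let the kernel choose the address: NULL hint, no MAP_FIXED.  */
  void *map_addr = mmap(NULL, stacksize + guardsize,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (map_addr == MAP_FAILED)
    return -1;

  /* Guard area at the low end, made inaccessible so that a stack
     overflow faults instead of silently corrupting other memory.  */
  if (guardsize > 0 && mprotect(map_addr, guardsize, PROT_NONE) != 0)
    {
      munmap(map_addr, stacksize + guardsize);
      return -1;
    }

  out->guard = map_addr;
  out->bottom = (char *) map_addr + guardsize;
  out->top = (char *) map_addr + guardsize + stacksize;
  out->stacksize = stacksize;
  out->guardsize = guardsize;
  return 0;
}

int main(void)
{
  /* Cap the request at the RLIMIT_STACK soft limit, mirroring how
     __pthread_initialize_manager derives __pthread_max_stacksize.  */
  struct rlimit limit;
  size_t want = 2 * 1024 * 1024;
  if (getrlimit(RLIMIT_STACK, &limit) == 0 && limit.rlim_cur != RLIM_INFINITY
      && want > limit.rlim_cur)
    want = limit.rlim_cur;

  struct stack_area sa;
  if (floating_stack_alloc(want, (size_t) sysconf(_SC_PAGESIZE), &sa) != 0)
    {
      perror("floating_stack_alloc");
      return EXIT_FAILURE;
    }
  printf("stack: [%p, %p), guard: %zu bytes below %p\n",
         sa.bottom, sa.top, sa.guardsize, sa.bottom);
  munmap(sa.guard, sa.guardsize + sa.stacksize);
  return EXIT_SUCCESS;
}

Letting the kernel choose the address is also why thread_segment degenerates to NULL under FLOATING_STACKS: the descriptor can no longer be derived from a fixed STACK_SIZE grid, which is acceptable on i386 with useldt.h because the LDT-based thread register there does not depend on where the stack lives.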