Mirror of https://sourceware.org/git/glibc.git (synced 2024-12-22 10:50:07 +00:00)
* sysdeps/unix/sysv/linux/powerpc/sem_post.c (__new_sem_post):
Use __asm __volatile (__lll_acq_instr ::: "memory") instead of atomic_full_barrier. 2007-07-31 Jakub Jelinek <jakub@redhat.com> * allocatestack.c (stack_cache_lock): Change type to int. (get_cached_stack, allocate_stack, __deallocate_stack, __make_stacks_executable, __find_thread_by_id, __nptl_setxid, __pthread_init_static_tls, __wait_lookup_done): Add LLL_PRIVATE as second argument to lll_lock and lll_unlock macros on stack_cache_lock. * pthread_create.c (__find_in_stack_list): Likewise. (start_thread): Similarly with pd->lock. Use lll_robust_dead macro instead of lll_robust_mutex_dead, pass LLL_SHARED to it as second argument. * descr.h (struct pthread): Change lock and setxid_futex field type to int. * old_pthread_cond_broadcast.c (__pthread_cond_broadcast_2_0): Use LLL_LOCK_INITIALIZER instead of LLL_MUTEX_LOCK_INITIALIZER. * old_pthread_cond_signal.c (__pthread_cond_signal_2_0): Likewise. * old_pthread_cond_timedwait.c (__pthread_cond_timedwait_2_0): Likewise. * old_pthread_cond_wait.c (__pthread_cond_wait_2_0): Likewise. * pthread_cond_init.c (__pthread_cond_init): Likewise. * pthreadP.h (__attr_list_lock): Change type to int. * pthread_attr_init.c (__attr_list_lock): Likewise. * pthread_barrier_destroy.c (pthread_barrier_destroy): Pass ibarrier->private ^ FUTEX_PRIVATE_FLAG as second argument to lll_{,un}lock. * pthread_barrier_wait.c (pthread_barrier_wait): Likewise and also for lll_futex_{wake,wait}. * pthread_barrier_init.c (pthread_barrier_init): Make iattr a pointer to const. * pthread_cond_broadcast.c (__pthread_cond_broadcast): Pass LLL_SHARED as second argument to lll_{,un}lock. * pthread_cond_destroy.c (__pthread_cond_destroy): Likewise. * pthread_cond_signal.c (__pthread_cond_singal): Likewise. * pthread_cond_timedwait.c (__pthread_cond_timedwait): Likewise. * pthread_cond_wait.c (__condvar_cleanup, __pthread_cond_wait): Likewise. * pthread_getattr_np.c (pthread_getattr_np): Add LLL_PRIVATE as second argument to lll_{,un}lock macros on pd->lock. * pthread_getschedparam.c (__pthread_getschedparam): Likewise. * pthread_setschedparam.c (__pthread_setschedparam): Likewise. * pthread_setschedprio.c (pthread_setschedprio): Likewise. * tpp.c (__pthread_tpp_change_priority, __pthread_current_priority): Likewise. * sysdeps/pthread/createthread.c (do_clone, create_thread): Likewise. * pthread_once.c (once_lock): Change type to int. (__pthread_once): Pass LLL_PRIVATE as second argument to lll_{,un}lock macros on once_lock. * pthread_rwlock_rdlock.c (__pthread_rwlock_rdlock): Use lll_{,un}lock macros instead of lll_mutex_{,un}lock, pass rwlock->__data.__shared as second argument to them and similarly for lll_futex_w*. * pthread_rwlock_timedrdlock.c (pthread_rwlock_timedrdlock): Likewise. * pthread_rwlock_timedwrlock.c (pthread_rwlock_timedwrlock): Likewise. * pthread_rwlock_tryrdlock.c (__pthread_rwlock_tryrdlock): Likewise. * pthread_rwlock_trywrlock.c (__pthread_rwlock_trywrlock): Likewise. * pthread_rwlock_unlock.c (__pthread_rwlock_unlock): Likewise. * pthread_rwlock_wrlock.c (__pthread_rwlock_wrlock): Likewise. * sem_close.c (sem_close): Pass LLL_PRIVATE as second argument to lll_{,un}lock macros on __sem_mappings_lock. * sem_open.c (check_add_mapping): Likewise. (__sem_mappings_lock): Change type to int. * semaphoreP.h (__sem_mappings_lock): Likewise. * pthread_mutex_lock.c (LLL_MUTEX_LOCK, LLL_MUTEX_TRYLOCK, LLL_ROBUST_MUTEX_LOCK): Use lll_{,try,robust_}lock macros instead of lll_*mutex_*, pass LLL_SHARED as last argument. 
(__pthread_mutex_lock): Use lll_unlock instead of lll_mutex_unlock, pass LLL_SHARED as last argument. * sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c (LLL_MUTEX_LOCK, LLL_MUTEX_TRYLOCK, LLL_ROBUST_MUTEX_LOCK): Use lll_{cond_,cond_try,robust_cond}lock macros instead of lll_*mutex_*, pass LLL_SHARED as last argument. * pthread_mutex_timedlock.c (pthread_mutex_timedlock): Use lll_{timed,try,robust_timed,un}lock instead of lll_*mutex*, pass LLL_SHARED as last argument. * pthread_mutex_trylock.c (__pthread_mutex_trylock): Similarly. * pthread_mutex_unlock.c (__pthread_mutex_unlock_usercnt): Similarly. * sysdeps/pthread/bits/libc-lock.h (__libc_lock_lock, __libc_lock_lock_recursive, __libc_lock_unlock, __libc_lock_unlock_recursive): Pass LLL_PRIVATE as second argument to lll_{,un}lock. * sysdeps/pthread/bits/stdio-lock.h (_IO_lock_lock, _IO_lock_unlock): Likewise. * sysdeps/unix/sysv/linux/fork.c (__libc_fork): Don't use compound literal. * sysdeps/unix/sysv/linux/unregister-atfork.c (__unregister_atfork): Pass LLL_PRIVATE as second argument to lll_{,un}lock macros on __fork_lock. * sysdeps/unix/sysv/linux/register-atfork.c (__register_atfork, free_mem): Likewise. (__fork_lock): Change type to int. * sysdeps/unix/sysv/linux/fork.h (__fork_lock): Likewise. * sysdeps/unix/sysv/linux/sem_post.c (__new_sem_post): Pass isem->private ^ FUTEX_PRIVATE_FLAG as second argument to lll_futex_wake. * sysdeps/unix/sysv/linux/sem_timedwait.c (sem_timedwait): Likewise. * sysdeps/unix/sysv/linux/sem_wait.c (__new_sem_wait): Likewise. * sysdeps/unix/sysv/linux/lowlevellock.c (__lll_lock_wait_private): New function. (__lll_lock_wait, __lll_timedlock_wait): Add private argument and pass it through to lll_futex_*wait, only compile in when IS_IN_libpthread. * sysdeps/unix/sysv/linux/lowlevelrobustlock.c (__lll_robust_lock_wait, __lll_robust_timedlock_wait): Add private argument and pass it through to lll_futex_*wait. * sysdeps/unix/sysv/linux/alpha/lowlevellock.h: Renamed all lll_mutex_* resp. lll_robust_mutex_* macros to lll_* resp. lll_robust_*. Renamed all __lll_mutex_* resp. __lll_robust_mutex_* inline functions to __lll_* resp. __lll_robust_*. (LLL_MUTEX_LOCK_INITIALIZER): Remove. (lll_mutex_dead): Add private argument. (__lll_lock_wait_private): New prototype. (__lll_lock_wait, __lll_robust_lock_wait, __lll_lock_timedwait, __lll_robust_lock_timedwait): Add private argument to prototypes. (__lll_lock): Add private argument, if it is constant LLL_PRIVATE, call __lll_lock_wait_private, otherwise pass private to __lll_lock_wait. (__lll_robust_lock, __lll_cond_lock, __lll_timedlock, __lll_robust_timedlock): Add private argument, pass it to __lll_*wait functions. (__lll_unlock): Add private argument, if it is constant LLL_PRIVATE, call __lll_unlock_wake_private, otherwise pass private to __lll_unlock_wake. (__lll_robust_unlock): Add private argument, pass it to __lll_robust_unlock_wake. (lll_lock, lll_robust_lock, lll_cond_lock, lll_timedlock, lll_robust_timedlock, lll_unlock, lll_robust_unlock): Add private argument, pass it through to __lll_* inline function. (__lll_mutex_unlock_force, lll_mutex_unlock_force): Remove. (lll_lock_t): Remove. (__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake, __lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait, lll_cond_wake, lll_cond_broadcast): Remove. * sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise. * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise. * sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise. 
* sysdeps/unix/sysv/linux/sparc/lowlevellock.h: Likewise. * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Allow including the header from assembler. Renamed all lll_mutex_* resp. lll_robust_mutex_* macros to lll_* resp. lll_robust_*. (LOCK, FUTEX_CMP_REQUEUE, FUTEX_WAKE_OP, FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): Define. (LLL_MUTEX_LOCK_INITIALIZER, LLL_MUTEX_LOCK_INITIALIZER_LOCKED, LLL_MUTEX_LOCK_INITIALIZER_WAITERS): Remove. (__lll_mutex_lock_wait, __lll_mutex_timedlock_wait, __lll_mutex_unlock_wake, __lll_lock_wait, __lll_unlock_wake): Remove prototype. (__lll_trylock_asm, __lll_lock_asm_start, __lll_unlock_asm): Define. (lll_robust_trylock, lll_cond_trylock): Use LLL_LOCK_INITIALIZER* rather than LLL_MUTEX_LOCK_INITIALIZER* macros. (lll_trylock): Likewise, use __lll_trylock_asm, pass MULTIPLE_THREADS_OFFSET as another asm operand. (lll_lock): Add private argument, use __lll_lock_asm_start, pass MULTIPLE_THREADS_OFFSET as last asm operand, call __lll_lock_wait_private if private is constant LLL_PRIVATE, otherwise pass private as another argument to __lll_lock_wait. (lll_robust_lock, lll_cond_lock, lll_robust_cond_lock, lll_timedlock, lll_robust_timedlock): Add private argument, pass private as another argument to __lll_*lock_wait call. (lll_unlock): Add private argument, use __lll_unlock_asm, pass MULTIPLE_THREADS_OFFSET as another asm operand, call __lll_unlock_wake_private if private is constant LLL_PRIVATE, otherwise pass private as another argument to __lll_unlock_wake. (lll_robust_unlock): Add private argument, pass private as another argument to __lll_unlock_wake. (lll_robust_dead): Add private argument, use __lll_private_flag macro. (lll_islocked): Use LLL_LOCK_INITIALIZER instead of LLL_MUTEX_LOCK_INITIALIZER. (lll_lock_t): Remove. (LLL_LOCK_INITIALIZER_WAITERS): Define. (__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake, __lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait, lll_cond_wake, lll_cond_broadcast): Remove. * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise. * sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S: Revert 2007-05-2{3,9} changes. * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Include kernel-features.h and lowlevellock.h. (LOAD_PRIVATE_FUTEX_WAIT): Define. (LOAD_FUTEX_WAIT): Rewritten. (LOCK, SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define. (__lll_lock_wait_private, __lll_unlock_wake_private): New functions. (__lll_mutex_lock_wait): Rename to ... (__lll_lock_wait): ... this. Take futex addr from %edx instead of %ecx, %ecx is now private argument. Don't compile in for libc.so. (__lll_mutex_timedlock_wait): Rename to ... (__lll_timedlock_wait): ... this. Use __NR_gettimeofday. %esi contains private argument. Don't compile in for libc.so. (__lll_mutex_unlock_wake): Rename to ... (__lll_unlock_wake): ... this. %ecx contains private argument. Don't compile in for libc.so. (__lll_timedwait_tid): Use __NR_gettimeofday. * sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S: Include kernel-features.h and lowlevellock.h. (LOAD_FUTEX_WAIT): Define. (LOCK, SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define. (__lll_robust_mutex_lock_wait): Rename to ... (__lll_robust_lock_wait): ... this. Futex addr is now in %edx argument, %ecx argument contains private. Use LOAD_FUTEX_WAIT macro. (__lll_robust_mutex_timedlock_wait): Rename to ... (__lll_robust_timedlock_wait): ... this. Use __NR_gettimeofday. %esi argument contains private, use LOAD_FUTEX_WAIT macro. 
* sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (pthread_barrier_wait): Rename __lll_mutex_* to __lll_*, pass PRIVATE(%ebx) ^ LLL_SHARED as private argument in %ecx to __lll_lock_wait and __lll_unlock_wake, pass MUTEX(%ebx) address to __lll_lock_wait in %edx. * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S: Include lowlevellock.h and pthread-errnos.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, EINVAL, LOCK): Don't define. (__pthread_cond_broadcast): Rename __lll_mutex_* to __lll_*, pass cond_lock address in %edx rather than %ecx to __lll_lock_wait, pass LLL_SHARED in %ecx to both __lll_lock_wait and __lll_unlock_wake. * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S: Include lowlevellock.h and pthread-errnos.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_WAKE_OP, FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, EINVAL, LOCK): Don't define. (__pthread_cond_signal): Rename __lll_mutex_* to __lll_*, pass cond_lock address in %edx rather than %ecx to __lll_lock_wait, pass LLL_SHARED in %ecx to both __lll_lock_wait and __lll_unlock_wake. * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Include lowlevellock.h. (SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_cond_timedwait): Rename __lll_mutex_* to __lll_*, pass cond_lock address in %edx rather than %ecx to __lll_lock_wait, pass LLL_SHARED in %ecx to both __lll_lock_wait and __lll_unlock_wake. Use __NR_gettimeofday. * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_cond_wait, __condvar_w_cleanup): Rename __lll_mutex_* to __lll_*, pass cond_lock address in %edx rather than %ecx to __lll_lock_wait, pass LLL_SHARED in %ecx to both __lll_lock_wait and __lll_unlock_wake. * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_rwlock_rdlock): Rename __lll_mutex_* to __lll_*, pass MUTEX(%ebx) address in %edx rather than %ecx to __lll_lock_wait, pass PSHARED(%ebx) in %ecx to both __lll_lock_wait and __lll_unlock_wake. Move return value from %ecx to %edx register. * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S: Include lowlevellock.h. (SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass MUTEX(%ebp) address in %edx rather than %ecx to __lll_lock_wait, pass PSHARED(%ebp) in %ecx to both __lll_lock_wait and __lll_unlock_wake. Move return value from %ecx to %edx register. Use __NR_gettimeofday. * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S: Include lowlevellock.h. (SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass MUTEX(%ebp) address in %edx rather than %ecx to __lll_lock_wait, pass PSHARED(%ebp) in %ecx to both __lll_lock_wait and __lll_unlock_wake. Move return value from %ecx to %edx register. Use __NR_gettimeofday. * sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_rwlock_unlock): Rename __lll_mutex_* to __lll_*, pass MUTEX(%edi) address in %edx rather than %ecx to __lll_lock_wait, pass PSHARED(%edi) in %ecx to both __lll_lock_wait and __lll_unlock_wake. 
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass MUTEX(%ebx) address in %edx rather than %ecx to __lll_lock_wait, pass PSHARED(%ebx) in %ecx to both __lll_lock_wait and __lll_unlock_wake. Move return value from %ecx to %edx register. * sysdeps/unix/sysv/linux/i386/pthread_once.S: Include lowlevellock.h. (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Don't define. * sysdeps/unix/sysv/linux/i386/i486/sem_post.S: Include lowlevellock.h. (LOCK, SYS_futex, FUTEX_WAKE): Don't define. * sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S: Include lowlevellock.h. (LOCK, SYS_futex, SYS_gettimeofday, FUTEX_WAIT): Don't define. (sem_timedwait): Use __NR_gettimeofday. * sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S: Include lowlevellock.h. (LOCK): Don't define. * sysdeps/unix/sysv/linux/i386/i486/sem_wait.S: Include lowlevellock.h. (LOCK, SYS_futex, FUTEX_WAIT): Don't define. * sysdeps/unix/sysv/linux/powerpc/sem_post.c: Wake only when there are waiters. * sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S: Revert 2007-05-2{3,9} changes. * sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Include kernel-features.h and lowlevellock.h. (LOAD_PRIVATE_FUTEX_WAIT): Define. (LOAD_FUTEX_WAIT): Rewritten. (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define. (__lll_lock_wait_private, __lll_unlock_wake_private): New functions. (__lll_mutex_lock_wait): Rename to ... (__lll_lock_wait): ... this. %esi is now private argument. Don't compile in for libc.so. (__lll_mutex_timedlock_wait): Rename to ... (__lll_timedlock_wait): ... this. %esi contains private argument. Don't compile in for libc.so. (__lll_mutex_unlock_wake): Rename to ... (__lll_unlock_wake): ... this. %esi contains private argument. Don't compile in for libc.so. * sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: Include kernel-features.h and lowlevellock.h. (LOAD_FUTEX_WAIT): Define. (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define. (__lll_robust_mutex_lock_wait): Rename to ... (__lll_robust_lock_wait): ... this. %esi argument contains private. Use LOAD_FUTEX_WAIT macro. (__lll_robust_mutex_timedlock_wait): Rename to ... (__lll_robust_timedlock_wait): ... this. %esi argument contains private, use LOAD_FUTEX_WAIT macro. * sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (pthread_barrier_wait): Rename __lll_mutex_* to __lll_*, pass PRIVATE(%rdi) ^ LLL_SHARED as private argument in %esi to __lll_lock_wait and __lll_unlock_wake. * sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S: Include lowlevellock.h and pthread-errnos.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, EINVAL, LOCK): Don't define. (__pthread_cond_broadcast): Rename __lll_mutex_* to __lll_*, pass LLL_SHARED in %esi to both __lll_lock_wait and __lll_unlock_wake. * sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S: Include lowlevellock.h and pthread-errnos.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_WAKE_OP, FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, EINVAL, LOCK): Don't define. (__pthread_cond_signal): Rename __lll_mutex_* to __lll_*, pass LLL_SHARED in %esi to both __lll_lock_wait and __lll_unlock_wake. * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. 
(__pthread_cond_timedwait): Rename __lll_mutex_* to __lll_*, pass LLL_SHARED in %esi to both __lll_lock_wait and __lll_unlock_wake. * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_cond_wait, __condvar_cleanup): Rename __lll_mutex_* to __lll_*, pass LLL_SHARED in %esi to both __lll_lock_wait and __lll_unlock_wake. * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): Don't define. (__pthread_rwlock_rdlock): Rename __lll_mutex_* to __lll_*, pass PSHARED(%rdi) in %esi to both __lll_lock_wait and __lll_unlock_wake. * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): Don't define. (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass PSHARED(%rdi) in %esi to both __lll_lock_wait and __lll_unlock_wake. * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): Don't define. (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass PSHARED(%rdi) in %esi to both __lll_lock_wait and __lll_unlock_wake. * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): Don't define. (__pthread_rwlock_unlock): Rename __lll_mutex_* to __lll_*, pass PSHARED(%rdi) in %esi to both __lll_lock_wait and __lll_unlock_wake. * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): Don't define. (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass PSHARED(%rdi) in %ecx to both __lll_lock_wait and __lll_unlock_wake. * sysdeps/unix/sysv/linux/x86_64/pthread_once.S: Include lowlevellock.h. (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Don't define. * sysdeps/unix/sysv/linux/x86_64/sem_post.S: Include lowlevellock.h. (LOCK, SYS_futex, FUTEX_WAKE): Don't define. * sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S: Include lowlevellock.h. (LOCK, SYS_futex, FUTEX_WAIT): Don't define. * sysdeps/unix/sysv/linux/x86_64/sem_trywait.S: Include lowlevellock.h. (LOCK): Don't define. * sysdeps/unix/sysv/linux/x86_64/sem_wait.S: Include lowlevellock.h. (LOCK, SYS_futex, FUTEX_WAIT): Don't define. * sysdeps/unix/sysv/linux/sparc/internaltypes.h: New file. * sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c: New file. * sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c: New file. * sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c: New file. * sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c (__lll_lock_wait_private): New function. (__lll_lock_wait, __lll_timedlock_wait): Add private argument, pass it to lll_futex_*wait. Don't compile in for libc.so. * sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_init.c: Remove. * sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c (struct sparc_pthread_barrier): Remove. (pthread_barrier_wait): Use union sparc_pthread_barrier instead of struct sparc_pthread_barrier. Pass ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE to lll_{,un}lock and lll_futex_wait macros. * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_init.c: Remove. * sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c: Include sparc pthread_barrier_wait.c instead of generic one.
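The core of the change described above is that every low-level lock operation now carries a "private" argument: LLL_PRIVATE for locks that can never be shared between processes, LLL_SHARED for locks that may live in process-shared memory, so that the cheaper private futex operations can be used where possible. The following is a minimal, self-contained sketch of that idea, not glibc's implementation; the names ll_lock, ll_unlock, futex_op and LL_PRIVATE/LL_SHARED are invented for illustration, and the real lll_* macros are partly written in assembler.

/* Illustrative sketch only -- not glibc code.  A futex-backed lock whose
   operations take a "private" argument, in the spirit of the lll_lock /
   lll_unlock change described in the commit message.  */
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

#define LL_PRIVATE 0                     /* analogous to LLL_PRIVATE */
#define LL_SHARED  FUTEX_PRIVATE_FLAG    /* analogous to LLL_SHARED  */

/* Pick the futex operation: a private lock gets FUTEX_PRIVATE_FLAG ORed
   in, a shared lock keeps the plain operation.  */
static inline int
futex_op (int op, int private)
{
  return (op | FUTEX_PRIVATE_FLAG) ^ private;
}

static inline long
futex (int *uaddr, int op, int val)
{
  return syscall (SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* Lock states: 0 = unlocked, 1 = locked, 2 = locked with possible waiters.
   The futex word must be a 32-bit int; atomic_int qualifies on Linux.  */
static void
ll_lock (atomic_int *lock, int private)
{
  int c = 0;
  if (atomic_compare_exchange_strong (lock, &c, 1))
    return;                            /* fast path, uncontended */
  if (c != 2)
    c = atomic_exchange (lock, 2);     /* mark as contended */
  while (c != 0)
    {
      futex ((int *) lock, futex_op (FUTEX_WAIT, private), 2);
      c = atomic_exchange (lock, 2);
    }
}

static void
ll_unlock (atomic_int *lock, int private)
{
  if (atomic_exchange (lock, 0) == 2)  /* there may be waiters */
    futex ((int *) lock, futex_op (FUTEX_WAKE, private), 1);
}

With this shape, a process-private lock such as stack_cache_lock is taken with ll_lock (&l, LL_PRIVATE) and ends up in FUTEX_WAIT_PRIVATE, while a lock inside a process-shared object passes LL_SHARED and keeps the plain FUTEX_WAIT.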
parent 1475e2012f
commit e51deae7f6

nptl/ChangeLog | 444
nptl/allocatestack.c

@@ -103,7 +103,7 @@ static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40MiBi by default. */
static size_t stack_cache_actsize;

/* Mutex protecting this variable. */
-static lll_lock_t stack_cache_lock = LLL_LOCK_INITIALIZER;
+static int stack_cache_lock = LLL_LOCK_INITIALIZER;

/* List of queued stack frames. */
static LIST_HEAD (stack_cache);

@@ -139,7 +139,7 @@ get_cached_stack (size_t *sizep, void **memp)
struct pthread *result = NULL;
list_t *entry;

-lll_lock (stack_cache_lock);
+lll_lock (stack_cache_lock, LLL_PRIVATE);

/* Search the cache for a matching entry. We search for the
smallest stack which has at least the required size. Note that

@@ -172,7 +172,7 @@ get_cached_stack (size_t *sizep, void **memp)
|| __builtin_expect (result->stackblock_size > 4 * size, 0))
{
/* Release the lock. */
-lll_unlock (stack_cache_lock);
+lll_unlock (stack_cache_lock, LLL_PRIVATE);

return NULL;
}

@@ -187,7 +187,7 @@ get_cached_stack (size_t *sizep, void **memp)
stack_cache_actsize -= result->stackblock_size;

/* Release the lock early. */
-lll_unlock (stack_cache_lock);
+lll_unlock (stack_cache_lock, LLL_PRIVATE);

/* Report size and location of the stack to the caller. */
*sizep = result->stackblock_size;

@@ -400,12 +400,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,


/* Prepare to modify global data. */
-lll_lock (stack_cache_lock);
+lll_lock (stack_cache_lock, LLL_PRIVATE);

/* And add to the list of stacks in use. */
list_add (&pd->list, &__stack_user);

-lll_unlock (stack_cache_lock);
+lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
else
{

@@ -544,12 +544,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,


/* Prepare to modify global data. */
-lll_lock (stack_cache_lock);
+lll_lock (stack_cache_lock, LLL_PRIVATE);

/* And add to the list of stacks in use. */
list_add (&pd->list, &stack_used);

-lll_unlock (stack_cache_lock);
+lll_unlock (stack_cache_lock, LLL_PRIVATE);


/* There might have been a race. Another thread might have

@@ -598,12 +598,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
mprot_error:
err = errno;

-lll_lock (stack_cache_lock);
+lll_lock (stack_cache_lock, LLL_PRIVATE);

/* Remove the thread from the list. */
list_del (&pd->list);

-lll_unlock (stack_cache_lock);
+lll_unlock (stack_cache_lock, LLL_PRIVATE);

/* Get rid of the TLS block we allocated. */
_dl_deallocate_tls (TLS_TPADJ (pd), false);

@@ -699,7 +699,7 @@ void
internal_function
__deallocate_stack (struct pthread *pd)
{
-lll_lock (stack_cache_lock);
+lll_lock (stack_cache_lock, LLL_PRIVATE);

/* Remove the thread from the list of threads with user defined
stacks. */

@@ -715,7 +715,7 @@ __deallocate_stack (struct pthread *pd)
/* Free the memory associated with the ELF TLS. */
_dl_deallocate_tls (TLS_TPADJ (pd), false);

-lll_unlock (stack_cache_lock);
+lll_unlock (stack_cache_lock, LLL_PRIVATE);
}


@@ -732,7 +732,7 @@ __make_stacks_executable (void **stack_endp)
const size_t pagemask = ~(__getpagesize () - 1);
#endif

-lll_lock (stack_cache_lock);
+lll_lock (stack_cache_lock, LLL_PRIVATE);

list_t *runp;
list_for_each (runp, &stack_used)

@@ -761,7 +761,7 @@ __make_stacks_executable (void **stack_endp)
break;
}

-lll_unlock (stack_cache_lock);
+lll_unlock (stack_cache_lock, LLL_PRIVATE);

return err;
}

@@ -837,7 +837,7 @@ __find_thread_by_id (pid_t tid)
{
struct pthread *result = NULL;

-lll_lock (stack_cache_lock);
+lll_lock (stack_cache_lock, LLL_PRIVATE);

/* Iterate over the list with system-allocated threads first. */
list_t *runp;

@@ -869,7 +869,7 @@ __find_thread_by_id (pid_t tid)
}

out:
-lll_unlock (stack_cache_lock);
+lll_unlock (stack_cache_lock, LLL_PRIVATE);

return result;
}

@@ -920,7 +920,7 @@ attribute_hidden
__nptl_setxid (struct xid_command *cmdp)
{
int result;
-lll_lock (stack_cache_lock);
+lll_lock (stack_cache_lock, LLL_PRIVATE);

__xidcmd = cmdp;
cmdp->cntr = 0;

@@ -966,7 +966,7 @@ __nptl_setxid (struct xid_command *cmdp)
result = -1;
}

-lll_unlock (stack_cache_lock);
+lll_unlock (stack_cache_lock, LLL_PRIVATE);
return result;
}

@@ -995,7 +995,7 @@ void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
-lll_lock (stack_cache_lock);
+lll_lock (stack_cache_lock, LLL_PRIVATE);

/* Iterate over the list with system-allocated threads first. */
list_t *runp;

@@ -1006,7 +1006,7 @@ __pthread_init_static_tls (struct link_map *map)
list_for_each (runp, &__stack_user)
init_one_static_tls (list_entry (runp, struct pthread, list), map);

-lll_unlock (stack_cache_lock);
+lll_unlock (stack_cache_lock, LLL_PRIVATE);
}


@@ -1014,7 +1014,7 @@ void
attribute_hidden
__wait_lookup_done (void)
{
-lll_lock (stack_cache_lock);
+lll_lock (stack_cache_lock, LLL_PRIVATE);

struct pthread *self = THREAD_SELF;

@@ -1063,5 +1063,5 @@ __wait_lookup_done (void)
while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
}

-lll_unlock (stack_cache_lock);
+lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
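All of the allocatestack.c call sites above guard data that only threads of the current process ever touch, which is why they can pass LLL_PRIVATE: the lock then ends up in FUTEX_WAIT_PRIVATE / FUTEX_WAKE_PRIVATE, which lets the kernel skip the work needed to match a futex address across address spaces. A small self-contained demo of those two operations (illustrative only, not glibc code; build with gcc -pthread):

#include <linux/futex.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static atomic_int flag;            /* futex word: 0 = not ready, 1 = ready */

static long
futex (int *uaddr, int op, int val)
{
  return syscall (SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void *
waiter (void *arg)
{
  /* Loop: FUTEX_WAIT can return spuriously or with EAGAIN if the word
     already changed before we blocked.  */
  while (atomic_load (&flag) == 0)
    futex ((int *) &flag, FUTEX_WAIT_PRIVATE, 0);
  puts ("waiter woken");
  return NULL;
}

int
main (void)
{
  pthread_t t;
  pthread_create (&t, NULL, waiter, NULL);
  sleep (1);                       /* crude, demo only: let the waiter block */
  atomic_store (&flag, 1);
  futex ((int *) &flag, FUTEX_WAKE_PRIVATE, 1);
  pthread_join (t, NULL);
  return 0;
}

The waiter blocks in FUTEX_WAIT_PRIVATE until the main thread stores 1 and issues FUTEX_WAKE_PRIVATE; with a process-shared object the same code would have to use the plain FUTEX_WAIT / FUTEX_WAKE operations instead.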
nptl/descr.h

@@ -309,10 +309,10 @@ struct pthread
int parent_cancelhandling;

/* Lock to synchronize access to the descriptor. */
-lll_lock_t lock;
+int lock;

/* Lock for synchronizing setxid calls. */
-lll_lock_t setxid_futex;
+int setxid_futex;

#if HP_TIMING_AVAIL
/* Offset of the CPU clock at start thread start time. */
nptl/old_pthread_cond_broadcast.c

@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -33,7 +33,7 @@ __pthread_cond_broadcast_2_0 (cond)
{
pthread_cond_t *newcond;

-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
if (newcond == NULL)
return ENOMEM;
nptl/old_pthread_cond_signal.c

@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -33,7 +33,7 @@ __pthread_cond_signal_2_0 (cond)
{
pthread_cond_t *newcond;

-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
if (newcond == NULL)
return ENOMEM;
nptl/old_pthread_cond_timedwait.c

@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -35,7 +35,7 @@ __pthread_cond_timedwait_2_0 (cond, mutex, abstime)
{
pthread_cond_t *newcond;

-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
if (newcond == NULL)
return ENOMEM;
nptl/old_pthread_cond_wait.c

@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -34,7 +34,7 @@ __pthread_cond_wait_2_0 (cond, mutex)
{
pthread_cond_t *newcond;

-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
if (newcond == NULL)
return ENOMEM;
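The four compatibility files above only rename the macro in the #if guard, but the pattern the guard protects is worth spelling out: when the unlocked state of the low-level lock is all-zero bits, a condvar obtained from calloc needs no further initialization, and only otherwise would an explicit store be required. A sketch of that pattern (illustrative; the struct layout and the fallback branch are hypothetical, LOCK_INITIALIZER stands in for LLL_LOCK_INITIALIZER):

#include <stdlib.h>

#define LOCK_INITIALIZER 0            /* stand-in for LLL_LOCK_INITIALIZER */

struct cond_demo
{
  int lock;                            /* futex word */
  unsigned long long total_seq;
  /* ... more bookkeeping ... */
};

static struct cond_demo *
cond_alloc (void)
{
#if LOCK_INITIALIZER == 0
  /* calloc already yields lock == 0 == unlocked.  */
  return calloc (1, sizeof (struct cond_demo));
#else
  /* Hypothetical fallback: initialize the lock word explicitly.  */
  struct cond_demo *c = malloc (sizeof (struct cond_demo));
  if (c != NULL)
    {
      c->lock = LOCK_INITIALIZER;
      c->total_seq = 0;
    }
  return c;
#endif
}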
nptl/pthreadP.h

@@ -151,7 +151,7 @@ hidden_proto (__stack_user)

/* Attribute handling. */
extern struct pthread_attr *__attr_list attribute_hidden;
-extern lll_lock_t __attr_list_lock attribute_hidden;
+extern int __attr_list_lock attribute_hidden;

/* First available RT signal. */
extern int __current_sigrtmin attribute_hidden;
nptl/pthread_attr_init.c

@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -27,7 +27,7 @@


struct pthread_attr *__attr_list;
-lll_lock_t __attr_list_lock = LLL_LOCK_INITIALIZER;
+int __attr_list_lock = LLL_LOCK_INITIALIZER;


int
nptl/pthread_barrier_destroy.c

@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -31,14 +31,14 @@ pthread_barrier_destroy (barrier)

ibarrier = (struct pthread_barrier *) barrier;

-lll_lock (ibarrier->lock);
+lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);

if (__builtin_expect (ibarrier->left == ibarrier->init_count, 1))
/* The barrier is not used anymore. */
result = 0;
else
/* Still used, return with an error. */
-lll_unlock (ibarrier->lock);
+lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);

return result;
}
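The second argument passed above, ibarrier->private ^ FUTEX_PRIVATE_FLAG, converts between two encodings. Judging from this diff, and assuming the usual definitions of that era's generic lowlevellock.h (LLL_PRIVATE == 0, LLL_SHARED == FUTEX_PRIVATE_FLAG), the barrier's private field holds FUTEX_PRIVATE_FLAG for a process-private barrier and 0 for a process-shared one, so the XOR yields exactly the value the lll_* macros expect. A tiny check of that flag arithmetic (illustrative only):

#include <assert.h>
#include <linux/futex.h>

#define LL_PRIVATE 0                   /* assumed value of LLL_PRIVATE */
#define LL_SHARED  FUTEX_PRIVATE_FLAG  /* assumed value of LLL_SHARED  */

int
main (void)
{
  /* Assumed encoding of the barrier's "private" field.  */
  int private_barrier = FUTEX_PRIVATE_FLAG;   /* PTHREAD_PROCESS_PRIVATE */
  int shared_barrier  = 0;                    /* PTHREAD_PROCESS_SHARED  */

  /* The value handed to lll_lock/lll_unlock/lll_futex_* above.  */
  assert ((private_barrier ^ FUTEX_PRIVATE_FLAG) == LL_PRIVATE);
  assert ((shared_barrier  ^ FUTEX_PRIVATE_FLAG) == LL_SHARED);
  return 0;
}

Storing the field in futex-flag form lets other code OR it directly into a futex operation, while the XOR keeps the generic lll_* interface unchanged.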
nptl/pthread_barrier_init.c

@@ -40,7 +40,7 @@ pthread_barrier_init (barrier, attr, count)
if (__builtin_expect (count == 0, 0))
return EINVAL;

-struct pthread_barrierattr *iattr
+const struct pthread_barrierattr *iattr
= (attr != NULL
? iattr = (struct pthread_barrierattr *) attr
: &default_attr);
nptl/pthread_barrier_wait.c

@@ -32,7 +32,7 @@ pthread_barrier_wait (barrier)
int result = 0;

/* Make sure we are alone. */
-lll_lock (ibarrier->lock);
+lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);

/* One more arrival. */
--ibarrier->left;

@@ -46,8 +46,7 @@ pthread_barrier_wait (barrier)

/* Wake up everybody. */
lll_futex_wake (&ibarrier->curr_event, INT_MAX,
-// XYZ check mutex flag
-LLL_SHARED);
+ibarrier->private ^ FUTEX_PRIVATE_FLAG);

/* This is the thread which finished the serialization. */
result = PTHREAD_BARRIER_SERIAL_THREAD;

@@ -59,13 +58,12 @@ pthread_barrier_wait (barrier)
unsigned int event = ibarrier->curr_event;

/* Before suspending, make the barrier available to others. */
-lll_unlock (ibarrier->lock);
+lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);

/* Wait for the event counter of the barrier to change. */
do
lll_futex_wait (&ibarrier->curr_event, event,
-// XYZ check mutex flag
-LLL_SHARED);
+ibarrier->private ^ FUTEX_PRIVATE_FLAG);
while (event == ibarrier->curr_event);
}

@@ -75,7 +73,7 @@ pthread_barrier_wait (barrier)
/* If this was the last woken thread, unlock. */
if (atomic_increment_val (&ibarrier->left) == init_count)
/* We are done. */
-lll_unlock (ibarrier->lock);
+lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);

return result;
}
@ -33,7 +33,7 @@ __pthread_cond_broadcast (cond)
|
||||
pthread_cond_t *cond;
|
||||
{
|
||||
/* Make sure we are alone. */
|
||||
lll_mutex_lock (cond->__data.__lock);
|
||||
lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
/* Are there any waiters to be woken? */
|
||||
if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
|
||||
@ -47,7 +47,7 @@ __pthread_cond_broadcast (cond)
|
||||
++cond->__data.__broadcast_seq;
|
||||
|
||||
/* We are done. */
|
||||
lll_mutex_unlock (cond->__data.__lock);
|
||||
lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
/* Do not use requeue for pshared condvars. */
|
||||
if (cond->__data.__mutex == (void *) ~0l)
|
||||
@ -79,7 +79,7 @@ __pthread_cond_broadcast (cond)
|
||||
}
|
||||
|
||||
/* We are done. */
|
||||
lll_mutex_unlock (cond->__data.__lock);
|
||||
lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -27,13 +27,13 @@ __pthread_cond_destroy (cond)
|
||||
pthread_cond_t *cond;
|
||||
{
|
||||
/* Make sure we are alone. */
|
||||
lll_mutex_lock (cond->__data.__lock);
|
||||
lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
|
||||
{
|
||||
/* If there are still some waiters which have not been
|
||||
woken up, this is an application bug. */
|
||||
lll_mutex_unlock (cond->__data.__lock);
|
||||
lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
return EBUSY;
|
||||
}
|
||||
|
||||
@ -66,13 +66,13 @@ __pthread_cond_destroy (cond)
|
||||
|
||||
do
|
||||
{
|
||||
lll_mutex_unlock (cond->__data.__lock);
|
||||
lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
lll_futex_wait (&cond->__data.__nwaiters, nwaiters,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
|
||||
lll_mutex_lock (cond->__data.__lock);
|
||||
lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
nwaiters = cond->__data.__nwaiters;
|
||||
}
|
||||
|
@ -28,7 +28,7 @@ __pthread_cond_init (cond, cond_attr)
|
||||
{
|
||||
struct pthread_condattr *icond_attr = (struct pthread_condattr *) cond_attr;
|
||||
|
||||
cond->__data.__lock = LLL_MUTEX_LOCK_INITIALIZER;
|
||||
cond->__data.__lock = LLL_LOCK_INITIALIZER;
|
||||
cond->__data.__futex = 0;
|
||||
cond->__data.__nwaiters = (icond_attr != NULL
|
||||
&& ((icond_attr->value
|
||||
|
@ -33,7 +33,7 @@ __pthread_cond_signal (cond)
|
||||
pthread_cond_t *cond;
|
||||
{
|
||||
/* Make sure we are alone. */
|
||||
lll_mutex_lock (cond->__data.__lock);
|
||||
lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
/* Are there any waiters to be woken? */
|
||||
if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
|
||||
@ -56,7 +56,7 @@ __pthread_cond_signal (cond)
|
||||
}
|
||||
|
||||
/* We are done. */
|
||||
lll_mutex_unlock (cond->__data.__lock);
|
||||
lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -54,13 +54,13 @@ __pthread_cond_timedwait (cond, mutex, abstime)
|
||||
return EINVAL;
|
||||
|
||||
/* Make sure we are along. */
|
||||
lll_mutex_lock (cond->__data.__lock);
|
||||
lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
/* Now we can release the mutex. */
|
||||
int err = __pthread_mutex_unlock_usercnt (mutex, 0);
|
||||
if (err)
|
||||
{
|
||||
lll_mutex_unlock (cond->__data.__lock);
|
||||
lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -146,7 +146,7 @@ __pthread_cond_timedwait (cond, mutex, abstime)
|
||||
unsigned int futex_val = cond->__data.__futex;
|
||||
|
||||
/* Prepare to wait. Release the condvar futex. */
|
||||
lll_mutex_unlock (cond->__data.__lock);
|
||||
lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
/* Enable asynchronous cancellation. Required by the standard. */
|
||||
cbuffer.oldtype = __pthread_enable_asynccancel ();
|
||||
@ -161,7 +161,7 @@ __pthread_cond_timedwait (cond, mutex, abstime)
|
||||
__pthread_disable_asynccancel (cbuffer.oldtype);
|
||||
|
||||
/* We are going to look at shared data again, so get the lock. */
|
||||
lll_mutex_lock(cond->__data.__lock);
|
||||
lll_lock(cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
/* If a broadcast happened, we are done. */
|
||||
if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
|
||||
@ -203,7 +203,7 @@ __pthread_cond_timedwait (cond, mutex, abstime)
|
||||
LLL_SHARED);
|
||||
|
||||
/* We are done with the condvar. */
|
||||
lll_mutex_unlock (cond->__data.__lock);
|
||||
lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
/* The cancellation handling is back to normal, remove the handler. */
|
||||
__pthread_cleanup_pop (&buffer, 0);
|
||||
|
@ -45,7 +45,7 @@ __condvar_cleanup (void *arg)
|
||||
unsigned int destroying;
|
||||
|
||||
/* We are going to modify shared data. */
|
||||
lll_mutex_lock (cbuffer->cond->__data.__lock);
|
||||
lll_lock (cbuffer->cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
if (cbuffer->bc_seq == cbuffer->cond->__data.__broadcast_seq)
|
||||
{
|
||||
@ -78,7 +78,7 @@ __condvar_cleanup (void *arg)
|
||||
}
|
||||
|
||||
/* We are done. */
|
||||
lll_mutex_unlock (cbuffer->cond->__data.__lock);
|
||||
lll_unlock (cbuffer->cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
/* Wake everybody to make sure no condvar signal gets lost. */
|
||||
if (! destroying)
|
||||
@ -102,13 +102,13 @@ __pthread_cond_wait (cond, mutex)
|
||||
int err;
|
||||
|
||||
/* Make sure we are along. */
|
||||
lll_mutex_lock (cond->__data.__lock);
|
||||
lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
/* Now we can release the mutex. */
|
||||
err = __pthread_mutex_unlock_usercnt (mutex, 0);
|
||||
if (__builtin_expect (err, 0))
|
||||
{
|
||||
lll_mutex_unlock (cond->__data.__lock);
|
||||
lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -144,7 +144,7 @@ __pthread_cond_wait (cond, mutex)
|
||||
unsigned int futex_val = cond->__data.__futex;
|
||||
|
||||
/* Prepare to wait. Release the condvar futex. */
|
||||
lll_mutex_unlock (cond->__data.__lock);
|
||||
lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
/* Enable asynchronous cancellation. Required by the standard. */
|
||||
cbuffer.oldtype = __pthread_enable_asynccancel ();
|
||||
@ -158,7 +158,7 @@ __pthread_cond_wait (cond, mutex)
|
||||
__pthread_disable_asynccancel (cbuffer.oldtype);
|
||||
|
||||
/* We are going to look at shared data again, so get the lock. */
|
||||
lll_mutex_lock (cond->__data.__lock);
|
||||
lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
/* If a broadcast happened, we are done. */
|
||||
if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
|
||||
@ -186,7 +186,7 @@ __pthread_cond_wait (cond, mutex)
|
||||
LLL_SHARED);
|
||||
|
||||
/* We are done with the condvar. */
|
||||
lll_mutex_unlock (cond->__data.__lock);
|
||||
lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
/* The cancellation handling is back to normal, remove the handler. */
|
||||
__pthread_cleanup_pop (&buffer, 0);
|
||||
|
@ -63,7 +63,7 @@ __find_in_stack_list (pd)
|
||||
list_t *entry;
|
||||
struct pthread *result = NULL;
|
||||
|
||||
lll_lock (stack_cache_lock);
|
||||
lll_lock (stack_cache_lock, LLL_PRIVATE);
|
||||
|
||||
list_for_each (entry, &stack_used)
|
||||
{
|
||||
@ -90,7 +90,7 @@ __find_in_stack_list (pd)
|
||||
}
|
||||
}
|
||||
|
||||
lll_unlock (stack_cache_lock);
|
||||
lll_unlock (stack_cache_lock, LLL_PRIVATE);
|
||||
|
||||
return result;
|
||||
}
|
||||
@ -284,9 +284,9 @@ start_thread (void *arg)
|
||||
int oldtype = CANCEL_ASYNC ();
|
||||
|
||||
/* Get the lock the parent locked to force synchronization. */
|
||||
lll_lock (pd->lock);
|
||||
lll_lock (pd->lock, LLL_PRIVATE);
|
||||
/* And give it up right away. */
|
||||
lll_unlock (pd->lock);
|
||||
lll_unlock (pd->lock, LLL_PRIVATE);
|
||||
|
||||
CANCEL_RESET (oldtype);
|
||||
}
|
||||
@ -370,7 +370,7 @@ start_thread (void *arg)
|
||||
# endif
|
||||
this->__list.__next = NULL;
|
||||
|
||||
lll_robust_mutex_dead (this->__lock);
|
||||
lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
|
||||
}
|
||||
while (robust != (void *) &pd->robust_head);
|
||||
}
|
||||
|
@ -39,7 +39,7 @@ pthread_getattr_np (thread_id, attr)
|
||||
struct pthread_attr *iattr = (struct pthread_attr *) attr;
|
||||
int ret = 0;
|
||||
|
||||
lll_lock (thread->lock);
|
||||
lll_lock (thread->lock, LLL_PRIVATE);
|
||||
|
||||
/* The thread library is responsible for keeping the values in the
|
||||
thread desriptor up-to-date in case the user changes them. */
|
||||
@ -173,7 +173,7 @@ pthread_getattr_np (thread_id, attr)
|
||||
}
|
||||
}
|
||||
|
||||
lll_unlock (thread->lock);
|
||||
lll_unlock (thread->lock, LLL_PRIVATE);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -38,7 +38,7 @@ __pthread_getschedparam (threadid, policy, param)
|
||||
|
||||
int result = 0;
|
||||
|
||||
lll_lock (pd->lock);
|
||||
lll_lock (pd->lock, LLL_PRIVATE);
|
||||
|
||||
/* The library is responsible for maintaining the values at all
|
||||
times. If the user uses a interface other than
|
||||
@ -68,7 +68,7 @@ __pthread_getschedparam (threadid, policy, param)
|
||||
memcpy (param, &pd->schedparam, sizeof (struct sched_param));
|
||||
}
|
||||
|
||||
lll_unlock (pd->lock);
|
||||
lll_unlock (pd->lock, LLL_PRIVATE);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -27,9 +27,9 @@
|
||||
|
||||
|
||||
#ifndef LLL_MUTEX_LOCK
|
||||
# define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex)
|
||||
# define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex)
|
||||
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_lock (mutex, id)
|
||||
# define LLL_MUTEX_LOCK(mutex) lll_lock (mutex, /* XYZ */ LLL_SHARED)
|
||||
# define LLL_MUTEX_TRYLOCK(mutex) lll_trylock (mutex)
|
||||
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_lock (mutex, id, /* XYZ */ LLL_SHARED)
|
||||
#endif
|
||||
|
||||
|
||||
@ -198,7 +198,7 @@ __pthread_mutex_lock (mutex)
|
||||
{
|
||||
/* This mutex is now not recoverable. */
|
||||
mutex->__data.__count = 0;
|
||||
lll_mutex_unlock (mutex->__data.__lock);
|
||||
lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
||||
return ENOTRECOVERABLE;
|
||||
}
|
||||
|
@ -56,7 +56,8 @@ pthread_mutex_timedlock (mutex, abstime)
|
||||
}
|
||||
|
||||
/* We have to get the mutex. */
|
||||
result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
|
||||
result = lll_timedlock (mutex->__data.__lock, abstime,
|
||||
/* XYZ */ LLL_SHARED);
|
||||
|
||||
if (result != 0)
|
||||
goto out;
|
||||
@ -76,14 +77,15 @@ pthread_mutex_timedlock (mutex, abstime)
|
||||
case PTHREAD_MUTEX_TIMED_NP:
|
||||
simple:
|
||||
/* Normal mutex. */
|
||||
result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
|
||||
result = lll_timedlock (mutex->__data.__lock, abstime,
|
||||
/* XYZ */ LLL_SHARED);
|
||||
break;
|
||||
|
||||
case PTHREAD_MUTEX_ADAPTIVE_NP:
|
||||
if (! __is_smp)
|
||||
goto simple;
|
||||
|
||||
if (lll_mutex_trylock (mutex->__data.__lock) != 0)
|
||||
if (lll_trylock (mutex->__data.__lock) != 0)
|
||||
{
|
||||
int cnt = 0;
|
||||
int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
|
||||
@ -92,7 +94,8 @@ pthread_mutex_timedlock (mutex, abstime)
|
||||
{
|
||||
if (cnt++ >= max_cnt)
|
||||
{
|
||||
result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
|
||||
result = lll_timedlock (mutex->__data.__lock, abstime,
|
||||
/* XYZ */ LLL_SHARED);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -100,7 +103,7 @@ pthread_mutex_timedlock (mutex, abstime)
|
||||
BUSY_WAIT_NOP;
|
||||
#endif
|
||||
}
|
||||
while (lll_mutex_trylock (mutex->__data.__lock) != 0);
|
||||
while (lll_trylock (mutex->__data.__lock) != 0);
|
||||
|
||||
mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
|
||||
}
|
||||
@ -174,15 +177,15 @@ pthread_mutex_timedlock (mutex, abstime)
|
||||
}
|
||||
}
|
||||
|
||||
result = lll_robust_mutex_timedlock (mutex->__data.__lock, abstime,
|
||||
id);
|
||||
result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
|
||||
/* XYZ */ LLL_SHARED);
|
||||
|
||||
if (__builtin_expect (mutex->__data.__owner
|
||||
== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
|
||||
{
|
||||
/* This mutex is now not recoverable. */
|
||||
mutex->__data.__count = 0;
|
||||
lll_mutex_unlock (mutex->__data.__lock);
|
||||
lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
||||
return ENOTRECOVERABLE;
|
||||
}
|
||||
|
@ -48,7 +48,7 @@ __pthread_mutex_trylock (mutex)
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (lll_mutex_trylock (mutex->__data.__lock) == 0)
|
||||
if (lll_trylock (mutex->__data.__lock) == 0)
|
||||
{
|
||||
/* Record the ownership. */
|
||||
mutex->__data.__owner = id;
|
||||
@ -62,7 +62,7 @@ __pthread_mutex_trylock (mutex)
|
||||
case PTHREAD_MUTEX_TIMED_NP:
|
||||
case PTHREAD_MUTEX_ADAPTIVE_NP:
|
||||
/* Normal mutex. */
|
||||
if (lll_mutex_trylock (mutex->__data.__lock) != 0)
|
||||
if (lll_trylock (mutex->__data.__lock) != 0)
|
||||
break;
|
||||
|
||||
/* Record the ownership. */
|
||||
@ -140,7 +140,7 @@ __pthread_mutex_trylock (mutex)
|
||||
}
|
||||
}
|
||||
|
||||
oldval = lll_robust_mutex_trylock (mutex->__data.__lock, id);
|
||||
oldval = lll_robust_trylock (mutex->__data.__lock, id);
|
||||
if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
|
||||
{
|
||||
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
||||
@ -154,7 +154,7 @@ __pthread_mutex_trylock (mutex)
|
||||
/* This mutex is now not recoverable. */
|
||||
mutex->__data.__count = 0;
|
||||
if (oldval == id)
|
||||
lll_mutex_unlock (mutex->__data.__lock);
|
||||
lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
||||
return ENOTRECOVERABLE;
|
||||
}
|
||||
|
@ -47,7 +47,7 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
|
||||
case PTHREAD_MUTEX_ERRORCHECK_NP:
|
||||
/* Error checking mutex. */
|
||||
if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
|
||||
|| ! lll_mutex_islocked (mutex->__data.__lock))
|
||||
|| ! lll_islocked (mutex->__data.__lock))
|
||||
return EPERM;
|
||||
/* FALLTHROUGH */
|
||||
|
||||
@ -61,7 +61,7 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
|
||||
--mutex->__data.__nusers;
|
||||
|
||||
/* Unlock. */
|
||||
lll_mutex_unlock (mutex->__data.__lock);
|
||||
lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
break;
|
||||
|
||||
case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
|
||||
@ -92,7 +92,7 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
|
||||
case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
|
||||
if ((mutex->__data.__lock & FUTEX_TID_MASK)
|
||||
!= THREAD_GETMEM (THREAD_SELF, tid)
|
||||
|| ! lll_mutex_islocked (mutex->__data.__lock))
|
||||
|| ! lll_islocked (mutex->__data.__lock))
|
||||
return EPERM;
|
||||
|
||||
/* If the previous owner died and the caller did not succeed in
|
||||
@ -115,7 +115,7 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
|
||||
--mutex->__data.__nusers;
|
||||
|
||||
/* Unlock. */
|
||||
lll_robust_mutex_unlock (mutex->__data.__lock);
|
||||
lll_robust_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
|
||||
|
||||
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
|
||||
break;
|
||||
@ -161,7 +161,7 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
|
||||
case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
|
||||
if ((mutex->__data.__lock & FUTEX_TID_MASK)
|
||||
!= THREAD_GETMEM (THREAD_SELF, tid)
|
||||
|| ! lll_mutex_islocked (mutex->__data.__lock))
|
||||
|| ! lll_islocked (mutex->__data.__lock))
|
||||
return EPERM;
|
||||
|
||||
/* If the previous owner died and the caller did not succeed in
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (C) 2002 Free Software Foundation, Inc.
|
||||
/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
@ -22,7 +22,7 @@
|
||||
|
||||
|
||||
|
||||
static lll_lock_t once_lock = LLL_LOCK_INITIALIZER;
|
||||
static int once_lock = LLL_LOCK_INITIALIZER;
|
||||
|
||||
|
||||
int
|
||||
@ -35,7 +35,7 @@ __pthread_once (once_control, init_routine)
|
||||
object. */
|
||||
if (*once_control == PTHREAD_ONCE_INIT)
|
||||
{
|
||||
lll_lock (once_lock);
|
||||
lll_lock (once_lock, LLL_PRIVATE);
|
||||
|
||||
/* XXX This implementation is not complete. It doesn't take
|
||||
cancelation and fork into account. */
|
||||
@ -46,7 +46,7 @@ __pthread_once (once_control, init_routine)
|
||||
*once_control = !PTHREAD_ONCE_INIT;
|
||||
}
|
||||
|
||||
lll_unlock (once_lock);
|
||||
lll_unlock (once_lock, LLL_PRIVATE);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -32,7 +32,7 @@ __pthread_rwlock_rdlock (rwlock)
|
||||
int result = 0;
|
||||
|
||||
/* Make sure we are along. */
|
||||
lll_mutex_lock (rwlock->__data.__lock);
|
||||
lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
while (1)
|
||||
{
|
||||
@ -74,21 +74,20 @@ __pthread_rwlock_rdlock (rwlock)
|
||||
int waitval = rwlock->__data.__readers_wakeup;
|
||||
|
||||
/* Free the lock. */
|
||||
lll_mutex_unlock (rwlock->__data.__lock);
|
||||
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
/* Wait for the writer to finish. */
|
||||
lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval,
|
||||
rwlock->__data.__shared);
|
||||
|
||||
/* Get the lock. */
|
||||
lll_mutex_lock (rwlock->__data.__lock);
|
||||
lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
--rwlock->__data.__nr_readers_queued;
|
||||
}
|
||||
|
||||
/* We are done, free the lock. */
|
||||
lll_mutex_unlock (rwlock->__data.__lock);
|
||||
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -33,7 +33,7 @@ pthread_rwlock_timedrdlock (rwlock, abstime)
|
||||
int result = 0;
|
||||
|
||||
/* Make sure we are along. */
|
||||
lll_mutex_lock(rwlock->__data.__lock);
|
||||
lll_lock(rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
while (1)
|
||||
{
|
||||
@ -110,16 +110,14 @@ pthread_rwlock_timedrdlock (rwlock, abstime)
|
||||
int waitval = rwlock->__data.__readers_wakeup;
|
||||
|
||||
/* Free the lock. */
|
||||
lll_mutex_unlock (rwlock->__data.__lock);
|
||||
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
/* Wait for the writer to finish. */
|
||||
err = lll_futex_timed_wait (&rwlock->__data.__readers_wakeup,
|
||||
waitval, &rt,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
waitval, &rt, rwlock->__data.__shared);
|
||||
|
||||
/* Get the lock. */
|
||||
lll_mutex_lock (rwlock->__data.__lock);
|
||||
lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
--rwlock->__data.__nr_readers_queued;
|
||||
|
||||
@ -133,7 +131,7 @@ pthread_rwlock_timedrdlock (rwlock, abstime)
|
||||
}
|
||||
|
||||
/* We are done, free the lock. */
|
||||
lll_mutex_unlock (rwlock->__data.__lock);
|
||||
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -33,7 +33,7 @@ pthread_rwlock_timedwrlock (rwlock, abstime)
|
||||
int result = 0;
|
||||
|
||||
/* Make sure we are along. */
|
||||
lll_mutex_lock (rwlock->__data.__lock);
|
||||
lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
while (1)
|
||||
{
|
||||
@ -100,16 +100,14 @@ pthread_rwlock_timedwrlock (rwlock, abstime)
|
||||
int waitval = rwlock->__data.__writer_wakeup;
|
||||
|
||||
/* Free the lock. */
|
||||
lll_mutex_unlock (rwlock->__data.__lock);
|
||||
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
/* Wait for the writer or reader(s) to finish. */
|
||||
err = lll_futex_timed_wait (&rwlock->__data.__writer_wakeup,
|
||||
waitval, &rt,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
waitval, &rt, rwlock->__data.__shared);
|
||||
|
||||
/* Get the lock. */
|
||||
lll_mutex_lock (rwlock->__data.__lock);
|
||||
lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
/* To start over again, remove the thread from the writer list. */
|
||||
--rwlock->__data.__nr_writers_queued;
|
||||
@ -123,7 +121,7 @@ pthread_rwlock_timedwrlock (rwlock, abstime)
|
||||
}
|
||||
|
||||
/* We are done, free the lock. */
|
||||
lll_mutex_unlock (rwlock->__data.__lock);
|
||||
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -28,7 +28,7 @@ __pthread_rwlock_tryrdlock (rwlock)
|
||||
{
|
||||
int result = EBUSY;
|
||||
|
||||
lll_mutex_lock (rwlock->__data.__lock);
|
||||
lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
if (rwlock->__data.__writer == 0
|
||||
&& (rwlock->__data.__nr_writers_queued == 0
|
||||
@ -43,7 +43,7 @@ __pthread_rwlock_tryrdlock (rwlock)
|
||||
result = 0;
|
||||
}
|
||||
|
||||
lll_mutex_unlock (rwlock->__data.__lock);
|
||||
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
|
||||
/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
@ -28,7 +28,7 @@ __pthread_rwlock_trywrlock (rwlock)
|
||||
{
|
||||
int result = EBUSY;
|
||||
|
||||
lll_mutex_lock (rwlock->__data.__lock);
|
||||
lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
if (rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0)
|
||||
{
|
||||
@ -36,7 +36,7 @@ __pthread_rwlock_trywrlock (rwlock)
|
||||
result = 0;
|
||||
}
|
||||
|
||||
lll_mutex_unlock (rwlock->__data.__lock);
|
||||
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -27,7 +27,7 @@
|
||||
int
|
||||
__pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
|
||||
{
|
||||
lll_mutex_lock (rwlock->__data.__lock);
|
||||
lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
if (rwlock->__data.__writer)
|
||||
rwlock->__data.__writer = 0;
|
||||
else
|
||||
@ -37,23 +37,21 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
|
||||
if (rwlock->__data.__nr_writers_queued)
|
||||
{
|
||||
++rwlock->__data.__writer_wakeup;
|
||||
lll_mutex_unlock (rwlock->__data.__lock);
|
||||
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
lll_futex_wake (&rwlock->__data.__writer_wakeup, 1,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
rwlock->__data.__shared);
|
||||
return 0;
|
||||
}
|
||||
else if (rwlock->__data.__nr_readers_queued)
|
||||
{
|
||||
++rwlock->__data.__readers_wakeup;
|
||||
lll_mutex_unlock (rwlock->__data.__lock);
|
||||
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
rwlock->__data.__shared);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
lll_mutex_unlock (rwlock->__data.__lock);
|
||||
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -32,7 +32,7 @@ __pthread_rwlock_wrlock (rwlock)
|
||||
int result = 0;
|
||||
|
||||
/* Make sure we are along. */
|
||||
lll_mutex_lock (rwlock->__data.__lock);
|
||||
lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
while (1)
|
||||
{
|
||||
@ -65,22 +65,21 @@ __pthread_rwlock_wrlock (rwlock)
|
||||
int waitval = rwlock->__data.__writer_wakeup;
|
||||
|
||||
/* Free the lock. */
|
||||
lll_mutex_unlock (rwlock->__data.__lock);
|
||||
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
/* Wait for the writer or reader(s) to finish. */
|
||||
lll_futex_wait (&rwlock->__data.__writer_wakeup, waitval,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
rwlock->__data.__shared);
|
||||
|
||||
/* Get the lock. */
|
||||
lll_mutex_lock (rwlock->__data.__lock);
|
||||
lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
/* To start over again, remove the thread from the writer list. */
|
||||
--rwlock->__data.__nr_writers_queued;
|
||||
}
|
||||
|
||||
/* We are done, free the lock. */
|
||||
lll_mutex_unlock (rwlock->__data.__lock);
|
||||
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -39,7 +39,7 @@ __pthread_setschedparam (threadid, policy, param)
|
||||
|
||||
int result = 0;
|
||||
|
||||
lll_lock (pd->lock);
|
||||
lll_lock (pd->lock, LLL_PRIVATE);
|
||||
|
||||
struct sched_param p;
|
||||
const struct sched_param *orig_param = param;
|
||||
@ -67,7 +67,7 @@ __pthread_setschedparam (threadid, policy, param)
|
||||
pd->flags |= ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET;
|
||||
}
|
||||
|
||||
lll_unlock (pd->lock);
|
||||
lll_unlock (pd->lock, LLL_PRIVATE);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -41,7 +41,7 @@ pthread_setschedprio (threadid, prio)
|
||||
struct sched_param param;
|
||||
param.sched_priority = prio;
|
||||
|
||||
lll_lock (pd->lock);
|
||||
lll_lock (pd->lock, LLL_PRIVATE);
|
||||
|
||||
/* If the thread should have higher priority because of some
|
||||
PTHREAD_PRIO_PROTECT mutexes it holds, adjust the priority. */
|
||||
@ -60,7 +60,7 @@ pthread_setschedprio (threadid, prio)
|
||||
pd->flags |= ATTR_FLAG_SCHED_SET;
|
||||
}
|
||||
|
||||
lll_unlock (pd->lock);
|
||||
lll_unlock (pd->lock, LLL_PRIVATE);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -47,7 +47,7 @@ sem_close (sem)
|
||||
int result = 0;
|
||||
|
||||
/* Get the lock. */
|
||||
lll_lock (__sem_mappings_lock);
|
||||
lll_lock (__sem_mappings_lock, LLL_PRIVATE);
|
||||
|
||||
/* Locate the entry for the mapping the caller provided. */
|
||||
rec = NULL;
|
||||
@ -75,7 +75,7 @@ sem_close (sem)
|
||||
}
|
||||
|
||||
/* Release the lock. */
|
||||
lll_unlock (__sem_mappings_lock);
|
||||
lll_unlock (__sem_mappings_lock, LLL_PRIVATE);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -147,7 +147,7 @@ __sem_search (const void *a, const void *b)
|
||||
void *__sem_mappings attribute_hidden;
|
||||
|
||||
/* Lock to protect the search tree. */
|
||||
lll_lock_t __sem_mappings_lock attribute_hidden = LLL_LOCK_INITIALIZER;
|
||||
int __sem_mappings_lock attribute_hidden = LLL_LOCK_INITIALIZER;
|
||||
|
||||
|
||||
/* Search for existing mapping and if possible add the one provided. */
|
||||
@ -161,7 +161,7 @@ check_add_mapping (const char *name, size_t namelen, int fd, sem_t *existing)
|
||||
if (__fxstat64 (_STAT_VER, fd, &st) == 0)
|
||||
{
|
||||
/* Get the lock. */
|
||||
lll_lock (__sem_mappings_lock);
|
||||
lll_lock (__sem_mappings_lock, LLL_PRIVATE);
|
||||
|
||||
/* Search for an existing mapping given the information we have. */
|
||||
struct inuse_sem *fake;
|
||||
@ -210,7 +210,7 @@ check_add_mapping (const char *name, size_t namelen, int fd, sem_t *existing)
|
||||
}
|
||||
|
||||
/* Release the lock. */
|
||||
lll_unlock (__sem_mappings_lock);
|
||||
lll_unlock (__sem_mappings_lock, LLL_PRIVATE);
|
||||
}
|
||||
|
||||
if (result != existing && existing != SEM_FAILED && existing != MAP_FAILED)
|
||||
|
@ -48,7 +48,7 @@ extern pthread_once_t __namedsem_once attribute_hidden;
|
||||
extern void *__sem_mappings attribute_hidden;
|
||||
|
||||
/* Lock to protect the search tree. */
|
||||
extern lll_lock_t __sem_mappings_lock attribute_hidden;
|
||||
extern int __sem_mappings_lock attribute_hidden;
|
||||
|
||||
|
||||
/* Initializer for mountpoint. */
|
||||
|
@ -228,7 +228,7 @@ typedef pthread_key_t __libc_key_t;
|
||||
/* Lock the named lock variable. */
|
||||
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
|
||||
# define __libc_lock_lock(NAME) \
|
||||
({ lll_lock (NAME); 0; })
|
||||
({ lll_lock (NAME, LLL_PRIVATE); 0; })
|
||||
#else
|
||||
# define __libc_lock_lock(NAME) \
|
||||
__libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
|
||||
@ -245,7 +245,7 @@ typedef pthread_key_t __libc_key_t;
|
||||
void *self = THREAD_SELF; \
|
||||
if ((NAME).owner != self) \
|
||||
{ \
|
||||
lll_lock ((NAME).lock); \
|
||||
lll_lock ((NAME).lock, LLL_PRIVATE); \
|
||||
(NAME).owner = self; \
|
||||
} \
|
||||
++(NAME).cnt; \
|
||||
@ -299,7 +299,7 @@ typedef pthread_key_t __libc_key_t;
|
||||
/* Unlock the named lock variable. */
|
||||
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
|
||||
# define __libc_lock_unlock(NAME) \
|
||||
lll_unlock (NAME)
|
||||
lll_unlock (NAME, LLL_PRIVATE)
|
||||
#else
|
||||
# define __libc_lock_unlock(NAME) \
|
||||
__libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
|
||||
@ -315,7 +315,7 @@ typedef pthread_key_t __libc_key_t;
|
||||
if (--(NAME).cnt == 0) \
|
||||
{ \
|
||||
(NAME).owner = NULL; \
|
||||
lll_unlock ((NAME).lock); \
|
||||
lll_unlock ((NAME).lock, LLL_PRIVATE); \
|
||||
} \
|
||||
} while (0)
|
||||
#else
|
||||
|
@ -42,7 +42,7 @@ typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
|
||||
void *__self = THREAD_SELF; \
|
||||
if ((_name).owner != __self) \
|
||||
{ \
|
||||
lll_lock ((_name).lock); \
|
||||
lll_lock ((_name).lock, LLL_PRIVATE); \
|
||||
(_name).owner = __self; \
|
||||
} \
|
||||
++(_name).cnt; \
|
||||
@ -72,7 +72,7 @@ typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
|
||||
if (--(_name).cnt == 0) \
|
||||
{ \
|
||||
(_name).owner = NULL; \
|
||||
lll_unlock ((_name).lock); \
|
||||
lll_unlock ((_name).lock, LLL_PRIVATE); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
|
||||
/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
@ -60,7 +60,7 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
|
||||
/* We Make sure the thread does not run far by forcing it to get a
|
||||
lock. We lock it here too so that the new thread cannot continue
|
||||
until we tell it to. */
|
||||
lll_lock (pd->lock);
|
||||
lll_lock (pd->lock, LLL_PRIVATE);
|
||||
|
||||
/* One more thread. We cannot have the thread do this itself, since it
|
||||
might exist but not have been scheduled yet by the time we've returned
|
||||
@ -223,7 +223,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
|
||||
__nptl_create_event ();
|
||||
|
||||
/* And finally restart the new thread. */
|
||||
lll_unlock (pd->lock);
|
||||
lll_unlock (pd->lock, LLL_PRIVATE);
|
||||
}
|
||||
|
||||
return res;
|
||||
@ -250,7 +250,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
|
||||
|
||||
if (res == 0 && stopped)
|
||||
/* And finally restart the new thread. */
|
||||
lll_unlock (pd->lock);
|
||||
lll_unlock (pd->lock, LLL_PRIVATE);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
@ -70,9 +70,6 @@
|
||||
#endif
|
||||
|
||||
|
||||
/* Initializer for compatibility lock. */
|
||||
#define LLL_MUTEX_LOCK_INITIALIZER (0)
|
||||
|
||||
#define lll_futex_wait(futexp, val, private) \
|
||||
lll_futex_timed_wait (futexp, val, NULL, private)
|
||||
|
||||
@ -96,7 +93,7 @@
|
||||
INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret; \
|
||||
})
|
||||
|
||||
#define lll_robust_mutex_dead(futexv) \
|
||||
#define lll_robust_dead(futexv) \
|
||||
do \
|
||||
{ \
|
||||
int *__futexp = &(futexv); \
|
||||
@ -132,149 +129,130 @@
|
||||
|
||||
|
||||
static inline int __attribute__((always_inline))
|
||||
__lll_mutex_trylock(int *futex)
|
||||
__lll_trylock(int *futex)
|
||||
{
|
||||
return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
|
||||
}
|
||||
#define lll_mutex_trylock(lock) __lll_mutex_trylock (&(lock))
|
||||
#define lll_trylock(lock) __lll_trylock (&(lock))
|
||||
|
||||
|
||||
static inline int __attribute__((always_inline))
|
||||
__lll_mutex_cond_trylock(int *futex)
|
||||
__lll_cond_trylock(int *futex)
|
||||
{
|
||||
return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0;
|
||||
}
|
||||
#define lll_mutex_cond_trylock(lock) __lll_mutex_cond_trylock (&(lock))
|
||||
#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))
|
||||
|
||||
|
||||
static inline int __attribute__((always_inline))
|
||||
__lll_robust_mutex_trylock(int *futex, int id)
|
||||
__lll_robust_trylock(int *futex, int id)
|
||||
{
|
||||
return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
|
||||
}
|
||||
#define lll_robust_mutex_trylock(lock, id) \
|
||||
__lll_robust_mutex_trylock (&(lock), id)
|
||||
#define lll_robust_trylock(lock, id) \
|
||||
__lll_robust_trylock (&(lock), id)
|
||||
|
||||
extern void __lll_lock_wait (int *futex) attribute_hidden;
|
||||
extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
|
||||
extern void __lll_lock_wait_private (int *futex) attribute_hidden;
|
||||
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
|
||||
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
|
||||
|
||||
static inline void __attribute__((always_inline))
|
||||
__lll_mutex_lock(int *futex)
|
||||
__lll_lock(int *futex, int private)
|
||||
{
|
||||
if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
|
||||
__lll_lock_wait (futex);
|
||||
{
|
||||
if (__builtin_constant_p (private) && private == LLL_PRIVATE)
|
||||
__lll_lock_wait_private (futex);
|
||||
else
|
||||
__lll_lock_wait (futex, private);
|
||||
}
|
||||
}
|
||||
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
|
||||
#define lll_lock(futex, private) __lll_lock (&(futex), private)
|
||||
|
||||
|
||||
static inline int __attribute__ ((always_inline))
|
||||
__lll_robust_mutex_lock (int *futex, int id)
|
||||
__lll_robust_lock (int *futex, int id, int private)
|
||||
{
|
||||
int result = 0;
|
||||
if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
|
||||
result = __lll_robust_lock_wait (futex);
|
||||
result = __lll_robust_lock_wait (futex, private);
|
||||
return result;
|
||||
}
|
||||
#define lll_robust_mutex_lock(futex, id) \
|
||||
__lll_robust_mutex_lock (&(futex), id)
|
||||
#define lll_robust_lock(futex, id, private) \
|
||||
__lll_robust_lock (&(futex), id, private)
|
||||
|
||||
|
||||
static inline void __attribute__ ((always_inline))
|
||||
__lll_mutex_cond_lock (int *futex)
|
||||
__lll_cond_lock (int *futex, int private)
|
||||
{
|
||||
if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
|
||||
__lll_lock_wait (futex);
|
||||
__lll_lock_wait (futex, private);
|
||||
}
|
||||
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
|
||||
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
|
||||
|
||||
|
||||
#define lll_robust_mutex_cond_lock(futex, id) \
|
||||
__lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
|
||||
#define lll_robust_cond_lock(futex, id, private) \
|
||||
__lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
|
||||
|
||||
|
||||
extern int __lll_timedlock_wait (int *futex, const struct timespec *)
|
||||
attribute_hidden;
|
||||
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
|
||||
attribute_hidden;
|
||||
extern int __lll_timedlock_wait (int *futex, const struct timespec *,
|
||||
int private) attribute_hidden;
|
||||
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
|
||||
int private) attribute_hidden;
|
||||
|
||||
static inline int __attribute__ ((always_inline))
|
||||
__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
|
||||
__lll_timedlock (int *futex, const struct timespec *abstime, int private)
|
||||
{
|
||||
int result = 0;
|
||||
if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
|
||||
result = __lll_timedlock_wait (futex, abstime);
|
||||
result = __lll_timedlock_wait (futex, abstime, private);
|
||||
return result;
|
||||
}
|
||||
#define lll_mutex_timedlock(futex, abstime) \
|
||||
__lll_mutex_timedlock (&(futex), abstime)
|
||||
#define lll_timedlock(futex, abstime, private) \
|
||||
__lll_timedlock (&(futex), abstime, private)
|
||||
|
||||
|
||||
static inline int __attribute__ ((always_inline))
|
||||
__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
|
||||
int id)
|
||||
__lll_robust_timedlock (int *futex, const struct timespec *abstime,
|
||||
int id, int private)
|
||||
{
|
||||
int result = 0;
|
||||
if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
|
||||
result = __lll_robust_timedlock_wait (futex, abstime);
|
||||
return result;
|
||||
}
|
||||
#define lll_robust_mutex_timedlock(futex, abstime, id) \
|
||||
__lll_robust_mutex_timedlock (&(futex), abstime, id)
|
||||
#define lll_robust_timedlock(futex, abstime, id, private) \
|
||||
__lll_robust_timedlock (&(futex), abstime, id, private)
|
||||
|
||||
|
||||
static inline void __attribute__ ((always_inline))
|
||||
__lll_mutex_unlock (int *futex)
|
||||
__lll_unlock (int *futex, int private)
|
||||
{
|
||||
int val = atomic_exchange_rel (futex, 0);
|
||||
if (__builtin_expect (val > 1, 0))
|
||||
lll_futex_wake (futex, 1, LLL_SHARED);
|
||||
lll_futex_wake (futex, 1, private);
|
||||
}
|
||||
#define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex))
|
||||
#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
|
||||
|
||||
|
||||
static inline void __attribute__ ((always_inline))
|
||||
__lll_robust_mutex_unlock (int *futex, int mask)
|
||||
__lll_robust_unlock (int *futex, int private)
|
||||
{
|
||||
int val = atomic_exchange_rel (futex, 0);
|
||||
if (__builtin_expect (val & mask, 0))
|
||||
lll_futex_wake (futex, 1, LLL_SHARED);
|
||||
if (__builtin_expect (val & FUTEX_WAITERS, 0))
|
||||
lll_futex_wake (futex, 1, private);
|
||||
}
|
||||
#define lll_robust_mutex_unlock(futex) \
|
||||
__lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS)
|
||||
#define lll_robust_unlock(futex, private) \
|
||||
__lll_robust_unlock(&(futex), private)
|
||||
|
||||
|
||||
static inline void __attribute__ ((always_inline))
|
||||
__lll_mutex_unlock_force (int *futex)
|
||||
{
|
||||
(void) atomic_exchange_rel (futex, 0);
|
||||
lll_futex_wake (futex, 1, LLL_SHARED);
|
||||
}
|
||||
#define lll_mutex_unlock_force(futex) __lll_mutex_unlock_force(&(futex))
|
||||
|
||||
|
||||
#define lll_mutex_islocked(futex) \
|
||||
#define lll_islocked(futex) \
|
||||
(futex != 0)
|
||||
|
||||
|
||||
/* Our internal lock implementation is identical to the binary-compatible
|
||||
mutex implementation. */
|
||||
|
||||
/* Type for lock object. */
|
||||
typedef int lll_lock_t;
|
||||
|
||||
/* Initializers for lock. */
|
||||
#define LLL_LOCK_INITIALIZER (0)
|
||||
#define LLL_LOCK_INITIALIZER_LOCKED (1)
|
||||
|
||||
/* The states of a lock are:
|
||||
0 - untaken
|
||||
1 - taken by one user
|
||||
>1 - taken by more users */
|
||||
|
||||
#define lll_trylock(lock) lll_mutex_trylock (lock)
|
||||
#define lll_lock(lock) lll_mutex_lock (lock)
|
||||
#define lll_unlock(lock) lll_mutex_unlock (lock)
|
||||
#define lll_islocked(lock) lll_mutex_islocked (lock)
|
||||
|
||||
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
|
||||
wakeup when the clone terminates. The memory location contains the
|
||||
@ -298,26 +276,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
|
||||
__res; \
|
||||
})
|
||||
|
||||
|
||||
/* Conditional variable handling. */
|
||||
|
||||
extern void __lll_cond_wait (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
extern int __lll_cond_timedwait (pthread_cond_t *cond,
|
||||
const struct timespec *abstime)
|
||||
attribute_hidden;
|
||||
extern void __lll_cond_wake (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
extern void __lll_cond_broadcast (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
|
||||
#define lll_cond_wait(cond) \
|
||||
__lll_cond_wait (cond)
|
||||
#define lll_cond_timedwait(cond, abstime) \
|
||||
__lll_cond_timedwait (cond, abstime)
|
||||
#define lll_cond_wake(cond) \
|
||||
__lll_cond_wake (cond)
|
||||
#define lll_cond_broadcast(cond) \
|
||||
__lll_cond_broadcast (cond)
|
||||
|
||||
#endif /* lowlevellock.h */
|
||||
|
@ -183,7 +183,7 @@ __libc_fork (void)
|
||||
}
|
||||
|
||||
/* Initialize the fork lock. */
|
||||
__fork_lock = (lll_lock_t) LLL_LOCK_INITIALIZER;
|
||||
__fork_lock = LLL_LOCK_INITIALIZER;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (C) 2002, 2003, 2006 Free Software Foundation, Inc.
|
||||
/* Copyright (C) 2002, 2003, 2006, 2007 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
@ -26,7 +26,7 @@ extern unsigned long int __fork_generation attribute_hidden;
|
||||
extern unsigned long int *__fork_generation_pointer attribute_hidden;
|
||||
|
||||
/* Lock to protect allocation and deallocation of fork handlers. */
|
||||
extern lll_lock_t __fork_lock attribute_hidden;
|
||||
extern int __fork_lock attribute_hidden;
|
||||
|
||||
/* Elements of the fork handler lists. */
|
||||
struct fork_handler
|
||||
|
@ -17,19 +17,4 @@
|
||||
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <kernel-features.h>
|
||||
|
||||
/* All locks in libc are private. Use the kernel feature if possible. */
|
||||
#define FUTEX_PRIVATE_FLAG 128
|
||||
#ifdef __ASSUME_PRIVATE_FUTEX
|
||||
# define FUTEX_WAIT (0 | FUTEX_PRIVATE_FLAG)
|
||||
# define FUTEX_WAKE (1 | FUTEX_PRIVATE_FLAG)
|
||||
#else
|
||||
# define LOAD_FUTEX_WAIT(reg) \
|
||||
movl %gs:PRIVATE_FUTEX, reg
|
||||
# define LOAD_FUTEX_WAKE(reg) \
|
||||
movl %gs:PRIVATE_FUTEX, reg ; \
|
||||
orl $FUTEX_WAKE, reg
|
||||
#endif
|
||||
|
||||
#include "lowlevellock.S"
|
||||
|
@ -19,42 +19,53 @@
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <pthread-errnos.h>
|
||||
#include <kernel-features.h>
|
||||
#include <lowlevellock.h>
|
||||
|
||||
.text
|
||||
|
||||
#ifndef LOCK
|
||||
# ifdef UP
|
||||
# define LOCK
|
||||
#ifdef __ASSUME_PRIVATE_FUTEX
|
||||
# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
|
||||
movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
|
||||
# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
|
||||
movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
|
||||
# define LOAD_FUTEX_WAIT(reg) \
|
||||
xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
|
||||
# define LOAD_FUTEX_WAKE(reg) \
|
||||
xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
|
||||
#else
|
||||
# if FUTEX_WAIT == 0
|
||||
# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
|
||||
movl %gs:PRIVATE_FUTEX, reg
|
||||
# else
|
||||
# define LOCK lock
|
||||
# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
|
||||
movl %gs:PRIVATE_FUTEX, reg ; \
|
||||
orl $FUTEX_WAIT, reg
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#define SYS_gettimeofday __NR_gettimeofday
|
||||
#define SYS_futex 240
|
||||
#ifndef FUTEX_WAIT
|
||||
# define FUTEX_WAIT 0
|
||||
# define FUTEX_WAKE 1
|
||||
#endif
|
||||
|
||||
#ifndef LOAD_FUTEX_WAIT
|
||||
# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
|
||||
movl %gs:PRIVATE_FUTEX, reg ; \
|
||||
orl $FUTEX_WAKE, reg
|
||||
# if FUTEX_WAIT == 0
|
||||
# define LOAD_FUTEX_WAIT(reg) \
|
||||
xorl reg, reg
|
||||
xorl $FUTEX_PRIVATE_FLAG, reg ; \
|
||||
andl %gs:PRIVATE_FUTEX, reg
|
||||
# else
|
||||
# define LOAD_FUTEX_WAIT(reg) \
|
||||
movl $FUTEX_WAIT, reg
|
||||
xorl $FUTEX_PRIVATE_FLAG, reg ; \
|
||||
andl %gs:PRIVATE_FUTEX, reg ; \
|
||||
orl $FUTEX_WAIT, reg
|
||||
# endif
|
||||
# define LOAD_FUTEX_WAKE(reg) \
|
||||
movl $FUTEX_WAKE, reg
|
||||
xorl $FUTEX_PRIVATE_FLAG, reg ; \
|
||||
andl %gs:PRIVATE_FUTEX, reg ; \
|
||||
orl $FUTEX_WAKE, reg
|
||||
#endif
|
||||
|
||||
|
||||
.globl __lll_mutex_lock_wait
|
||||
.type __lll_mutex_lock_wait,@function
|
||||
.hidden __lll_mutex_lock_wait
|
||||
.globl __lll_lock_wait_private
|
||||
.type __lll_lock_wait_private,@function
|
||||
.hidden __lll_lock_wait_private
|
||||
.align 16
|
||||
__lll_mutex_lock_wait:
|
||||
__lll_lock_wait_private:
|
||||
cfi_startproc
|
||||
pushl %edx
|
||||
cfi_adjust_cfa_offset(4)
|
||||
@ -69,6 +80,53 @@ __lll_mutex_lock_wait:
|
||||
movl $2, %edx
|
||||
movl %ecx, %ebx
|
||||
xorl %esi, %esi /* No timeout. */
|
||||
LOAD_PRIVATE_FUTEX_WAIT (%ecx)
|
||||
|
||||
cmpl %edx, %eax /* NB: %edx == 2 */
|
||||
jne 2f
|
||||
|
||||
1: movl $SYS_futex, %eax
|
||||
ENTER_KERNEL
|
||||
|
||||
2: movl %edx, %eax
|
||||
xchgl %eax, (%ebx) /* NB: lock is implied */
|
||||
|
||||
testl %eax, %eax
|
||||
jnz 1b
|
||||
|
||||
popl %esi
|
||||
cfi_adjust_cfa_offset(-4)
|
||||
cfi_restore(%esi)
|
||||
popl %ebx
|
||||
cfi_adjust_cfa_offset(-4)
|
||||
cfi_restore(%ebx)
|
||||
popl %edx
|
||||
cfi_adjust_cfa_offset(-4)
|
||||
cfi_restore(%edx)
|
||||
ret
|
||||
cfi_endproc
|
||||
.size __lll_lock_wait_private,.-__lll_lock_wait_private
|
||||
|
||||
#ifdef NOT_IN_libc
|
||||
.globl __lll_lock_wait
|
||||
.type __lll_lock_wait,@function
|
||||
.hidden __lll_lock_wait
|
||||
.align 16
|
||||
__lll_lock_wait:
|
||||
cfi_startproc
|
||||
pushl %edx
|
||||
cfi_adjust_cfa_offset(4)
|
||||
pushl %ebx
|
||||
cfi_adjust_cfa_offset(4)
|
||||
pushl %esi
|
||||
cfi_adjust_cfa_offset(4)
|
||||
cfi_offset(%edx, -8)
|
||||
cfi_offset(%ebx, -12)
|
||||
cfi_offset(%esi, -16)
|
||||
|
||||
movl %edx, %ebx
|
||||
movl $2, %edx
|
||||
xorl %esi, %esi /* No timeout. */
|
||||
LOAD_FUTEX_WAIT (%ecx)
|
||||
|
||||
cmpl %edx, %eax /* NB: %edx == 2 */
|
||||
@ -94,15 +152,13 @@ __lll_mutex_lock_wait:
|
||||
cfi_restore(%edx)
|
||||
ret
|
||||
cfi_endproc
|
||||
.size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
|
||||
.size __lll_lock_wait,.-__lll_lock_wait
|
||||
|
||||
|
||||
#ifdef NOT_IN_libc
|
||||
.globl __lll_mutex_timedlock_wait
|
||||
.type __lll_mutex_timedlock_wait,@function
|
||||
.hidden __lll_mutex_timedlock_wait
|
||||
.globl __lll_timedlock_wait
|
||||
.type __lll_timedlock_wait,@function
|
||||
.hidden __lll_timedlock_wait
|
||||
.align 16
|
||||
__lll_mutex_timedlock_wait:
|
||||
__lll_timedlock_wait:
|
||||
cfi_startproc
|
||||
/* Check for a valid timeout value. */
|
||||
cmpl $1000000000, 4(%edx)
|
||||
@ -132,7 +188,7 @@ __lll_mutex_timedlock_wait:
|
||||
/* Get current time. */
|
||||
movl %esp, %ebx
|
||||
xorl %ecx, %ecx
|
||||
movl $SYS_gettimeofday, %eax
|
||||
movl $__NR_gettimeofday, %eax
|
||||
ENTER_KERNEL
|
||||
|
||||
/* Compute relative timeout. */
|
||||
@ -165,6 +221,7 @@ __lll_mutex_timedlock_wait:
|
||||
|
||||
/* Futex call. */
|
||||
movl %esp, %esi
|
||||
movl 16(%esp), %ecx
|
||||
LOAD_FUTEX_WAIT (%ecx)
|
||||
movl $SYS_futex, %eax
|
||||
ENTER_KERNEL
|
||||
@ -215,15 +272,51 @@ __lll_mutex_timedlock_wait:
|
||||
5: movl $ETIMEDOUT, %eax
|
||||
jmp 6b
|
||||
cfi_endproc
|
||||
.size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
|
||||
.size __lll_timedlock_wait,.-__lll_timedlock_wait
|
||||
#endif
|
||||
|
||||
|
||||
.globl __lll_mutex_unlock_wake
|
||||
.type __lll_mutex_unlock_wake,@function
|
||||
.hidden __lll_mutex_unlock_wake
|
||||
.globl __lll_unlock_wake_private
|
||||
.type __lll_unlock_wake_private,@function
|
||||
.hidden __lll_unlock_wake_private
|
||||
.align 16
|
||||
__lll_mutex_unlock_wake:
|
||||
__lll_unlock_wake_private:
|
||||
cfi_startproc
|
||||
pushl %ebx
|
||||
cfi_adjust_cfa_offset(4)
|
||||
pushl %ecx
|
||||
cfi_adjust_cfa_offset(4)
|
||||
pushl %edx
|
||||
cfi_adjust_cfa_offset(4)
|
||||
cfi_offset(%ebx, -8)
|
||||
cfi_offset(%ecx, -12)
|
||||
cfi_offset(%edx, -16)
|
||||
|
||||
movl %eax, %ebx
|
||||
movl $0, (%eax)
|
||||
LOAD_PRIVATE_FUTEX_WAKE (%ecx)
|
||||
movl $1, %edx /* Wake one thread. */
|
||||
movl $SYS_futex, %eax
|
||||
ENTER_KERNEL
|
||||
|
||||
popl %edx
|
||||
cfi_adjust_cfa_offset(-4)
|
||||
cfi_restore(%edx)
|
||||
popl %ecx
|
||||
cfi_adjust_cfa_offset(-4)
|
||||
cfi_restore(%ecx)
|
||||
popl %ebx
|
||||
cfi_adjust_cfa_offset(-4)
|
||||
cfi_restore(%ebx)
|
||||
ret
|
||||
cfi_endproc
|
||||
.size __lll_unlock_wake_private,.-__lll_unlock_wake_private
|
||||
|
||||
#ifdef NOT_IN_libc
|
||||
.globl __lll_unlock_wake
|
||||
.type __lll_unlock_wake,@function
|
||||
.hidden __lll_unlock_wake
|
||||
.align 16
|
||||
__lll_unlock_wake:
|
||||
cfi_startproc
|
||||
pushl %ebx
|
||||
cfi_adjust_cfa_offset(4)
|
||||
@ -253,10 +346,8 @@ __lll_mutex_unlock_wake:
|
||||
cfi_restore(%ebx)
|
||||
ret
|
||||
cfi_endproc
|
||||
.size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
|
||||
.size __lll_unlock_wake,.-__lll_unlock_wake
|
||||
|
||||
|
||||
#ifdef NOT_IN_libc
|
||||
.globl __lll_timedwait_tid
|
||||
.type __lll_timedwait_tid,@function
|
||||
.hidden __lll_timedwait_tid
|
||||
@ -274,7 +365,7 @@ __lll_timedwait_tid:
|
||||
/* Get current time. */
|
||||
2: movl %esp, %ebx
|
||||
xorl %ecx, %ecx
|
||||
movl $SYS_gettimeofday, %eax
|
||||
movl $__NR_gettimeofday, %eax
|
||||
ENTER_KERNEL
|
||||
|
||||
/* Compute relative timeout. */
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
|
||||
/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
@ -19,31 +19,36 @@
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <pthread-errnos.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelrobustlock.h>
|
||||
#include <kernel-features.h>
|
||||
|
||||
.text
|
||||
|
||||
#ifndef LOCK
|
||||
# ifdef UP
|
||||
# define LOCK
|
||||
# else
|
||||
# define LOCK lock
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#define SYS_gettimeofday __NR_gettimeofday
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_WAITERS 0x80000000
|
||||
#define FUTEX_OWNER_DIED 0x40000000
|
||||
|
||||
#ifdef __ASSUME_PRIVATE_FUTEX
|
||||
# define LOAD_FUTEX_WAIT(reg) \
|
||||
xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
|
||||
#else
|
||||
# if FUTEX_WAIT == 0
|
||||
# define LOAD_FUTEX_WAIT(reg) \
|
||||
xorl $FUTEX_PRIVATE_FLAG, reg ; \
|
||||
andl %gs:PRIVATE_FUTEX, reg
|
||||
# else
|
||||
# define LOAD_FUTEX_WAIT(reg) \
|
||||
xorl $FUTEX_PRIVATE_FLAG, reg ; \
|
||||
andl %gs:PRIVATE_FUTEX, reg ; \
|
||||
orl $FUTEX_WAIT, reg
|
||||
# endif
|
||||
#endif
|
||||
|
||||
.globl __lll_robust_mutex_lock_wait
|
||||
.type __lll_robust_mutex_lock_wait,@function
|
||||
.hidden __lll_robust_mutex_lock_wait
|
||||
.globl __lll_robust_lock_wait
|
||||
.type __lll_robust_lock_wait,@function
|
||||
.hidden __lll_robust_lock_wait
|
||||
.align 16
|
||||
__lll_robust_mutex_lock_wait:
|
||||
__lll_robust_lock_wait:
|
||||
cfi_startproc
|
||||
pushl %edx
|
||||
cfi_adjust_cfa_offset(4)
|
||||
@ -55,9 +60,9 @@ __lll_robust_mutex_lock_wait:
|
||||
cfi_offset(%ebx, -12)
|
||||
cfi_offset(%esi, -16)
|
||||
|
||||
movl %ecx, %ebx
|
||||
movl %edx, %ebx
|
||||
xorl %esi, %esi /* No timeout. */
|
||||
xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
|
||||
LOAD_FUTEX_WAIT (%ecx)
|
||||
|
||||
4: movl %eax, %edx
|
||||
orl $FUTEX_WAITERS, %edx
|
||||
@ -98,14 +103,14 @@ __lll_robust_mutex_lock_wait:
|
||||
cfi_restore(%edx)
|
||||
ret
|
||||
cfi_endproc
|
||||
.size __lll_robust_mutex_lock_wait,.-__lll_robust_mutex_lock_wait
|
||||
.size __lll_robust_lock_wait,.-__lll_robust_lock_wait
|
||||
|
||||
|
||||
.globl __lll_robust_mutex_timedlock_wait
|
||||
.type __lll_robust_mutex_timedlock_wait,@function
|
||||
.hidden __lll_robust_mutex_timedlock_wait
|
||||
.globl __lll_robust_timedlock_wait
|
||||
.type __lll_robust_timedlock_wait,@function
|
||||
.hidden __lll_robust_timedlock_wait
|
||||
.align 16
|
||||
__lll_robust_mutex_timedlock_wait:
|
||||
__lll_robust_timedlock_wait:
|
||||
cfi_startproc
|
||||
/* Check for a valid timeout value. */
|
||||
cmpl $1000000000, 4(%edx)
|
||||
@ -136,7 +141,7 @@ __lll_robust_mutex_timedlock_wait:
|
||||
/* Get current time. */
|
||||
movl %esp, %ebx
|
||||
xorl %ecx, %ecx
|
||||
movl $SYS_gettimeofday, %eax
|
||||
movl $__NR_gettimeofday, %eax
|
||||
ENTER_KERNEL
|
||||
|
||||
/* Compute relative timeout. */
|
||||
@ -177,7 +182,8 @@ __lll_robust_mutex_timedlock_wait:
|
||||
2:
|
||||
/* Futex call. */
|
||||
movl %esp, %esi
|
||||
xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
|
||||
movl 20(%esp), %ecx
|
||||
LOAD_FUTEX_WAIT (%ecx)
|
||||
movl $SYS_futex, %eax
|
||||
ENTER_KERNEL
|
||||
movl %eax, %ecx
|
||||
@ -224,4 +230,4 @@ __lll_robust_mutex_timedlock_wait:
|
||||
8: movl $ETIMEDOUT, %eax
|
||||
jmp 6b
|
||||
cfi_endproc
|
||||
.size __lll_robust_mutex_timedlock_wait,.-__lll_robust_mutex_timedlock_wait
|
||||
.size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
|
||||
|
@ -18,19 +18,9 @@
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelbarrier.h>
|
||||
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define LOCK
|
||||
#endif
|
||||
|
||||
|
||||
.text
|
||||
|
||||
.globl pthread_barrier_wait
|
||||
@ -152,19 +142,27 @@ pthread_barrier_wait:
|
||||
popl %ebx
|
||||
ret
|
||||
|
||||
1: leal MUTEX(%ebx), %ecx
|
||||
call __lll_mutex_lock_wait
|
||||
1: movl PRIVATE(%ebx), %ecx
|
||||
leal MUTEX(%ebx), %edx
|
||||
xorl $LLL_SHARED, %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 2b
|
||||
|
||||
4: leal MUTEX(%ebx), %eax
|
||||
call __lll_mutex_unlock_wake
|
||||
4: movl PRIVATE(%ebx), %ecx
|
||||
leal MUTEX(%ebx), %eax
|
||||
xorl $LLL_SHARED, %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 5b
|
||||
|
||||
6: leal MUTEX(%ebx), %eax
|
||||
call __lll_mutex_unlock_wake
|
||||
6: movl PRIVATE(%ebx), %ecx
|
||||
leal MUTEX(%ebx), %eax
|
||||
xorl $LLL_SHARED, %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 7b
|
||||
|
||||
9: leal MUTEX(%ebx), %eax
|
||||
call __lll_mutex_unlock_wake
|
||||
9: movl PRIVATE(%ebx), %ecx
|
||||
leal MUTEX(%ebx), %eax
|
||||
xorl $LLL_SHARED, %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 10b
|
||||
.size pthread_barrier_wait,.-pthread_barrier_wait
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
|
||||
/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
@ -19,24 +19,11 @@
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <shlib-compat.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelcond.h>
|
||||
#include <kernel-features.h>
|
||||
#include <pthread-pi-defines.h>
|
||||
|
||||
#ifdef UP
|
||||
# define LOCK
|
||||
#else
|
||||
# define LOCK lock
|
||||
#endif
|
||||
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_REQUEUE 3
|
||||
#define FUTEX_CMP_REQUEUE 4
|
||||
|
||||
#define EINVAL 22
|
||||
|
||||
#include <pthread-errnos.h>
|
||||
|
||||
.text
|
||||
|
||||
@ -141,21 +128,27 @@ __pthread_cond_broadcast:
|
||||
/* Initial locking failed. */
|
||||
1:
|
||||
#if cond_lock == 0
|
||||
movl %ebx, %ecx
|
||||
movl %ebx, %edx
|
||||
#else
|
||||
leal cond_lock(%ebx), %ecx
|
||||
leal cond_lock(%ebx), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 2b
|
||||
|
||||
/* Unlock in loop requires wakeup. */
|
||||
5: leal cond_lock-cond_futex(%ebx), %eax
|
||||
call __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 6b
|
||||
|
||||
/* Unlock in loop requires wakeup. */
|
||||
7: leal cond_lock-cond_futex(%ebx), %eax
|
||||
call __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 8b
|
||||
|
||||
9: /* The futex requeue functionality is not available. */
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
|
||||
/* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
@ -19,23 +19,10 @@
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <shlib-compat.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelcond.h>
|
||||
#include <kernel-features.h>
|
||||
|
||||
#ifdef UP
|
||||
# define LOCK
|
||||
#else
|
||||
# define LOCK lock
|
||||
#endif
|
||||
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_WAKE_OP 5
|
||||
|
||||
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
|
||||
|
||||
#define EINVAL 22
|
||||
#include <pthread-errnos.h>
|
||||
|
||||
|
||||
.text
|
||||
@ -119,17 +106,21 @@ __pthread_cond_signal:
|
||||
|
||||
/* Unlock in loop requires wakeup. */
|
||||
5: movl %edi, %eax
|
||||
call __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 6b
|
||||
|
||||
/* Initial locking failed. */
|
||||
1:
|
||||
#if cond_lock == 0
|
||||
movl %edi, %ecx
|
||||
movl %edi, %edx
|
||||
#else
|
||||
leal cond_lock(%edi), %ecx
|
||||
leal cond_lock(%edi), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 2b
|
||||
|
||||
.size __pthread_cond_signal, .-__pthread_cond_signal
|
||||
|
@ -19,20 +19,10 @@
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <shlib-compat.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelcond.h>
|
||||
#include <pthread-errnos.h>
|
||||
|
||||
#ifdef UP
|
||||
# define LOCK
|
||||
#else
|
||||
# define LOCK lock
|
||||
#endif
|
||||
|
||||
#define SYS_gettimeofday __NR_gettimeofday
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
|
||||
|
||||
.text
|
||||
|
||||
@ -127,7 +117,7 @@ __pthread_cond_timedwait:
|
||||
/* Get the current time. */
|
||||
leal 4(%esp), %ebx
|
||||
xorl %ecx, %ecx
|
||||
movl $SYS_gettimeofday, %eax
|
||||
movl $__NR_gettimeofday, %eax
|
||||
ENTER_KERNEL
|
||||
movl %edx, %ebx
|
||||
|
||||
@ -285,11 +275,13 @@ __pthread_cond_timedwait:
|
||||
1:
|
||||
.LSbl1:
|
||||
#if cond_lock == 0
|
||||
movl %ebx, %ecx
|
||||
movl %ebx, %edx
|
||||
#else
|
||||
leal cond_lock(%ebx), %ecx
|
||||
leal cond_lock(%ebx), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 2b
|
||||
|
||||
/* Unlock in loop requires wakeup. */
|
||||
@ -300,17 +292,21 @@ __pthread_cond_timedwait:
|
||||
#else
|
||||
leal cond_lock(%ebx), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 4b
|
||||
|
||||
/* Locking in loop failed. */
|
||||
5:
|
||||
#if cond_lock == 0
|
||||
movl %ebx, %ecx
|
||||
movl %ebx, %edx
|
||||
#else
|
||||
leal cond_lock(%ebx), %ecx
|
||||
leal cond_lock(%ebx), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 6b
|
||||
|
||||
/* Unlock after loop requires wakeup. */
|
||||
@ -320,7 +316,9 @@ __pthread_cond_timedwait:
|
||||
#else
|
||||
leal cond_lock(%ebx), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 11b
|
||||
|
||||
/* The initial unlocking of the mutex failed. */
|
||||
@ -340,7 +338,9 @@ __pthread_cond_timedwait:
|
||||
#else
|
||||
leal cond_lock(%ebx), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_unlock_wake
|
||||
|
||||
movl %esi, %eax
|
||||
jmp 18b
|
||||
@ -350,7 +350,7 @@ __pthread_cond_timedwait:
|
||||
.LSbl4:
|
||||
19: leal 4(%esp), %ebx
|
||||
xorl %ecx, %ecx
|
||||
movl $SYS_gettimeofday, %eax
|
||||
movl $__NR_gettimeofday, %eax
|
||||
ENTER_KERNEL
|
||||
movl %edx, %ebx
|
||||
|
||||
@ -396,11 +396,13 @@ __condvar_tw_cleanup:
|
||||
jz 1f
|
||||
|
||||
#if cond_lock == 0
|
||||
movl %ebx, %ecx
|
||||
movl %ebx, %edx
|
||||
#else
|
||||
leal cond_lock(%ebx), %ecx
|
||||
leal cond_lock(%ebx), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_lock_wait
|
||||
|
||||
1: movl broadcast_seq(%ebx), %eax
|
||||
cmpl 20(%esp), %eax
|
||||
@ -457,7 +459,9 @@ __condvar_tw_cleanup:
|
||||
#else
|
||||
leal cond_lock(%ebx), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_unlock_wake
|
||||
|
||||
/* Wake up all waiters to make sure no signal gets lost. */
|
||||
2: testl %edi, %edi
|
||||
|
@ -19,19 +19,10 @@
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <shlib-compat.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelcond.h>
|
||||
#include <tcb-offsets.h>
|
||||
|
||||
#ifdef UP
|
||||
# define LOCK
|
||||
#else
|
||||
# define LOCK lock
|
||||
#endif
|
||||
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
|
||||
|
||||
.text
|
||||
|
||||
@ -202,11 +193,13 @@ __pthread_cond_wait:
|
||||
1:
|
||||
.LSbl1:
|
||||
#if cond_lock == 0
|
||||
movl %ebx, %ecx
|
||||
movl %ebx, %edx
|
||||
#else
|
||||
leal cond_lock(%ebx), %ecx
|
||||
leal cond_lock(%ebx), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 2b
|
||||
|
||||
/* Unlock in loop requires wakeup. */
|
||||
@ -217,17 +210,21 @@ __pthread_cond_wait:
|
||||
#else
|
||||
leal cond_lock(%ebx), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 4b
|
||||
|
||||
/* Locking in loop failed. */
|
||||
5:
|
||||
#if cond_lock == 0
|
||||
movl %ebx, %ecx
|
||||
movl %ebx, %edx
|
||||
#else
|
||||
leal cond_lock(%ebx), %ecx
|
||||
leal cond_lock(%ebx), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 6b
|
||||
|
||||
/* Unlock after loop requires wakeup. */
|
||||
@ -237,7 +234,9 @@ __pthread_cond_wait:
|
||||
#else
|
||||
leal cond_lock(%ebx), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 11b
|
||||
|
||||
/* The initial unlocking of the mutex failed. */
|
||||
@ -257,7 +256,9 @@ __pthread_cond_wait:
|
||||
#else
|
||||
leal cond_lock(%ebx), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_unlock_wake
|
||||
|
||||
movl %esi, %eax
|
||||
jmp 14b
|
||||
@ -287,11 +288,13 @@ __condvar_w_cleanup:
|
||||
jz 1f
|
||||
|
||||
#if cond_lock == 0
|
||||
movl %ebx, %ecx
|
||||
movl %ebx, %edx
|
||||
#else
|
||||
leal cond_lock(%ebx), %ecx
|
||||
leal cond_lock(%ebx), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_lock_wait
|
||||
|
||||
1: movl broadcast_seq(%ebx), %eax
|
||||
cmpl 12(%esp), %eax
|
||||
@ -348,7 +351,9 @@ __condvar_w_cleanup:
|
||||
#else
|
||||
leal cond_lock(%ebx), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %ecx
|
||||
call __lll_unlock_wake
|
||||
|
||||
/* Wake up all waiters to make sure no signal gets lost. */
|
||||
2: testl %edi, %edi
|
||||
|
@ -18,21 +18,11 @@
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelrwlock.h>
|
||||
#include <pthread-errnos.h>
|
||||
|
||||
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define LOCK
|
||||
#endif
|
||||
|
||||
|
||||
.text
|
||||
|
||||
.globl __pthread_rwlock_rdlock
|
||||
@ -108,7 +98,7 @@ __pthread_rwlock_rdlock:
|
||||
13: subl $1, READERS_QUEUED(%ebx)
|
||||
jmp 2b
|
||||
|
||||
5: xorl %ecx, %ecx
|
||||
5: xorl %edx, %edx
|
||||
addl $1, NR_READERS(%ebx)
|
||||
je 8f
|
||||
9: LOCK
|
||||
@ -120,24 +110,25 @@ __pthread_rwlock_rdlock:
|
||||
jne 6f
|
||||
7:
|
||||
|
||||
movl %ecx, %eax
|
||||
movl %edx, %eax
|
||||
popl %ebx
|
||||
popl %esi
|
||||
ret
|
||||
|
||||
1:
|
||||
#if MUTEX == 0
|
||||
movl %ebx, %ecx
|
||||
movl %ebx, %edx
|
||||
#else
|
||||
leal MUTEX(%ebx), %ecx
|
||||
leal MUTEX(%ebx), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
movl PSHARED(%ebx), %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 2b
|
||||
|
||||
14: cmpl %gs:TID, %eax
|
||||
jne 3b
|
||||
/* Deadlock detected. */
|
||||
movl $EDEADLK, %ecx
|
||||
movl $EDEADLK, %edx
|
||||
jmp 9b
|
||||
|
||||
6:
|
||||
@ -146,17 +137,18 @@ __pthread_rwlock_rdlock:
|
||||
#else
|
||||
leal MUTEX(%ebx), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
movl PSHARED(%ebx), %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 7b
|
||||
|
||||
/* Overflow. */
|
||||
8: subl $1, NR_READERS(%ebx)
|
||||
movl $EAGAIN, %ecx
|
||||
movl $EAGAIN, %edx
|
||||
jmp 9b
|
||||
|
||||
/* Overflow. */
|
||||
4: subl $1, READERS_QUEUED(%ebx)
|
||||
movl $EAGAIN, %ecx
|
||||
movl $EAGAIN, %edx
|
||||
jmp 9b
|
||||
|
||||
10:
|
||||
@ -165,16 +157,18 @@ __pthread_rwlock_rdlock:
|
||||
#else
|
||||
leal MUTEX(%ebx), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
movl PSHARED(%ebx), %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 11b
|
||||
|
||||
12:
|
||||
#if MUTEX == 0
|
||||
movl %ebx, %ecx
|
||||
movl %ebx, %edx
|
||||
#else
|
||||
leal MUTEX(%ebx), %ecx
|
||||
leal MUTEX(%ebx), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
movl PSHARED(%ebx), %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 13b
|
||||
.size __pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
|
||||
|
||||
|
@ -18,22 +18,11 @@
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelrwlock.h>
|
||||
#include <pthread-errnos.h>
|
||||
|
||||
|
||||
#define SYS_gettimeofday __NR_gettimeofday
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define LOCK
|
||||
#endif
|
||||
|
||||
|
||||
.text
|
||||
|
||||
.globl pthread_rwlock_timedrdlock
|
||||
@ -88,7 +77,7 @@ pthread_rwlock_timedrdlock:
|
||||
/* Get current time. */
|
||||
11: movl %esp, %ebx
|
||||
xorl %ecx, %ecx
|
||||
movl $SYS_gettimeofday, %eax
|
||||
movl $__NR_gettimeofday, %eax
|
||||
ENTER_KERNEL
|
||||
|
||||
/* Compute relative timeout. */
|
||||
@ -142,11 +131,11 @@ pthread_rwlock_timedrdlock:
|
||||
cmpl $-ETIMEDOUT, %esi
|
||||
jne 2b
|
||||
|
||||
18: movl $ETIMEDOUT, %ecx
|
||||
18: movl $ETIMEDOUT, %edx
|
||||
jmp 9f
|
||||
|
||||
|
||||
5: xorl %ecx, %ecx
|
||||
5: xorl %edx, %edx
|
||||
addl $1, NR_READERS(%ebp)
|
||||
je 8f
|
||||
9: LOCK
|
||||
@ -157,7 +146,7 @@ pthread_rwlock_timedrdlock:
|
||||
#endif
|
||||
jne 6f
|
||||
|
||||
7: movl %ecx, %eax
|
||||
7: movl %edx, %eax
|
||||
|
||||
addl $8, %esp
|
||||
popl %ebp
|
||||
@ -168,16 +157,17 @@ pthread_rwlock_timedrdlock:
|
||||
|
||||
1:
|
||||
#if MUTEX == 0
|
||||
movl %ebp, %ecx
|
||||
movl %ebp, %edx
|
||||
#else
|
||||
leal MUTEX(%ebp), %ecx
|
||||
leal MUTEX(%ebp), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
movl PSHARED(%ebp), %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 2b
|
||||
|
||||
14: cmpl %gs:TID, %eax
|
||||
jne 3b
|
||||
movl $EDEADLK, %ecx
|
||||
movl $EDEADLK, %edx
|
||||
jmp 9b
|
||||
|
||||
6:
|
||||
@ -186,17 +176,18 @@ pthread_rwlock_timedrdlock:
|
||||
#else
|
||||
leal MUTEX(%ebp), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
movl PSHARED(%ebp), %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 7b
|
||||
|
||||
/* Overflow. */
|
||||
8: subl $1, NR_READERS(%ebp)
|
||||
movl $EAGAIN, %ecx
|
||||
movl $EAGAIN, %edx
|
||||
jmp 9b
|
||||
|
||||
/* Overflow. */
|
||||
4: subl $1, READERS_QUEUED(%ebp)
|
||||
movl $EAGAIN, %ecx
|
||||
movl $EAGAIN, %edx
|
||||
jmp 9b
|
||||
|
||||
10:
|
||||
@ -205,21 +196,23 @@ pthread_rwlock_timedrdlock:
|
||||
#else
|
||||
leal MUTEX(%ebp), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
movl PSHARED(%ebp), %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 11b
|
||||
|
||||
12:
|
||||
#if MUTEX == 0
|
||||
movl %ebp, %ecx
|
||||
movl %ebp, %edx
|
||||
#else
|
||||
leal MUTEX(%ebp), %ecx
|
||||
leal MUTEX(%ebp), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
movl PSHARED(%ebp), %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 13b
|
||||
|
||||
16: movl $-ETIMEDOUT, %esi
|
||||
jmp 17b
|
||||
|
||||
19: movl $EINVAL, %ecx
|
||||
19: movl $EINVAL, %edx
|
||||
jmp 9b
|
||||
.size pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
|
||||
|
@ -18,22 +18,11 @@
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelrwlock.h>
|
||||
#include <pthread-errnos.h>
|
||||
|
||||
|
||||
#define SYS_gettimeofday __NR_gettimeofday
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define LOCK
|
||||
#endif
|
||||
|
||||
|
||||
.text
|
||||
|
||||
.globl pthread_rwlock_timedwrlock
|
||||
@ -86,7 +75,7 @@ pthread_rwlock_timedwrlock:
|
||||
/* Get current time. */
|
||||
11: movl %esp, %ebx
|
||||
xorl %ecx, %ecx
|
||||
movl $SYS_gettimeofday, %eax
|
||||
movl $__NR_gettimeofday, %eax
|
||||
ENTER_KERNEL
|
||||
|
||||
/* Compute relative timeout. */
|
||||
@ -140,11 +129,11 @@ pthread_rwlock_timedwrlock:
|
||||
cmpl $-ETIMEDOUT, %esi
|
||||
jne 2b
|
||||
|
||||
18: movl $ETIMEDOUT, %ecx
|
||||
18: movl $ETIMEDOUT, %edx
|
||||
jmp 9f
|
||||
|
||||
|
||||
5: xorl %ecx, %ecx
|
||||
5: xorl %edx, %edx
|
||||
movl %gs:TID, %eax
|
||||
movl %eax, WRITER(%ebp)
|
||||
9: LOCK
|
||||
@ -155,7 +144,7 @@ pthread_rwlock_timedwrlock:
|
||||
#endif
|
||||
jne 6f
|
||||
|
||||
7: movl %ecx, %eax
|
||||
7: movl %edx, %eax
|
||||
|
||||
addl $8, %esp
|
||||
popl %ebp
|
||||
@ -166,16 +155,17 @@ pthread_rwlock_timedwrlock:
|
||||
|
||||
1:
|
||||
#if MUTEX == 0
|
||||
movl %ebp, %ecx
|
||||
movl %ebp, %edx
|
||||
#else
|
||||
leal MUTEX(%ebp), %ecx
|
||||
leal MUTEX(%ebp), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
movl PSHARED(%ebp), %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 2b
|
||||
|
||||
14: cmpl %gs:TID, %eax
|
||||
jne 3b
|
||||
20: movl $EDEADLK, %ecx
|
||||
20: movl $EDEADLK, %edx
|
||||
jmp 9b
|
||||
|
||||
6:
|
||||
@ -184,12 +174,13 @@ pthread_rwlock_timedwrlock:
|
||||
#else
|
||||
leal MUTEX(%ebp), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
movl PSHARED(%ebp), %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 7b
|
||||
|
||||
/* Overflow. */
|
||||
4: subl $1, WRITERS_QUEUED(%ebp)
|
||||
movl $EAGAIN, %ecx
|
||||
movl $EAGAIN, %edx
|
||||
jmp 9b
|
||||
|
||||
10:
|
||||
@ -198,21 +189,23 @@ pthread_rwlock_timedwrlock:
|
||||
#else
|
||||
leal MUTEX(%ebp), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
movl PSHARED(%ebp), %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 11b
|
||||
|
||||
12:
|
||||
#if MUTEX == 0
|
||||
movl %ebp, %ecx
|
||||
movl %ebp, %edx
|
||||
#else
|
||||
leal MUTEX(%ebp), %ecx
|
||||
leal MUTEX(%ebp), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
movl PSHARED(%ebp), %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 13b
|
||||
|
||||
16: movl $-ETIMEDOUT, %esi
|
||||
jmp 17b
|
||||
|
||||
19: movl $EINVAL, %ecx
|
||||
19: movl $EINVAL, %edx
|
||||
jmp 9b
|
||||
.size pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
|
||||
|
@ -18,20 +18,10 @@
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelrwlock.h>
|
||||
|
||||
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define LOCK
|
||||
#endif
|
||||
|
||||
|
||||
.text
|
||||
|
||||
.globl __pthread_rwlock_unlock
|
||||
@ -115,11 +105,12 @@ __pthread_rwlock_unlock:
|
||||
|
||||
1:
|
||||
#if MUTEX == 0
|
||||
movl %edi, %ecx
|
||||
movl %edi, %edx
|
||||
#else
|
||||
leal MUTEX(%edi), %ecx
|
||||
leal MUTEX(%edi), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
movl PSHARED(%edi), %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 2b
|
||||
|
||||
3:
|
||||
@ -128,7 +119,8 @@ __pthread_rwlock_unlock:
|
||||
#else
|
||||
leal MUTEX(%edi), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
movl PSHARED(%edi), %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 4b
|
||||
|
||||
7:
|
||||
@ -137,7 +129,8 @@ __pthread_rwlock_unlock:
|
||||
#else
|
||||
leal MUTEX(%edi), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
movl PSHARED(%edi), %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 8b
|
||||
|
||||
.size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
|
||||
|
@ -18,21 +18,11 @@
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelrwlock.h>
|
||||
#include <pthread-errnos.h>
|
||||
|
||||
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define LOCK
|
||||
#endif
|
||||
|
||||
|
||||
.text
|
||||
|
||||
.globl __pthread_rwlock_wrlock
|
||||
@ -106,7 +96,7 @@ __pthread_rwlock_wrlock:
|
||||
13: subl $1, WRITERS_QUEUED(%ebx)
|
||||
jmp 2b
|
||||
|
||||
5: xorl %ecx, %ecx
|
||||
5: xorl %edx, %edx
|
||||
movl %gs:TID, %eax
|
||||
movl %eax, WRITER(%ebx)
|
||||
9: LOCK
|
||||
@ -118,23 +108,24 @@ __pthread_rwlock_wrlock:
|
||||
jne 6f
|
||||
7:
|
||||
|
||||
movl %ecx, %eax
|
||||
movl %edx, %eax
|
||||
popl %ebx
|
||||
popl %esi
|
||||
ret
|
||||
|
||||
1:
|
||||
#if MUTEX == 0
|
||||
movl %ebx, %ecx
|
||||
movl %ebx, %edx
|
||||
#else
|
||||
leal MUTEX(%ebx), %ecx
|
||||
leal MUTEX(%ebx), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
movl PSHARED(%ebx), %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 2b
|
||||
|
||||
14: cmpl %gs:TID , %eax
|
||||
jne 3b
|
||||
movl $EDEADLK, %ecx
|
||||
movl $EDEADLK, %edx
|
||||
jmp 9b
|
||||
|
||||
6:
|
||||
@ -143,11 +134,12 @@ __pthread_rwlock_wrlock:
|
||||
#else
|
||||
leal MUTEX(%ebx), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
movl PSHARED(%ebx), %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 7b
|
||||
|
||||
4: subl $1, WRITERS_QUEUED(%ebx)
|
||||
movl $EAGAIN, %ecx
|
||||
movl $EAGAIN, %edx
|
||||
jmp 9b
|
||||
|
||||
10:
|
||||
@ -156,16 +148,18 @@ __pthread_rwlock_wrlock:
|
||||
#else
|
||||
leal MUTEX(%ebx), %eax
|
||||
#endif
|
||||
call __lll_mutex_unlock_wake
|
||||
movl PSHARED(%ebx), %ecx
|
||||
call __lll_unlock_wake
|
||||
jmp 11b
|
||||
|
||||
12:
|
||||
#if MUTEX == 0
|
||||
movl %ebx, %ecx
|
||||
movl %ebx, %edx
|
||||
#else
|
||||
leal MUTEX(%ebx), %ecx
|
||||
leal MUTEX(%ebx), %edx
|
||||
#endif
|
||||
call __lll_mutex_lock_wait
|
||||
movl PSHARED(%ebx), %ecx
|
||||
call __lll_lock_wait
|
||||
jmp 13b
|
||||
.size __pthread_rwlock_wrlock,.-__pthread_rwlock_wrlock
|
||||
|
||||
|
@ -21,15 +21,7 @@
|
||||
#include <shlib-compat.h>
|
||||
#include <pthread-errnos.h>
|
||||
#include <structsem.h>
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define
|
||||
#endif
|
||||
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAKE 1
|
||||
#include <lowlevellock.h>
|
||||
|
||||
|
||||
.text
|
||||
|
@ -21,16 +21,7 @@
|
||||
#include <shlib-compat.h>
|
||||
#include <pthread-errnos.h>
|
||||
#include <structsem.h>
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define
|
||||
#endif
|
||||
|
||||
#define SYS_gettimeofday __NR_gettimeofday
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#include <lowlevellock.h>
|
||||
|
||||
|
||||
#if VALUE != 0
|
||||
@ -82,7 +73,7 @@ sem_timedwait:
|
||||
7: xorl %ecx, %ecx
|
||||
movl %esp, %ebx
|
||||
movl %ecx, %edx
|
||||
movl $SYS_gettimeofday, %eax
|
||||
movl $__NR_gettimeofday, %eax
|
||||
ENTER_KERNEL
|
||||
|
||||
/* Compute relative timeout. */
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
|
||||
/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
@ -20,12 +20,7 @@
|
||||
#include <sysdep.h>
|
||||
#include <shlib-compat.h>
|
||||
#include <pthread-errnos.h>
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define
|
||||
#endif
|
||||
#include <lowlevellock.h>
|
||||
|
||||
.text
|
||||
|
||||
|
@ -21,15 +21,7 @@
|
||||
#include <shlib-compat.h>
|
||||
#include <pthread-errnos.h>
|
||||
#include <structsem.h>
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define
|
||||
#endif
|
||||
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#include <lowlevellock.h>
|
||||
|
||||
|
||||
#if VALUE != 0
|
||||
|
@ -20,28 +20,41 @@
|
||||
#ifndef _LOWLEVELLOCK_H
|
||||
#define _LOWLEVELLOCK_H 1
|
||||
|
||||
#include <time.h>
|
||||
#include <sys/param.h>
|
||||
#include <bits/pthreadtypes.h>
|
||||
#include <kernel-features.h>
|
||||
#include <tcb-offsets.h>
|
||||
#ifndef __ASSEMBLER__
|
||||
# include <time.h>
|
||||
# include <sys/param.h>
|
||||
# include <bits/pthreadtypes.h>
|
||||
# include <kernel-features.h>
|
||||
# include <tcb-offsets.h>
|
||||
|
||||
#ifndef LOCK_INSTR
|
||||
# ifdef UP
|
||||
# define LOCK_INSTR /* nothing */
|
||||
# else
|
||||
# define LOCK_INSTR "lock;"
|
||||
# ifndef LOCK_INSTR
|
||||
# ifdef UP
|
||||
# define LOCK_INSTR /* nothing */
|
||||
# else
|
||||
# define LOCK_INSTR "lock;"
|
||||
# endif
|
||||
# endif
|
||||
#else
|
||||
# ifndef LOCK
|
||||
# ifdef UP
|
||||
# define LOCK
|
||||
# else
|
||||
# define LOCK lock
|
||||
# endif
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_CMP_REQUEUE 4
|
||||
#define FUTEX_WAKE_OP 5
|
||||
#define FUTEX_LOCK_PI 6
|
||||
#define FUTEX_UNLOCK_PI 7
|
||||
#define FUTEX_TRYLOCK_PI 8
|
||||
#define FUTEX_PRIVATE_FLAG 128
|
||||
|
||||
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
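These constants are the raw futex command numbers the assembly hands to the kernel. As a hedged, stand-alone illustration (not part of the patch), a user-space wrapper over the same syscall would look roughly like this; the only assumptions are a Linux system providing <linux/futex.h> and SYS_futex, and note that here `private' is a plain boolean, unlike the LLL_PRIVATE/LLL_SHARED encoding discussed just below.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* FUTEX_PRIVATE_FLAG (128) is or'ed into the command when the futex is
   process-private, letting the kernel skip the cross-process hashing.  */
static long
futex_wait_sketch (int *uaddr, int expected, int private)
{
  return syscall (SYS_futex, uaddr,
                  FUTEX_WAIT | (private ? FUTEX_PRIVATE_FLAG : 0),
                  expected, NULL, NULL, 0);
}

static long
futex_wake_sketch (int *uaddr, int nwake, int private)
{
  return syscall (SYS_futex, uaddr,
                  FUTEX_WAKE | (private ? FUTEX_PRIVATE_FLAG : 0),
                  nwake, NULL, NULL, 0);
}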
|
||||
|
||||
/* Values for 'private' parameter of locking macros. Yes, the
|
||||
definition seems to be backwards. But it is not. The bit will be
|
||||
@ -76,11 +89,12 @@
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifndef __ASSEMBLER__
|
||||
|
||||
/* Initializer for compatibility lock. */
|
||||
#define LLL_MUTEX_LOCK_INITIALIZER (0)
|
||||
#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
|
||||
#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2)
|
||||
#define LLL_LOCK_INITIALIZER (0)
|
||||
#define LLL_LOCK_INITIALIZER_LOCKED (1)
|
||||
#define LLL_LOCK_INITIALIZER_WAITERS (2)
|
||||
|
||||
|
||||
#ifdef PIC
|
||||
@ -102,7 +116,7 @@
|
||||
#endif
|
||||
|
||||
/* Delay in spinlock loop. */
|
||||
#define BUSY_WAIT_NOP asm ("rep; nop")
|
||||
#define BUSY_WAIT_NOP asm ("rep; nop")
|
||||
|
||||
|
||||
#define LLL_STUB_UNWIND_INFO_START \
|
||||
@ -217,332 +231,309 @@ LLL_STUB_UNWIND_INFO_END
|
||||
} while (0)
|
||||
|
||||
|
||||
/* Does not preserve %eax and %ecx. */
|
||||
extern int __lll_mutex_lock_wait (int val, int *__futex)
|
||||
__attribute ((regparm (2))) attribute_hidden;
|
||||
/* Does not preserve %eax, %ecx, and %edx. */
|
||||
extern int __lll_mutex_timedlock_wait (int val, int *__futex,
|
||||
const struct timespec *abstime)
|
||||
__attribute ((regparm (3))) attribute_hidden;
|
||||
/* Preserves all registers but %eax. */
|
||||
extern int __lll_mutex_unlock_wake (int *__futex)
|
||||
__attribute ((regparm (1))) attribute_hidden;
|
||||
|
||||
|
||||
/* NB: in the lll_mutex_trylock macro we simply return the value in %eax
|
||||
/* NB: in the lll_trylock macro we simply return the value in %eax
|
||||
after the cmpxchg instruction. In case the operation succeeded this
|
||||
value is zero. In case the operation failed, the cmpxchg instruction
|
||||
has loaded the current value of the memory word which is guaranteed
|
||||
to be nonzero. */
|
||||
#define lll_mutex_trylock(futex) \
|
||||
#if defined NOT_IN_libc || defined UP
|
||||
# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
|
||||
#else
|
||||
# define __lll_trylock_asm "cmpl $0, %%gs:%P5\n\t" \
|
||||
"je 0f\n\t" \
|
||||
"lock\n" \
|
||||
"0:\tcmpxchgl %2, %1"
|
||||
#endif
|
||||
|
||||
#define lll_trylock(futex) \
|
||||
({ int ret; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
|
||||
__asm __volatile (__lll_trylock_asm \
|
||||
: "=a" (ret), "=m" (futex) \
|
||||
: "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
|
||||
"0" (LLL_MUTEX_LOCK_INITIALIZER) \
|
||||
: "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
|
||||
"0" (LLL_LOCK_INITIALIZER), \
|
||||
"i" (MULTIPLE_THREADS_OFFSET) \
|
||||
: "memory"); \
|
||||
ret; })
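Stripped of the inline assembly, the NB above amounts to the following hypothetical C sketch (GCC builtins assumed): a single compare-and-swap from 0 to the locked state whose result is the previous contents, so zero signals success.

static int
lll_trylock_sketch (int *futex)
{
  int expected = 0;                     /* LLL_LOCK_INITIALIZER */
  /* Try 0 -> 1; on failure `expected' is overwritten with the old value.  */
  __atomic_compare_exchange_n (futex, &expected, 1 /* LOCKED */, 0,
                               __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
  return expected;                      /* 0 on success, nonzero otherwise */
}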
|
||||
|
||||
|
||||
#define lll_robust_mutex_trylock(futex, id) \
|
||||
#define lll_robust_trylock(futex, id) \
|
||||
({ int ret; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
|
||||
: "=a" (ret), "=m" (futex) \
|
||||
: "r" (id), "m" (futex), \
|
||||
"0" (LLL_MUTEX_LOCK_INITIALIZER) \
|
||||
"0" (LLL_LOCK_INITIALIZER) \
|
||||
: "memory"); \
|
||||
ret; })
|
||||
|
||||
|
||||
#define lll_mutex_cond_trylock(futex) \
|
||||
#define lll_cond_trylock(futex) \
|
||||
({ int ret; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
|
||||
: "=a" (ret), "=m" (futex) \
|
||||
: "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \
|
||||
"m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER) \
|
||||
: "r" (LLL_LOCK_INITIALIZER_WAITERS), \
|
||||
"m" (futex), "0" (LLL_LOCK_INITIALIZER) \
|
||||
: "memory"); \
|
||||
ret; })
|
||||
|
||||
|
||||
#define lll_mutex_lock(futex) \
|
||||
(void) ({ int ignore1, ignore2; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
|
||||
"jnz _L_mutex_lock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_mutex_lock_%=,@function\n" \
|
||||
"_L_mutex_lock_%=:\n" \
|
||||
"1:\tleal %2, %%ecx\n" \
|
||||
"2:\tcall __lll_mutex_lock_wait\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_mutex_lock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_3 \
|
||||
"18:" \
|
||||
: "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
|
||||
: "0" (0), "1" (1), "m" (futex) \
|
||||
: "memory"); })
|
||||
|
||||
|
||||
#define lll_robust_mutex_lock(futex, id) \
|
||||
({ int result, ignore; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
|
||||
"jnz _L_robust_mutex_lock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_robust_mutex_lock_%=,@function\n" \
|
||||
"_L_robust_mutex_lock_%=:\n" \
|
||||
"1:\tleal %2, %%ecx\n" \
|
||||
"2:\tcall __lll_robust_mutex_lock_wait\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_robust_mutex_lock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_3 \
|
||||
"18:" \
|
||||
: "=a" (result), "=c" (ignore), "=m" (futex) \
|
||||
: "0" (0), "1" (id), "m" (futex) \
|
||||
: "memory"); \
|
||||
result; })
|
||||
|
||||
|
||||
/* Special version of lll_mutex_lock which causes the unlock function to
|
||||
always wakeup waiters. */
|
||||
#define lll_mutex_cond_lock(futex) \
|
||||
(void) ({ int ignore1, ignore2; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
|
||||
"jnz _L_mutex_cond_lock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_mutex_cond_lock_%=,@function\n" \
|
||||
"_L_mutex_cond_lock_%=:\n" \
|
||||
"1:\tleal %2, %%ecx\n" \
|
||||
"2:\tcall __lll_mutex_lock_wait\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_mutex_cond_lock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_3 \
|
||||
"18:" \
|
||||
: "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
|
||||
: "0" (0), "1" (2), "m" (futex) \
|
||||
: "memory"); })
|
||||
|
||||
|
||||
#define lll_robust_mutex_cond_lock(futex, id) \
|
||||
({ int result, ignore; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
|
||||
"jnz _L_robust_mutex_cond_lock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_robust_mutex_cond_lock_%=,@function\n" \
|
||||
"_L_robust_mutex_cond_lock_%=:\n" \
|
||||
"1:\tleal %2, %%ecx\n" \
|
||||
"2:\tcall __lll_robust_mutex_lock_wait\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_robust_mutex_cond_lock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_3 \
|
||||
"18:" \
|
||||
: "=a" (result), "=c" (ignore), "=m" (futex) \
|
||||
: "0" (0), "1" (id | FUTEX_WAITERS), "m" (futex) \
|
||||
: "memory"); \
|
||||
result; })
|
||||
|
||||
|
||||
#define lll_mutex_timedlock(futex, timeout) \
|
||||
({ int result, ignore1, ignore2; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
|
||||
"jnz _L_mutex_timedlock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_mutex_timedlock_%=,@function\n" \
|
||||
"_L_mutex_timedlock_%=:\n" \
|
||||
"1:\tleal %3, %%ecx\n" \
|
||||
"0:\tmovl %7, %%edx\n" \
|
||||
"2:\tcall __lll_mutex_timedlock_wait\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_mutex_timedlock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_4 \
|
||||
"18:" \
|
||||
: "=a" (result), "=c" (ignore1), "=&d" (ignore2), \
|
||||
"=m" (futex) \
|
||||
: "0" (0), "1" (1), "m" (futex), "m" (timeout) \
|
||||
: "memory"); \
|
||||
result; })
|
||||
|
||||
|
||||
#define lll_robust_mutex_timedlock(futex, timeout, id) \
|
||||
({ int result, ignore1, ignore2; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
|
||||
"jnz _L_robust_mutex_timedlock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_robust_mutex_timedlock_%=,@function\n" \
|
||||
"_L_robust_mutex_timedlock_%=:\n" \
|
||||
"1:\tleal %3, %%ecx\n" \
|
||||
"0:\tmovl %7, %%edx\n" \
|
||||
"2:\tcall __lll_robust_mutex_timedlock_wait\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_robust_mutex_timedlock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_4 \
|
||||
"18:" \
|
||||
: "=a" (result), "=c" (ignore1), "=&d" (ignore2), \
|
||||
"=m" (futex) \
|
||||
: "0" (0), "1" (id), "m" (futex), "m" (timeout) \
|
||||
: "memory"); \
|
||||
result; })
|
||||
|
||||
|
||||
#define lll_mutex_unlock(futex) \
|
||||
(void) ({ int ignore; \
|
||||
__asm __volatile (LOCK_INSTR "subl $1, %0\n\t" \
|
||||
"jne _L_mutex_unlock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_mutex_unlock_%=,@function\n" \
|
||||
"_L_mutex_unlock_%=:\n" \
|
||||
"1:\tleal %0, %%eax\n" \
|
||||
"2:\tcall __lll_mutex_unlock_wake\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_mutex_unlock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_3 \
|
||||
"18:" \
|
||||
: "=m" (futex), "=&a" (ignore) \
|
||||
: "m" (futex) \
|
||||
: "memory"); })
|
||||
|
||||
|
||||
#define lll_robust_mutex_unlock(futex) \
|
||||
(void) ({ int ignore; \
|
||||
__asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \
|
||||
"jne _L_robust_mutex_unlock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_robust_mutex_unlock_%=,@function\n" \
|
||||
"_L_robust_mutex_unlock_%=:\n\t" \
|
||||
"1:\tleal %0, %%eax\n" \
|
||||
"2:\tcall __lll_mutex_unlock_wake\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_robust_mutex_unlock_%=, 4b-1b\n\t"\
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_3 \
|
||||
"18:" \
|
||||
: "=m" (futex), "=&a" (ignore) \
|
||||
: "i" (FUTEX_WAITERS), "m" (futex) \
|
||||
: "memory"); })
|
||||
|
||||
|
||||
#define lll_robust_mutex_dead(futex) \
|
||||
(void) ({ int __ignore; \
|
||||
register int _nr asm ("edx") = 1; \
|
||||
__asm __volatile (LOCK_INSTR "orl %5, (%2)\n\t" \
|
||||
LLL_EBX_LOAD \
|
||||
LLL_ENTER_KERNEL \
|
||||
LLL_EBX_LOAD \
|
||||
: "=a" (__ignore) \
|
||||
: "0" (SYS_futex), LLL_EBX_REG (&(futex)), \
|
||||
"c" (FUTEX_WAKE), "d" (_nr), \
|
||||
"i" (FUTEX_OWNER_DIED), \
|
||||
"i" (offsetof (tcbhead_t, sysinfo))); })
|
||||
|
||||
|
||||
#define lll_mutex_islocked(futex) \
|
||||
(futex != 0)
|
||||
|
||||
|
||||
/* We have a separate internal lock implementation which is not tied
|
||||
to binary compatibility. */
|
||||
|
||||
/* Type for lock object. */
|
||||
typedef int lll_lock_t;
|
||||
|
||||
/* Initializers for lock. */
|
||||
#define LLL_LOCK_INITIALIZER (0)
|
||||
#define LLL_LOCK_INITIALIZER_LOCKED (1)
|
||||
|
||||
|
||||
extern int __lll_lock_wait (int val, int *__futex)
|
||||
__attribute ((regparm (2))) attribute_hidden;
|
||||
extern int __lll_unlock_wake (int *__futex)
|
||||
__attribute ((regparm (1))) attribute_hidden;
|
||||
|
||||
|
||||
/* The states of a lock are:
|
||||
0 - untaken
|
||||
1 - taken by one user
|
||||
2 - taken by more users */
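A hedged, self-contained C rendering of this 0/1/2 protocol (Linux and GCC atomics assumed; the real code uses the hand-written assembly stubs and the __lll_lock_wait function shown later in this patch):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void
lock_sketch (int *futex)
{
  int expected = 0;
  /* Fast path: 0 -> 1, no other user.  */
  if (__atomic_compare_exchange_n (futex, &expected, 1, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return;

  /* Contended: move to state 2 and sleep until we can take it as 2.  */
  do
    {
      int old = 1;
      __atomic_compare_exchange_n (futex, &old, 2, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
      if (old != 0)                     /* still held by someone */
        syscall (SYS_futex, futex, FUTEX_WAIT, 2, NULL, NULL, 0);
      expected = 0;
    }
  while (!__atomic_compare_exchange_n (futex, &expected, 2, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}

static void
unlock_sketch (int *futex)
{
  /* Only state 2 (more users) needs a kernel wakeup.  */
  if (__atomic_exchange_n (futex, 0, __ATOMIC_RELEASE) > 1)
    syscall (SYS_futex, futex, FUTEX_WAKE, 1, NULL, NULL, 0);
}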
|
||||
|
||||
|
||||
#if defined NOT_IN_libc || defined UP
|
||||
# define lll_trylock(futex) lll_mutex_trylock (futex)
|
||||
# define lll_lock(futex) lll_mutex_lock (futex)
|
||||
# define lll_unlock(futex) lll_mutex_unlock (futex)
|
||||
# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %1, %2\n\t"
|
||||
#else
|
||||
/* Special versions of the macros for use in libc itself. They avoid
|
||||
the lock prefix when the thread library is not used.
|
||||
|
||||
XXX In future we might even want to avoid it on UP machines. */
|
||||
# include <tls.h>
|
||||
|
||||
# define lll_trylock(futex) \
|
||||
({ unsigned char ret; \
|
||||
__asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
|
||||
"je 0f\n\t" \
|
||||
"lock\n" \
|
||||
"0:\tcmpxchgl %2, %1; setne %0" \
|
||||
: "=a" (ret), "=m" (futex) \
|
||||
: "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
|
||||
"0" (LLL_MUTEX_LOCK_INITIALIZER), \
|
||||
"i" (offsetof (tcbhead_t, multiple_threads)) \
|
||||
: "memory"); \
|
||||
ret; })
|
||||
|
||||
|
||||
# define lll_lock(futex) \
|
||||
(void) ({ int ignore1, ignore2; \
|
||||
__asm __volatile ("cmpl $0, %%gs:%P6\n\t" \
|
||||
# define __lll_lock_asm_start "cmpl $0, %%gs:%P6\n\t" \
|
||||
"je 0f\n\t" \
|
||||
"lock\n" \
|
||||
"0:\tcmpxchgl %1, %2\n\t" \
|
||||
"jnz _L_lock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_lock_%=,@function\n" \
|
||||
"_L_lock_%=:\n" \
|
||||
"1:\tleal %2, %%ecx\n" \
|
||||
"2:\tcall __lll_mutex_lock_wait\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_lock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_3 \
|
||||
"18:" \
|
||||
: "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
|
||||
: "0" (0), "1" (1), "m" (futex), \
|
||||
"i" (offsetof (tcbhead_t, multiple_threads)) \
|
||||
: "memory"); })
|
||||
|
||||
|
||||
# define lll_unlock(futex) \
|
||||
(void) ({ int ignore; \
|
||||
__asm __volatile ("cmpl $0, %%gs:%P3\n\t" \
|
||||
"je 0f\n\t" \
|
||||
"lock\n" \
|
||||
"0:\tsubl $1,%0\n\t" \
|
||||
"jne _L_unlock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_unlock_%=,@function\n" \
|
||||
"_L_unlock_%=:\n" \
|
||||
"1:\tleal %0, %%eax\n" \
|
||||
"2:\tcall __lll_mutex_unlock_wake\n" \
|
||||
"3:\tjmp 18f\n\t" \
|
||||
"4:\t.size _L_unlock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_3 \
|
||||
"18:" \
|
||||
: "=m" (futex), "=&a" (ignore) \
|
||||
: "m" (futex), \
|
||||
"i" (offsetof (tcbhead_t, multiple_threads)) \
|
||||
: "memory"); })
|
||||
"0:\tcmpxchgl %1, %2\n\t"
|
||||
#endif
|
||||
|
||||
#define lll_lock(futex, private) \
|
||||
(void) \
|
||||
({ int ignore1, ignore2; \
|
||||
if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
|
||||
__asm __volatile (__lll_lock_asm_start \
|
||||
"jnz _L_lock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_lock_%=,@function\n" \
|
||||
"_L_lock_%=:\n" \
|
||||
"1:\tleal %2, %%ecx\n" \
|
||||
"2:\tcall __lll_lock_wait_private\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_lock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_3 \
|
||||
"18:" \
|
||||
: "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
|
||||
: "0" (0), "1" (1), "m" (futex), \
|
||||
"i" (MULTIPLE_THREADS_OFFSET) \
|
||||
: "memory"); \
|
||||
else \
|
||||
{ \
|
||||
int ignore3; \
|
||||
__asm __volatile (__lll_lock_asm_start \
|
||||
"jnz _L_lock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_lock_%=,@function\n" \
|
||||
"_L_lock_%=:\n" \
|
||||
"1:\tleal %2, %%edx\n" \
|
||||
"0:\tmovl %8, %%ecx\n" \
|
||||
"2:\tcall __lll_lock_wait\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_lock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_4 \
|
||||
"18:" \
|
||||
: "=a" (ignore1), "=c" (ignore2), \
|
||||
"=m" (futex), "=&d" (ignore3) \
|
||||
: "1" (1), "m" (futex), \
|
||||
"i" (MULTIPLE_THREADS_OFFSET), "0" (0), \
|
||||
"g" (private) \
|
||||
: "memory"); \
|
||||
} \
|
||||
})
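The interesting part of the new lll_lock above is the compile-time dispatch: a literal LLL_PRIVATE selects the cheaper private slow path statically, anything else passes the flag down at run time. A minimal sketch of that shape, with stub slow paths so it compiles on its own (the LLL_* values are taken here as assumptions from the new header):

#define LLL_PRIVATE 0
#define LLL_SHARED  128   /* == FUTEX_PRIVATE_FLAG */

/* Stand-ins for the real __lll_lock_wait_private/__lll_lock_wait.  */
static void lock_wait_private_stub (int *futex) { (void) futex; }
static void lock_wait_stub (int *futex, int private)
{ (void) futex; (void) private; }

#define lll_lock_sketch(futex, private)                                       \
  do                                                                          \
    {                                                                         \
      int *__f = &(futex);                                                    \
      if (!__sync_bool_compare_and_swap (__f, 0, 1))                          \
        {                                                                     \
          if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)     \
            lock_wait_private_stub (__f);   /* resolved at compile time */    \
          else                                                                \
            lock_wait_stub (__f, private);                                    \
        }                                                                     \
    }                                                                         \
  while (0)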
|
||||
|
||||
#define lll_robust_lock(futex, id, private) \
|
||||
({ int result, ignore1, ignore2; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
|
||||
"jnz _L_robust_lock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_robust_lock_%=,@function\n" \
|
||||
"_L_robust_lock_%=:\n" \
|
||||
"1:\tleal %2, %%edx\n" \
|
||||
"0:\tmovl %7, %%ecx\n" \
|
||||
"2:\tcall __lll_robust_lock_wait\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_robust_lock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_4 \
|
||||
"18:" \
|
||||
: "=a" (result), "=c" (ignore1), "=m" (futex), \
|
||||
"=&d" (ignore2) \
|
||||
: "0" (0), "1" (id), "m" (futex), "g" (private) \
|
||||
: "memory"); \
|
||||
result; })
|
||||
|
||||
|
||||
/* Special version of lll_lock which causes the unlock function to
|
||||
always wakeup waiters. */
|
||||
#define lll_cond_lock(futex, private) \
|
||||
(void) \
|
||||
({ int ignore1, ignore2, ignore3; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
|
||||
"jnz _L_cond_lock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_cond_lock_%=,@function\n" \
|
||||
"_L_cond_lock_%=:\n" \
|
||||
"1:\tleal %2, %%edx\n" \
|
||||
"0:\tmovl %7, %%ecx\n" \
|
||||
"2:\tcall __lll_lock_wait\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_cond_lock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_4 \
|
||||
"18:" \
|
||||
: "=a" (ignore1), "=c" (ignore2), "=m" (futex), \
|
||||
"=&d" (ignore3) \
|
||||
: "0" (0), "1" (2), "m" (futex), "g" (private) \
|
||||
: "memory"); \
|
||||
})
|
||||
|
||||
|
||||
#define lll_robust_cond_lock(futex, id, private) \
|
||||
({ int result, ignore1, ignore2; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
|
||||
"jnz _L_robust_cond_lock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_robust_cond_lock_%=,@function\n" \
|
||||
"_L_robust_cond_lock_%=:\n" \
|
||||
"1:\tleal %2, %%edx\n" \
|
||||
"0:\tmovl %7, %%ecx\n" \
|
||||
"2:\tcall __lll_robust_lock_wait\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_robust_cond_lock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_4 \
|
||||
"18:" \
|
||||
: "=a" (result), "=c" (ignore1), "=m" (futex), \
|
||||
"=&d" (ignore2) \
|
||||
: "0" (0), "1" (id | FUTEX_WAITERS), "m" (futex), \
|
||||
"g" (private) \
|
||||
: "memory"); \
|
||||
result; })
|
||||
|
||||
|
||||
#define lll_timedlock(futex, timeout, private) \
|
||||
({ int result, ignore1, ignore2, ignore3; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
|
||||
"jnz _L_timedlock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_timedlock_%=,@function\n" \
|
||||
"_L_timedlock_%=:\n" \
|
||||
"1:\tleal %3, %%ecx\n" \
|
||||
"0:\tmovl %8, %%edx\n" \
|
||||
"2:\tcall __lll_timedlock_wait\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_timedlock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_4 \
|
||||
"18:" \
|
||||
: "=a" (result), "=c" (ignore1), "=&d" (ignore2), \
|
||||
"=m" (futex), "=S" (ignore3) \
|
||||
: "0" (0), "1" (1), "m" (futex), "m" (timeout), \
|
||||
"4" (private) \
|
||||
: "memory"); \
|
||||
result; })
|
||||
|
||||
|
||||
#define lll_robust_timedlock(futex, timeout, id, private) \
|
||||
({ int result, ignore1, ignore2, ignore3; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
|
||||
"jnz _L_robust_timedlock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_robust_timedlock_%=,@function\n" \
|
||||
"_L_robust_timedlock_%=:\n" \
|
||||
"1:\tleal %3, %%ecx\n" \
|
||||
"0:\tmovl %8, %%edx\n" \
|
||||
"2:\tcall __lll_robust_timedlock_wait\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_robust_timedlock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_4 \
|
||||
"18:" \
|
||||
: "=a" (result), "=c" (ignore1), "=&d" (ignore2), \
|
||||
"=m" (futex), "=S" (ignore3) \
|
||||
: "0" (0), "1" (id), "m" (futex), "m" (timeout), \
|
||||
"4" (private) \
|
||||
: "memory"); \
|
||||
result; })
|
||||
|
||||
#if defined NOT_IN_libc || defined UP
|
||||
# define __lll_unlock_asm LOCK_INSTR "subl $1, %0\n\t"
|
||||
#else
|
||||
# define __lll_unlock_asm "cmpl $0, %%gs:%P3\n\t" \
|
||||
"je 0f\n\t" \
|
||||
"lock\n" \
|
||||
"0:\tsubl $1,%0\n\t"
|
||||
#endif
|
||||
|
||||
#define lll_unlock(futex, private) \
|
||||
(void) \
|
||||
({ int ignore; \
|
||||
if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
|
||||
__asm __volatile (__lll_unlock_asm \
|
||||
"jne _L_unlock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_unlock_%=,@function\n" \
|
||||
"_L_unlock_%=:\n" \
|
||||
"1:\tleal %0, %%eax\n" \
|
||||
"2:\tcall __lll_unlock_wake_private\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_unlock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_3 \
|
||||
"18:" \
|
||||
: "=m" (futex), "=&a" (ignore) \
|
||||
: "m" (futex), "i" (MULTIPLE_THREADS_OFFSET) \
|
||||
: "memory"); \
|
||||
else \
|
||||
{ \
|
||||
int ignore2; \
|
||||
__asm __volatile (__lll_unlock_asm \
|
||||
"jne _L_unlock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_unlock_%=,@function\n" \
|
||||
"_L_unlock_%=:\n" \
|
||||
"1:\tleal %0, %%eax\n" \
|
||||
"0:\tmovl %5, %%ecx\n" \
|
||||
"2:\tcall __lll_unlock_wake\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_unlock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_4 \
|
||||
"18:" \
|
||||
: "=m" (futex), "=&a" (ignore), "=&c" (ignore2) \
|
||||
: "i" (MULTIPLE_THREADS_OFFSET), "m" (futex), \
|
||||
"g" (private) \
|
||||
: "memory"); \
|
||||
} \
|
||||
})
|
||||
|
||||
#define lll_robust_unlock(futex, private) \
|
||||
(void) \
|
||||
({ int ignore, ignore2; \
|
||||
__asm __volatile (LOCK_INSTR "andl %3, %0\n\t" \
|
||||
"jne _L_robust_unlock_%=\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_robust_unlock_%=,@function\n" \
|
||||
"_L_robust_unlock_%=:\n\t" \
|
||||
"1:\tleal %0, %%eax\n" \
|
||||
"0:\tmovl %5, %%ecx\n" \
|
||||
"2:\tcall __lll_unlock_wake\n" \
|
||||
"3:\tjmp 18f\n" \
|
||||
"4:\t.size _L_robust_unlock_%=, 4b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_4 \
|
||||
"18:" \
|
||||
: "=m" (futex), "=&a" (ignore), "=&c" (ignore2) \
|
||||
: "i" (FUTEX_WAITERS), "m" (futex), "g" (private) \
|
||||
: "memory"); \
|
||||
})
|
||||
|
||||
|
||||
#define lll_robust_dead(futex, private) \
|
||||
(void) \
|
||||
({ int __ignore; \
|
||||
register int _nr asm ("edx") = 1; \
|
||||
__asm __volatile (LOCK_INSTR "orl %5, (%2)\n\t" \
|
||||
LLL_EBX_LOAD \
|
||||
LLL_ENTER_KERNEL \
|
||||
LLL_EBX_LOAD \
|
||||
: "=a" (__ignore) \
|
||||
: "0" (SYS_futex), LLL_EBX_REG (&(futex)), \
|
||||
"c" (__lll_private_flag (FUTEX_WAKE, private)), \
|
||||
"d" (_nr), "i" (FUTEX_OWNER_DIED), \
|
||||
"i" (offsetof (tcbhead_t, sysinfo))); \
|
||||
})
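In plain C, lll_robust_dead above amounts to the following hypothetical sketch; FUTEX_OWNER_DIED is the kernel's 0x40000000 bit and is stated here as an assumption, and the wake command is expected to already carry any private flag.

#include <sys/syscall.h>
#include <unistd.h>

#define FUTEX_OWNER_DIED 0x40000000

static void
robust_dead_sketch (int *futex, int wake_cmd /* FUTEX_WAKE, possibly
                                                | FUTEX_PRIVATE_FLAG */)
{
  /* Mark the lock so the next owner learns the previous one died ...  */
  __atomic_fetch_or (futex, FUTEX_OWNER_DIED, __ATOMIC_RELEASE);
  /* ... and wake one waiter so it can recover the mutex.  */
  syscall (SYS_futex, futex, wake_cmd, 1, NULL, NULL, 0);
}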
|
||||
|
||||
#define lll_islocked(futex) \
|
||||
(futex != LLL_LOCK_INITIALIZER)
|
||||
|
||||
|
||||
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
|
||||
wakeup when the clone terminates. The memory location contains the
|
||||
thread ID while the clone is running and is reset to zero
|
||||
@ -581,28 +572,6 @@ extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
|
||||
} \
|
||||
__result; })
|
||||
|
||||
|
||||
/* Conditional variable handling. */
|
||||
|
||||
extern void __lll_cond_wait (pthread_cond_t *cond)
|
||||
__attribute ((regparm (1))) attribute_hidden;
|
||||
extern int __lll_cond_timedwait (pthread_cond_t *cond,
|
||||
const struct timespec *abstime)
|
||||
__attribute ((regparm (2))) attribute_hidden;
|
||||
extern void __lll_cond_wake (pthread_cond_t *cond)
|
||||
__attribute ((regparm (1))) attribute_hidden;
|
||||
extern void __lll_cond_broadcast (pthread_cond_t *cond)
|
||||
__attribute ((regparm (1))) attribute_hidden;
|
||||
|
||||
|
||||
#define lll_cond_wait(cond) \
|
||||
__lll_cond_wait (cond)
|
||||
#define lll_cond_timedwait(cond, abstime) \
|
||||
__lll_cond_timedwait (cond, abstime)
|
||||
#define lll_cond_wake(cond) \
|
||||
__lll_cond_wake (cond)
|
||||
#define lll_cond_broadcast(cond) \
|
||||
__lll_cond_broadcast (cond)
|
||||
|
||||
#endif /* !__ASSEMBLER__ */
|
||||
|
||||
#endif /* lowlevellock.h */
|
||||
|
@ -20,19 +20,9 @@
|
||||
#include <unwindbuf.h>
|
||||
#include <sysdep.h>
|
||||
#include <kernel-features.h>
|
||||
#include <lowlevellock.h>
|
||||
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define LOCK
|
||||
#endif
|
||||
|
||||
#define SYS_futex 240
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_PRIVATE_FLAG 128
|
||||
|
||||
.comm __fork_generation, 4, 4
|
||||
|
||||
.text
|
||||
|
@ -73,9 +73,6 @@
|
||||
/* Delay in spinlock loop. */
|
||||
#define BUSY_WAIT_NOP asm ("hint @pause")
|
||||
|
||||
/* Initializer for compatibility lock. */
|
||||
#define LLL_MUTEX_LOCK_INITIALIZER (0)
|
||||
|
||||
#define lll_futex_wait(futex, val, private) \
|
||||
lll_futex_timed_wait (futex, val, NULL, private)
|
||||
|
||||
@ -95,12 +92,13 @@
|
||||
_r10 == -1 ? -_retval : _retval; \
|
||||
})
|
||||
|
||||
#define lll_robust_mutex_dead(futexv) \
|
||||
#define lll_robust_dead(futexv, private) \
|
||||
do \
|
||||
{ \
|
||||
int *__futexp = &(futexv); \
|
||||
atomic_or (__futexp, FUTEX_OWNER_DIED); \
|
||||
DO_INLINE_SYSCALL(futex, 3, (long) __futexp, FUTEX_WAKE, 1); \
|
||||
DO_INLINE_SYSCALL(futex, 3, (long) __futexp, \
|
||||
__lll_private_flag (FUTEX_WAKE, private), 1); \
|
||||
} \
|
||||
while (0)
|
||||
|
||||
@ -123,156 +121,144 @@ while (0)
|
||||
})
|
||||
|
||||
|
||||
#define __lll_mutex_trylock(futex) \
|
||||
#define __lll_trylock(futex) \
|
||||
(atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0)
|
||||
#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex))
|
||||
#define lll_trylock(futex) __lll_trylock (&(futex))
|
||||
|
||||
|
||||
#define __lll_robust_mutex_trylock(futex, id) \
|
||||
#define __lll_robust_trylock(futex, id) \
|
||||
(atomic_compare_and_exchange_val_acq (futex, id, 0) != 0)
|
||||
#define lll_robust_mutex_trylock(futex, id) \
|
||||
__lll_robust_mutex_trylock (&(futex), id)
|
||||
#define lll_robust_trylock(futex, id) \
|
||||
__lll_robust_trylock (&(futex), id)
|
||||
|
||||
|
||||
#define __lll_mutex_cond_trylock(futex) \
|
||||
#define __lll_cond_trylock(futex) \
|
||||
(atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0)
|
||||
#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
|
||||
#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex))
|
||||
|
||||
|
||||
extern void __lll_lock_wait (int *futex) attribute_hidden;
|
||||
extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
|
||||
extern void __lll_lock_wait_private (int *futex) attribute_hidden;
|
||||
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
|
||||
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
|
||||
|
||||
|
||||
#define __lll_mutex_lock(futex) \
|
||||
((void) ({ \
|
||||
int *__futex = (futex); \
|
||||
if (atomic_compare_and_exchange_bool_acq (__futex, 1, 0) != 0) \
|
||||
__lll_lock_wait (__futex); \
|
||||
#define __lll_lock(futex, private) \
|
||||
((void) ({ \
|
||||
int *__futex = (futex); \
|
||||
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, \
|
||||
1, 0), 0)) \
|
||||
{ \
|
||||
if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
|
||||
__lll_lock_wait_private (__futex); \
|
||||
else \
|
||||
__lll_lock_wait (__futex, private); \
|
||||
}))
|
||||
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
|
||||
#define lll_lock(futex, private) __lll_lock (&(futex), private)
|
||||
|
||||
|
||||
#define __lll_robust_mutex_lock(futex, id) \
|
||||
({ \
|
||||
int *__futex = (futex); \
|
||||
int __val = 0; \
|
||||
\
|
||||
if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0) \
|
||||
__val = __lll_robust_lock_wait (__futex); \
|
||||
__val; \
|
||||
#define __lll_robust_lock(futex, id, private) \
|
||||
({ \
|
||||
int *__futex = (futex); \
|
||||
int __val = 0; \
|
||||
\
|
||||
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
|
||||
0), 0)) \
|
||||
__val = __lll_robust_lock_wait (__futex, private); \
|
||||
__val; \
|
||||
})
|
||||
#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id)
|
||||
#define lll_robust_lock(futex, id, private) \
|
||||
__lll_robust_lock (&(futex), id, private)
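For the robust variants the lock word holds the owner's TID rather than 1, so recovery code can tell who died. A hedged sketch of the fast path behind the macro above; the slow-path prototype matches the one this patch introduces, so linking it needs the real NPTL object.

extern int __lll_robust_lock_wait (int *futex, int private);

static int
robust_lock_sketch (int *futex, int tid, int private)
{
  int expected = 0;
  /* Fast path: free (0) -> our TID.  */
  if (__atomic_compare_exchange_n (futex, &expected, tid, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return 0;
  /* Contended or owner died: let the slow path sort it out.  */
  return __lll_robust_lock_wait (futex, private);
}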
|
||||
|
||||
|
||||
#define __lll_mutex_cond_lock(futex) \
|
||||
((void) ({ \
|
||||
int *__futex = (futex); \
|
||||
if (atomic_compare_and_exchange_bool_acq (__futex, 2, 0) != 0) \
|
||||
__lll_lock_wait (__futex); \
|
||||
#define __lll_cond_lock(futex, private) \
|
||||
((void) ({ \
|
||||
int *__futex = (futex); \
|
||||
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, 2, \
|
||||
0), 0)) \
|
||||
__lll_lock_wait (__futex, private); \
|
||||
}))
|
||||
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
|
||||
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
|
||||
|
||||
|
||||
#define __lll_robust_mutex_cond_lock(futex, id) \
|
||||
({ \
|
||||
int *__futex = (futex); \
|
||||
int __val = 0; \
|
||||
int __id = (id) | FUTEX_WAITERS; \
|
||||
\
|
||||
if (atomic_compare_and_exchange_bool_acq (__futex, __id, 0) != 0) \
|
||||
__val = __lll_robust_lock_wait (__futex); \
|
||||
__val; \
|
||||
#define __lll_robust_cond_lock(futex, id, private) \
|
||||
({ \
|
||||
int *__futex = (futex); \
|
||||
int __val = 0; \
|
||||
int __id = (id) | FUTEX_WAITERS; \
|
||||
\
|
||||
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, \
|
||||
__id, 0), 0)) \
|
||||
__val = __lll_robust_lock_wait (__futex, private); \
|
||||
__val; \
|
||||
})
|
||||
#define lll_robust_mutex_cond_lock(futex, id) \
|
||||
__lll_robust_mutex_cond_lock (&(futex), id)
|
||||
#define lll_robust_cond_lock(futex, id, private) \
|
||||
__lll_robust_cond_lock (&(futex), id, private)
|
||||
|
||||
|
||||
extern int __lll_timedlock_wait (int *futex, const struct timespec *)
|
||||
attribute_hidden;
|
||||
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
|
||||
attribute_hidden;
|
||||
extern int __lll_timedlock_wait (int *futex, const struct timespec *,
|
||||
int private) attribute_hidden;
|
||||
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
|
||||
int private) attribute_hidden;
|
||||
|
||||
|
||||
#define __lll_mutex_timedlock(futex, abstime) \
|
||||
({ \
|
||||
int *__futex = (futex); \
|
||||
int __val = 0; \
|
||||
\
|
||||
if (atomic_compare_and_exchange_bool_acq (__futex, 1, 0) != 0) \
|
||||
__val = __lll_timedlock_wait (__futex, abstime); \
|
||||
__val; \
|
||||
#define __lll_timedlock(futex, abstime, private) \
|
||||
({ \
|
||||
int *__futex = (futex); \
|
||||
int __val = 0; \
|
||||
\
|
||||
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, 1, \
|
||||
0), 0)) \
|
||||
__val = __lll_timedlock_wait (__futex, abstime, private); \
|
||||
__val; \
|
||||
})
|
||||
#define lll_mutex_timedlock(futex, abstime) \
|
||||
__lll_mutex_timedlock (&(futex), abstime)
|
||||
#define lll_timedlock(futex, abstime, private) \
|
||||
__lll_timedlock (&(futex), abstime, private)
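lll_timedlock carries an absolute deadline, but FUTEX_WAIT takes a relative timeout, so __lll_timedlock_wait (further below) re-derives the remaining time before each wait. A hedged sketch of that conversion, mirroring the gettimeofday sequence visible in the assembly hunks above:

#include <errno.h>
#include <sys/time.h>
#include <time.h>

static int
relative_timeout_sketch (const struct timespec *abstime, struct timespec *rt)
{
  struct timeval tv;
  gettimeofday (&tv, NULL);

  rt->tv_sec = abstime->tv_sec - tv.tv_sec;
  rt->tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
  if (rt->tv_nsec < 0)
    {
      rt->tv_nsec += 1000000000;
      --rt->tv_sec;
    }
  return rt->tv_sec < 0 ? ETIMEDOUT : 0;   /* deadline already passed */
}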
|
||||
|
||||
|
||||
#define __lll_robust_mutex_timedlock(futex, abstime, id) \
|
||||
({ \
|
||||
int *__futex = (futex); \
|
||||
int __val = 0; \
|
||||
\
|
||||
if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0) \
|
||||
__val = __lll_robust_timedlock_wait (__futex, abstime); \
|
||||
__val; \
|
||||
#define __lll_robust_timedlock(futex, abstime, id, private) \
|
||||
({ \
|
||||
int *__futex = (futex); \
|
||||
int __val = 0; \
|
||||
\
|
||||
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
|
||||
0), 0)) \
|
||||
__val = __lll_robust_timedlock_wait (__futex, abstime, private); \
|
||||
__val; \
|
||||
})
|
||||
#define lll_robust_mutex_timedlock(futex, abstime, id) \
|
||||
__lll_robust_mutex_timedlock (&(futex), abstime, id)
|
||||
#define lll_robust_timedlock(futex, abstime, id, private) \
|
||||
__lll_robust_timedlock (&(futex), abstime, id, private)
|
||||
|
||||
|
||||
#define __lll_mutex_unlock(futex) \
|
||||
((void) ({ \
|
||||
int *__futex = (futex); \
|
||||
int __val = atomic_exchange_rel (__futex, 0); \
|
||||
\
|
||||
if (__builtin_expect (__val > 1, 0)) \
|
||||
lll_futex_wake (__futex, 1, LLL_SHARED); \
|
||||
#define __lll_unlock(futex, private) \
|
||||
((void) ({ \
|
||||
int *__futex = (futex); \
|
||||
int __val = atomic_exchange_rel (__futex, 0); \
|
||||
\
|
||||
if (__builtin_expect (__val > 1, 0)) \
|
||||
lll_futex_wake (__futex, 1, private); \
|
||||
}))
|
||||
#define lll_mutex_unlock(futex) \
|
||||
__lll_mutex_unlock(&(futex))
|
||||
#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
|
||||
|
||||
|
||||
#define __lll_robust_mutex_unlock(futex) \
|
||||
((void) ({ \
|
||||
int *__futex = (futex); \
|
||||
int __val = atomic_exchange_rel (__futex, 0); \
|
||||
\
|
||||
if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
|
||||
lll_futex_wake (__futex, 1, LLL_SHARED); \
|
||||
#define __lll_robust_unlock(futex, private) \
|
||||
((void) ({ \
|
||||
int *__futex = (futex); \
|
||||
int __val = atomic_exchange_rel (__futex, 0); \
|
||||
\
|
||||
if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
|
||||
lll_futex_wake (__futex, 1, private); \
|
||||
}))
|
||||
#define lll_robust_mutex_unlock(futex) \
|
||||
__lll_robust_mutex_unlock(&(futex))
|
||||
#define lll_robust_unlock(futex, private) \
|
||||
__lll_robust_unlock(&(futex), private)
|
||||
|
||||
|
||||
#define __lll_mutex_unlock_force(futex) \
|
||||
((void) ({ \
|
||||
int *__futex = (futex); \
|
||||
(void) atomic_exchange_rel (__futex, 0); \
|
||||
lll_futex_wake (__futex, 1, LLL_SHARED); \
|
||||
}))
|
||||
#define lll_mutex_unlock_force(futex) \
|
||||
__lll_mutex_unlock_force(&(futex))
|
||||
|
||||
|
||||
#define lll_mutex_islocked(futex) \
|
||||
#define lll_islocked(futex) \
|
||||
(futex != 0)
|
||||
|
||||
|
||||
/* We have a separate internal lock implementation which is not tied
|
||||
to binary compatibility. We can use the lll_mutex_*. */
|
||||
|
||||
/* Type for lock object. */
|
||||
typedef int lll_lock_t;
|
||||
|
||||
/* Initializers for lock. */
|
||||
#define LLL_LOCK_INITIALIZER (0)
|
||||
#define LLL_LOCK_INITIALIZER_LOCKED (1)
|
||||
|
||||
#define lll_trylock(futex) lll_mutex_trylock (futex)
|
||||
#define lll_lock(futex) lll_mutex_lock (futex)
|
||||
#define lll_unlock(futex) lll_mutex_unlock (futex)
|
||||
#define lll_islocked(futex) lll_mutex_islocked (futex)
|
||||
|
||||
|
||||
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
|
||||
wakeup when the clone terminates. The memory location contains the
|
||||
thread ID while the clone is running and is reset to zero
|
||||
@ -297,26 +283,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
|
||||
__res; \
|
||||
})
|
||||
|
||||
|
||||
/* Conditional variable handling. */
|
||||
|
||||
extern void __lll_cond_wait (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
extern int __lll_cond_timedwait (pthread_cond_t *cond,
|
||||
const struct timespec *abstime)
|
||||
attribute_hidden;
|
||||
extern void __lll_cond_wake (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
extern void __lll_cond_broadcast (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
|
||||
#define lll_cond_wait(cond) \
|
||||
__lll_cond_wait (cond)
|
||||
#define lll_cond_timedwait(cond, abstime) \
|
||||
__lll_cond_timedwait (cond, abstime)
|
||||
#define lll_cond_wake(cond) \
|
||||
__lll_cond_wake (cond)
|
||||
#define lll_cond_broadcast(cond) \
|
||||
__lll_cond_broadcast (cond)
|
||||
|
||||
#endif /* lowlevellock.h */
|
||||
|
@@ -25,22 +25,35 @@
|
||||
|
||||
|
||||
void
|
||||
__lll_lock_wait (int *futex)
|
||||
__lll_lock_wait_private (int *futex)
|
||||
{
|
||||
do
|
||||
{
|
||||
int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
|
||||
if (oldval != 0)
|
||||
lll_futex_wait (futex, 2,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
lll_futex_wait (futex, 2, LLL_PRIVATE);
|
||||
}
|
||||
while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
|
||||
}
|
||||
|
||||
|
||||
/* These functions don't get included in libc.so */
|
||||
#ifdef IS_IN_libpthread
|
||||
void
|
||||
__lll_lock_wait (int *futex, int private)
|
||||
{
|
||||
do
|
||||
{
|
||||
int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
|
||||
if (oldval != 0)
|
||||
lll_futex_wait (futex, 2, private);
|
||||
}
|
||||
while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
|
||||
}
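The wait loops above implement the usual three-state futex lock: 0 is unlocked, 1 is locked with no waiters, 2 is locked with possible waiters, and only state 2 ever triggers a FUTEX_WAKE on unlock. A minimal user-space sketch of the same protocol, using raw syscall(SYS_futex) and GCC atomic builtins instead of glibc's internal helpers (illustrative only, not the glibc code):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void futex_wait (int *addr, int val)
{
  /* Sleep only while *addr still equals val; the kernel rechecks it.  */
  syscall (SYS_futex, addr, FUTEX_WAIT_PRIVATE, val, NULL, NULL, 0);
}

static void futex_wake (int *addr, int nwake)
{
  syscall (SYS_futex, addr, FUTEX_WAKE_PRIVATE, nwake, NULL, NULL, 0);
}

/* 0 = unlocked, 1 = locked/no waiters, 2 = locked/waiters possible.  */
static void toy_lock (int *futex)
{
  int zero = 0;
  if (__atomic_compare_exchange_n (futex, &zero, 1, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return;                        /* Uncontended fast path.  */
  /* Mark the lock contended and sleep until it looks free again.  */
  while (__atomic_exchange_n (futex, 2, __ATOMIC_ACQUIRE) != 0)
    futex_wait (futex, 2);
}

static void toy_unlock (int *futex)
{
  /* Wake somebody only if the old value says waiters may exist.  */
  if (__atomic_exchange_n (futex, 0, __ATOMIC_RELEASE) > 1)
    futex_wake (futex, 1);
}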
int
|
||||
__lll_timedlock_wait (int *futex, const struct timespec *abstime)
|
||||
__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
|
||||
{
|
||||
/* Reject invalid timeouts. */
|
||||
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
|
||||
@@ -70,9 +83,7 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)
|
||||
/* Wait. */
|
||||
int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
|
||||
if (oldval != 0)
|
||||
lll_futex_timed_wait (futex, 2, &rt,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
lll_futex_timed_wait (futex, 2, &rt, private);
|
||||
}
|
||||
while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
|
||||
|
||||
@@ -80,8 +91,6 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)
|
||||
}
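__lll_timedlock_wait receives an absolute CLOCK_REALTIME deadline, while FUTEX_WAIT takes a relative timeout, so each pass through the loop recomputes the remaining time (the rt passed to lll_futex_timed_wait above). A sketch of that conversion; the helper name and the use of clock_gettime instead of gettimeofday are assumptions of the sketch, not glibc's code:

#include <errno.h>
#include <time.h>

/* Convert an absolute CLOCK_REALTIME deadline into the relative
   timeout FUTEX_WAIT expects.  Returns EINVAL for a malformed
   timespec, ETIMEDOUT if the deadline already passed, else 0.  */
static int
deadline_to_relative (const struct timespec *abstime, struct timespec *rt)
{
  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
    return EINVAL;

  struct timespec now;
  clock_gettime (CLOCK_REALTIME, &now);

  rt->tv_sec = abstime->tv_sec - now.tv_sec;
  rt->tv_nsec = abstime->tv_nsec - now.tv_nsec;
  if (rt->tv_nsec < 0)
    {
      rt->tv_nsec += 1000000000;
      --rt->tv_sec;
    }
  return rt->tv_sec < 0 ? ETIMEDOUT : 0;
}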
/* This function doesn't get included in libc.so */
|
||||
#ifdef IS_IN_libpthread
|
||||
int
|
||||
__lll_timedwait_tid (int *tidp, const struct timespec *abstime)
|
||||
{
|
||||
|
@@ -25,7 +25,7 @@
|
||||
|
||||
|
||||
int
|
||||
__lll_robust_lock_wait (int *futex)
|
||||
__lll_robust_lock_wait (int *futex, int private)
|
||||
{
|
||||
int oldval = *futex;
|
||||
int tid = THREAD_GETMEM (THREAD_SELF, tid);
|
||||
@@ -44,9 +44,7 @@ __lll_robust_lock_wait (int *futex)
|
||||
&& atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
|
||||
continue;
|
||||
|
||||
lll_futex_wait (futex, newval,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
lll_futex_wait (futex, newval, private);
|
||||
|
||||
try:
|
||||
;
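Unlike the plain lock, the robust variants store the owner's TID in the futex word and reserve two high bits, FUTEX_WAITERS and FUTEX_OWNER_DIED, which is why the loop above CASes newval (the TID plus the waiters bit) instead of the constant 2. A sketch of the word layout and an uncontended acquire attempt; the constants follow the Linux robust-futex ABI, but the helper itself is illustrative:

#define FUTEX_WAITERS    0x80000000
#define FUTEX_OWNER_DIED 0x40000000
#define FUTEX_TID_MASK   0x3fffffff

/* Try to take ownership of a robust futex word.  Returns 0 on success;
   otherwise the previous value, whose FUTEX_TID_MASK bits name the
   current owner and whose FUTEX_OWNER_DIED bit reports a dead owner.  */
static int
robust_trylock (int *futex, int my_tid)
{
  int expected = 0;
  if (__atomic_compare_exchange_n (futex, &expected, my_tid, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return 0;
  return expected;
}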
@@ -59,7 +57,8 @@ __lll_robust_lock_wait (int *futex)
|
||||
|
||||
|
||||
int
|
||||
__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime)
|
||||
__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime,
|
||||
int private)
|
||||
{
|
||||
/* Reject invalid timeouts. */
|
||||
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
|
||||
@@ -102,9 +101,7 @@ __lll_robust_timedlock_wait (int *futex, const struct timespec *abstime)
|
||||
&& atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
|
||||
continue;
|
||||
|
||||
lll_futex_timed_wait (futex, newval, &rt,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
lll_futex_timed_wait (futex, newval, &rt, private);
|
||||
|
||||
try:
|
||||
;
|
||||
|
@@ -69,9 +69,6 @@
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* Initializer for compatibility lock. */
|
||||
#define LLL_MUTEX_LOCK_INITIALIZER (0)
|
||||
|
||||
#define lll_futex_wait(futexp, val, private) \
|
||||
lll_futex_timed_wait (futexp, val, NULL, private)
|
||||
|
||||
@@ -97,14 +94,15 @@
|
||||
INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
|
||||
})
|
||||
|
||||
#define lll_robust_mutex_dead(futexv) \
|
||||
#define lll_robust_dead(futexv, private) \
|
||||
do \
|
||||
{ \
|
||||
INTERNAL_SYSCALL_DECL (__err); \
|
||||
int *__futexp = &(futexv); \
|
||||
\
|
||||
atomic_or (__futexp, FUTEX_OWNER_DIED); \
|
||||
INTERNAL_SYSCALL (futex, __err, 4, __futexp, FUTEX_WAKE, 1, 0); \
|
||||
INTERNAL_SYSCALL (futex, __err, 4, __futexp, \
|
||||
__lll_private_flag (FUTEX_WAKE, private), 1, 0); \
|
||||
} \
|
||||
while (0)
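__lll_private_flag (FUTEX_WAKE, private) is what turns the LLL_PRIVATE/LLL_SHARED argument into the right futex opcode. With LLL_PRIVATE defined as 0 and LLL_SHARED as FUTEX_PRIVATE_FLAG (the seemingly backwards definition the headers comment on), the selection reduces to an OR followed by an XOR. A sketch of that arithmetic, assuming a kernel that supports private futexes:

#define FUTEX_PRIVATE_FLAG 128
#define LLL_PRIVATE 0
#define LLL_SHARED  FUTEX_PRIVATE_FLAG

/* Set the private bit unconditionally, then XOR with the caller's
   'private' value: LLL_PRIVATE (0) keeps the bit, LLL_SHARED (128)
   cancels it again.  */
#define private_flag(op, private) \
  (((op) | FUTEX_PRIVATE_FLAG) ^ (private))

/* private_flag (FUTEX_WAKE, LLL_PRIVATE) -> FUTEX_WAKE | FUTEX_PRIVATE_FLAG
   private_flag (FUTEX_WAKE, LLL_SHARED)  -> FUTEX_WAKE                      */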
@@ -171,119 +169,111 @@
|
||||
__val; \
|
||||
})
|
||||
|
||||
#define lll_robust_mutex_trylock(lock, id) __lll_robust_trylock (&(lock), id)
|
||||
#define lll_robust_trylock(lock, id) __lll_robust_trylock (&(lock), id)
|
||||
|
||||
/* Set *futex to 1 if it is 0, atomically. Returns the old value */
|
||||
#define __lll_trylock(futex) __lll_robust_trylock (futex, 1)
|
||||
|
||||
#define lll_mutex_trylock(lock) __lll_trylock (&(lock))
|
||||
#define lll_trylock(lock) __lll_trylock (&(lock))
|
||||
|
||||
/* Set *futex to 2 if it is 0, atomically. Returns the old value */
|
||||
#define __lll_cond_trylock(futex) __lll_robust_trylock (futex, 2)
|
||||
|
||||
#define lll_mutex_cond_trylock(lock) __lll_cond_trylock (&(lock))
|
||||
#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))
|
||||
|
||||
|
||||
extern void __lll_lock_wait (int *futex) attribute_hidden;
|
||||
extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
|
||||
extern void __lll_lock_wait_private (int *futex) attribute_hidden;
|
||||
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
|
||||
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
|
||||
|
||||
#define lll_mutex_lock(lock) \
|
||||
#define lll_lock(lock, private) \
|
||||
(void) ({ \
|
||||
int *__futex = &(lock); \
|
||||
if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
|
||||
0) != 0) \
|
||||
__lll_lock_wait (__futex); \
|
||||
{ \
|
||||
if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
|
||||
__lll_lock_wait_private (__futex); \
|
||||
else \
|
||||
__lll_lock_wait (__futex, private); \
|
||||
} \
|
||||
})
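The __builtin_constant_p (private) && (private) == LLL_PRIVATE test exists so the compiler can resolve the slow-path call at compile time: inside libc every caller passes the literal LLL_PRIVATE, the else branch folds away, and only the small __lll_lock_wait_private helper ends up referenced. A stand-alone illustration of the pattern; all names here are made up:

/* Stand-ins for the private-only and generic slow paths.  */
static void slow_lock_private (int *futex) { (void) futex; }
static void slow_lock_generic (int *futex, int private)
{ (void) futex; (void) private; }

#define toy_lll_lock(futex, private)                                   \
  do                                                                   \
    {                                                                  \
      if (__sync_val_compare_and_swap (&(futex), 0, 1) != 0)           \
        {                                                              \
          /* Folded to a single call when 'private' is constant.  */   \
          if (__builtin_constant_p (private) && (private) == 0)        \
            slow_lock_private (&(futex));                              \
          else                                                         \
            slow_lock_generic (&(futex), private);                     \
        }                                                              \
    }                                                                  \
  while (0)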
#define lll_robust_mutex_lock(lock, id) \
|
||||
#define lll_robust_lock(lock, id, private) \
|
||||
({ \
|
||||
int *__futex = &(lock); \
|
||||
int __val = 0; \
|
||||
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
|
||||
0), 0)) \
|
||||
__val = __lll_robust_lock_wait (__futex); \
|
||||
__val = __lll_robust_lock_wait (__futex, private); \
|
||||
__val; \
|
||||
})
|
||||
|
||||
#define lll_mutex_cond_lock(lock) \
|
||||
#define lll_cond_lock(lock, private) \
|
||||
(void) ({ \
|
||||
int *__futex = &(lock); \
|
||||
if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 2, 0),\
|
||||
0) != 0) \
|
||||
__lll_lock_wait (__futex); \
|
||||
__lll_lock_wait (__futex, private); \
|
||||
})
|
||||
|
||||
#define lll_robust_mutex_cond_lock(lock, id) \
|
||||
#define lll_robust_cond_lock(lock, id, private) \
|
||||
({ \
|
||||
int *__futex = &(lock); \
|
||||
int __val = 0; \
|
||||
int __id = id | FUTEX_WAITERS; \
|
||||
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id,\
|
||||
0), 0)) \
|
||||
__val = __lll_robust_lock_wait (__futex); \
|
||||
__val = __lll_robust_lock_wait (__futex, private); \
|
||||
__val; \
|
||||
})
|
||||
|
||||
|
||||
extern int __lll_timedlock_wait
|
||||
(int *futex, const struct timespec *) attribute_hidden;
|
||||
(int *futex, const struct timespec *, int private) attribute_hidden;
|
||||
extern int __lll_robust_timedlock_wait
|
||||
(int *futex, const struct timespec *) attribute_hidden;
|
||||
(int *futex, const struct timespec *, int private) attribute_hidden;
|
||||
|
||||
#define lll_mutex_timedlock(lock, abstime) \
|
||||
#define lll_timedlock(lock, abstime, private) \
|
||||
({ \
|
||||
int *__futex = &(lock); \
|
||||
int __val = 0; \
|
||||
if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
|
||||
0) != 0) \
|
||||
__val = __lll_timedlock_wait (__futex, abstime); \
|
||||
__val = __lll_timedlock_wait (__futex, abstime, private); \
|
||||
__val; \
|
||||
})
|
||||
|
||||
#define lll_robust_mutex_timedlock(lock, abstime, id) \
|
||||
#define lll_robust_timedlock(lock, abstime, id, private) \
|
||||
({ \
|
||||
int *__futex = &(lock); \
|
||||
int __val = 0; \
|
||||
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
|
||||
0), 0)) \
|
||||
__val = __lll_robust_timedlock_wait (__futex, abstime); \
|
||||
__val = __lll_robust_timedlock_wait (__futex, abstime, private); \
|
||||
__val; \
|
||||
})
|
||||
|
||||
#define lll_mutex_unlock(lock) \
|
||||
#define lll_unlock(lock, private) \
|
||||
((void) ({ \
|
||||
int *__futex = &(lock); \
|
||||
int __val = atomic_exchange_rel (__futex, 0); \
|
||||
if (__builtin_expect (__val > 1, 0)) \
|
||||
lll_futex_wake (__futex, 1, LLL_SHARED); \
|
||||
lll_futex_wake (__futex, 1, private); \
|
||||
}))
|
||||
|
||||
#define lll_robust_mutex_unlock(lock) \
|
||||
#define lll_robust_unlock(lock, private) \
|
||||
((void) ({ \
|
||||
int *__futex = &(lock); \
|
||||
int __val = atomic_exchange_rel (__futex, 0); \
|
||||
if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
|
||||
lll_futex_wake (__futex, 1, LLL_SHARED); \
|
||||
lll_futex_wake (__futex, 1, private); \
|
||||
}))
|
||||
|
||||
#define lll_mutex_unlock_force(lock) \
|
||||
((void) ({ \
|
||||
int *__futex = &(lock); \
|
||||
*__futex = 0; \
|
||||
__asm __volatile (__lll_rel_instr ::: "memory"); \
|
||||
lll_futex_wake (__futex, 1, LLL_SHARED); \
|
||||
}))
|
||||
|
||||
#define lll_mutex_islocked(futex) \
|
||||
#define lll_islocked(futex) \
|
||||
(futex != 0)
|
||||
|
||||
|
||||
/* Our internal lock implementation is identical to the binary-compatible
|
||||
mutex implementation. */
|
||||
|
||||
/* Type for lock object. */
|
||||
typedef int lll_lock_t;
|
||||
|
||||
/* Initializers for lock. */
|
||||
#define LLL_LOCK_INITIALIZER (0)
|
||||
#define LLL_LOCK_INITIALIZER_LOCKED (1)
|
||||
@@ -293,11 +283,6 @@ typedef int lll_lock_t;
|
||||
1 - taken by one user
|
||||
>1 - taken by more users */
|
||||
|
||||
#define lll_trylock(lock) lll_mutex_trylock (lock)
|
||||
#define lll_lock(lock) lll_mutex_lock (lock)
|
||||
#define lll_unlock(lock) lll_mutex_unlock (lock)
|
||||
#define lll_islocked(lock) lll_mutex_islocked (lock)
|
||||
|
||||
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
|
||||
wakeup when the clone terminates. The memory location contains the
|
||||
thread ID while the clone is running and is reset to zero
|
||||
@@ -320,26 +305,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
|
||||
__res; \
|
||||
})
|
||||
|
||||
|
||||
/* Conditional variable handling. */
|
||||
|
||||
extern void __lll_cond_wait (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
extern int __lll_cond_timedwait (pthread_cond_t *cond,
|
||||
const struct timespec *abstime)
|
||||
attribute_hidden;
|
||||
extern void __lll_cond_wake (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
extern void __lll_cond_broadcast (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
|
||||
#define lll_cond_wait(cond) \
|
||||
__lll_cond_wait (cond)
|
||||
#define lll_cond_timedwait(cond, abstime) \
|
||||
__lll_cond_timedwait (cond, abstime)
|
||||
#define lll_cond_wake(cond) \
|
||||
__lll_cond_wake (cond)
|
||||
#define lll_cond_broadcast(cond) \
|
||||
__lll_cond_broadcast (cond)
|
||||
|
||||
#endif /* lowlevellock.h */
|
||||
|
@@ -28,12 +28,38 @@
|
||||
|
||||
int
|
||||
__new_sem_post (sem_t *sem)
|
||||
{
|
||||
struct new_sem *isem = (struct new_sem *) sem;
|
||||
|
||||
__asm __volatile (__lll_rel_instr ::: "memory");
|
||||
atomic_increment (&isem->value);
|
||||
__asm __volatile (__lll_acq_instr ::: "memory");
|
||||
if (isem->nwaiters > 0)
|
||||
{
|
||||
int err = lll_futex_wake (&isem->value, 1,
|
||||
isem->private ^ FUTEX_PRIVATE_FLAG);
|
||||
if (__builtin_expect (err, 0) < 0)
|
||||
{
|
||||
__set_errno (-err);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
versioned_symbol (libpthread, __new_sem_post, sem_post, GLIBC_2_1);
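__new_sem_post above increments the count, then pays for a FUTEX_WAKE syscall only when nwaiters says a thread may actually be blocked, and converts a negative kernel result into errno. A condensed sketch of the same flow; the struct fields mirror the ones used above (value, nwaiters, a private flag), but the wake_op field and the use of syscall() are assumptions of the sketch:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

struct toy_sem
{
  int value;               /* Semaphore count, doubles as the futex word.  */
  unsigned long nwaiters;  /* Threads currently blocked in the wait path.  */
  int wake_op;             /* FUTEX_WAKE or FUTEX_WAKE_PRIVATE.  */
};

static int
toy_sem_post (struct toy_sem *sem)
{
  __atomic_fetch_add (&sem->value, 1, __ATOMIC_RELEASE);

  if (sem->nwaiters > 0)
    {
      /* syscall() returns -1 and sets errno itself; glibc's internal
         lll_futex_wake instead returns -errno, hence the
         __set_errno (-err) dance in the code above.  */
      if (syscall (SYS_futex, &sem->value, sem->wake_op, 1,
                   NULL, NULL, 0) < 0)
        return -1;
    }
  return 0;
}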
#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
|
||||
|
||||
int
|
||||
attribute_compat_text_section
|
||||
__old_sem_post (sem_t *sem)
|
||||
{
|
||||
int *futex = (int *) sem;
|
||||
|
||||
__asm __volatile (__lll_rel_instr ::: "memory");
|
||||
int nr = atomic_increment_val (futex);
|
||||
int err = lll_futex_wake (futex, nr, LLL_SHARED);
|
||||
/* We always have to assume it is a shared semaphore. */
|
||||
int err = lll_futex_wake (futex, 1, LLL_SHARED);
|
||||
if (__builtin_expect (err, 0) < 0)
|
||||
{
|
||||
__set_errno (-err);
|
||||
@ -41,8 +67,6 @@ __new_sem_post (sem_t *sem)
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
versioned_symbol (libpthread, __new_sem_post, sem_post, GLIBC_2_1);
|
||||
#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
|
||||
strong_alias (__new_sem_post, __old_sem_post)
|
||||
|
||||
compat_symbol (libpthread, __old_sem_post, sem_post, GLIBC_2_0);
|
||||
#endif
|
||||
|
@ -1,8 +1,8 @@
|
||||
#include <pthreadP.h>
|
||||
|
||||
#define LLL_MUTEX_LOCK(mutex) lll_mutex_cond_lock (mutex)
|
||||
#define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_cond_trylock (mutex)
|
||||
#define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_cond_lock (mutex, id)
|
||||
#define LLL_MUTEX_LOCK(mutex) lll_cond_lock (mutex, /* XYZ */ LLL_SHARED)
|
||||
#define LLL_MUTEX_TRYLOCK(mutex) lll_cond_trylock (mutex)
|
||||
#define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_cond_lock (mutex, id, /* XYZ */ LLL_SHARED)
|
||||
#define __pthread_mutex_lock __pthread_mutex_cond_lock
|
||||
#define NO_INCR
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
|
||||
/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
@ -24,7 +24,7 @@
|
||||
|
||||
|
||||
/* Lock to protect allocation and deallocation of fork handlers. */
|
||||
lll_lock_t __fork_lock = LLL_LOCK_INITIALIZER;
|
||||
int __fork_lock = LLL_LOCK_INITIALIZER;
|
||||
|
||||
|
||||
/* Number of pre-allocated handler entries. */
|
||||
@ -85,7 +85,7 @@ __register_atfork (prepare, parent, child, dso_handle)
|
||||
void *dso_handle;
|
||||
{
|
||||
/* Get the lock to not conflict with other allocations. */
|
||||
lll_lock (__fork_lock);
|
||||
lll_lock (__fork_lock, LLL_PRIVATE);
|
||||
|
||||
struct fork_handler *newp = fork_handler_alloc ();
|
||||
|
||||
@ -102,7 +102,7 @@ __register_atfork (prepare, parent, child, dso_handle)
|
||||
}
|
||||
|
||||
/* Release the lock. */
|
||||
lll_unlock (__fork_lock);
|
||||
lll_unlock (__fork_lock, LLL_PRIVATE);
|
||||
|
||||
return newp == NULL ? ENOMEM : 0;
|
||||
}
|
||||
@ -112,7 +112,7 @@ libc_hidden_def (__register_atfork)
|
||||
libc_freeres_fn (free_mem)
|
||||
{
|
||||
/* Get the lock to not conflict with running forks. */
|
||||
lll_lock (__fork_lock);
|
||||
lll_lock (__fork_lock, LLL_PRIVATE);
|
||||
|
||||
/* No more fork handlers. */
|
||||
__fork_handlers = NULL;
|
||||
@ -123,7 +123,7 @@ libc_freeres_fn (free_mem)
|
||||
memset (&fork_handler_pool, '\0', sizeof (fork_handler_pool));
|
||||
|
||||
/* Release the lock. */
|
||||
lll_unlock (__fork_lock);
|
||||
lll_unlock (__fork_lock, LLL_PRIVATE);
|
||||
|
||||
/* We can free the memory after releasing the lock. */
|
||||
while (runp != NULL)
|
||||
|
@ -68,9 +68,6 @@
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* Initializer for compatibility lock. */
|
||||
#define LLL_MUTEX_LOCK_INITIALIZER (0)
|
||||
|
||||
#define lll_futex_wait(futex, val, private) \
|
||||
lll_futex_timed_wait (futex, val, NULL, private)
|
||||
|
||||
@ -108,13 +105,13 @@
|
||||
})
|
||||
|
||||
|
||||
#define lll_robust_mutex_dead(futexv) \
|
||||
#define lll_robust_dead(futexv, private) \
|
||||
do \
|
||||
{ \
|
||||
int *__futexp = &(futexv); \
|
||||
\
|
||||
atomic_or (__futexp, FUTEX_OWNER_DIED); \
|
||||
lll_futex_wake (__futexp, 1, LLL_SHARED); \
|
||||
lll_futex_wake (__futexp, 1, private); \
|
||||
} \
|
||||
while (0)
|
||||
|
||||
@ -175,7 +172,7 @@
|
||||
|
||||
static inline int
|
||||
__attribute__ ((always_inline))
|
||||
__lll_mutex_trylock (int *futex)
|
||||
__lll_trylock (int *futex)
|
||||
{
|
||||
unsigned int old;
|
||||
|
||||
@ -184,12 +181,12 @@ __lll_mutex_trylock (int *futex)
|
||||
: "0" (0), "d" (1), "m" (*futex) : "cc", "memory" );
|
||||
return old != 0;
|
||||
}
|
||||
#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex))
|
||||
#define lll_trylock(futex) __lll_trylock (&(futex))
|
||||
|
||||
|
||||
static inline int
|
||||
__attribute__ ((always_inline))
|
||||
__lll_mutex_cond_trylock (int *futex)
|
||||
__lll_cond_trylock (int *futex)
|
||||
{
|
||||
unsigned int old;
|
||||
|
||||
@ -198,12 +195,12 @@ __lll_mutex_cond_trylock (int *futex)
|
||||
: "0" (0), "d" (2), "m" (*futex) : "cc", "memory" );
|
||||
return old != 0;
|
||||
}
|
||||
#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
|
||||
#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex))
|
||||
|
||||
|
||||
static inline int
|
||||
__attribute__ ((always_inline))
|
||||
__lll_robust_mutex_trylock (int *futex, int id)
|
||||
__lll_robust_trylock (int *futex, int id)
|
||||
{
|
||||
unsigned int old;
|
||||
|
||||
@ -212,141 +209,121 @@ __lll_robust_mutex_trylock (int *futex, int id)
|
||||
: "0" (0), "d" (id), "m" (*futex) : "cc", "memory" );
|
||||
return old != 0;
|
||||
}
|
||||
#define lll_robust_mutex_trylock(futex, id) \
|
||||
__lll_robust_mutex_trylock (&(futex), id)
|
||||
#define lll_robust_trylock(futex, id) \
|
||||
__lll_robust_trylock (&(futex), id)
|
||||
|
||||
|
||||
extern void __lll_lock_wait (int *futex) attribute_hidden;
|
||||
extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
|
||||
extern void __lll_lock_wait_private (int *futex) attribute_hidden;
|
||||
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
|
||||
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
|
||||
|
||||
static inline void
|
||||
__attribute__ ((always_inline))
|
||||
__lll_mutex_lock (int *futex)
|
||||
__lll_lock (int *futex, int private)
|
||||
{
|
||||
if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
|
||||
__lll_lock_wait (futex);
|
||||
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 1, 0), 0))
|
||||
{
|
||||
if (__builtin_constant_p (private) && private == LLL_PRIVATE)
|
||||
__lll_lock_wait_private (futex);
|
||||
else
|
||||
__lll_lock_wait (futex, private);
|
||||
}
|
||||
}
|
||||
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
|
||||
#define lll_lock(futex, private) __lll_lock (&(futex), private)
|
||||
|
||||
static inline int
|
||||
__attribute__ ((always_inline))
|
||||
__lll_robust_mutex_lock (int *futex, int id)
|
||||
__lll_robust_lock (int *futex, int id, int private)
|
||||
{
|
||||
int result = 0;
|
||||
if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
|
||||
result = __lll_robust_lock_wait (futex);
|
||||
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, id, 0),
|
||||
0))
|
||||
result = __lll_robust_lock_wait (futex, private);
|
||||
return result;
|
||||
}
|
||||
#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id)
|
||||
#define lll_robust_lock(futex, id, private) \
|
||||
__lll_robust_lock (&(futex), id, private)
|
||||
|
||||
static inline void
|
||||
__attribute__ ((always_inline))
|
||||
__lll_mutex_cond_lock (int *futex)
|
||||
__lll_cond_lock (int *futex, int private)
|
||||
{
|
||||
if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
|
||||
__lll_lock_wait (futex);
|
||||
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 2, 0), 0))
|
||||
__lll_lock_wait (futex, private);
|
||||
}
|
||||
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
|
||||
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
|
||||
|
||||
#define lll_robust_mutex_cond_lock(futex, id) \
|
||||
__lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
|
||||
#define lll_robust_cond_lock(futex, id, private) \
|
||||
__lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
|
||||
|
||||
extern int __lll_timedlock_wait
|
||||
(int *futex, const struct timespec *) attribute_hidden;
|
||||
(int *futex, const struct timespec *, int private) attribute_hidden;
|
||||
extern int __lll_robust_timedlock_wait
|
||||
(int *futex, const struct timespec *) attribute_hidden;
|
||||
(int *futex, const struct timespec *, int private) attribute_hidden;
|
||||
|
||||
static inline int
|
||||
__attribute__ ((always_inline))
|
||||
__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
|
||||
__lll_timedlock (int *futex, const struct timespec *abstime, int private)
|
||||
{
|
||||
int result = 0;
|
||||
if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
|
||||
result = __lll_timedlock_wait (futex, abstime);
|
||||
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 1, 0), 0))
|
||||
result = __lll_timedlock_wait (futex, abstime, private);
|
||||
return result;
|
||||
}
|
||||
#define lll_mutex_timedlock(futex, abstime) \
|
||||
__lll_mutex_timedlock (&(futex), abstime)
|
||||
#define lll_timedlock(futex, abstime, private) \
|
||||
__lll_timedlock (&(futex), abstime, private)
|
||||
|
||||
static inline int
|
||||
__attribute__ ((always_inline))
|
||||
__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
|
||||
int id)
|
||||
__lll_robust_timedlock (int *futex, const struct timespec *abstime,
|
||||
int id, int private)
|
||||
{
|
||||
int result = 0;
|
||||
if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
|
||||
result = __lll_robust_timedlock_wait (futex, abstime);
|
||||
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, id, 0),
|
||||
0))
|
||||
result = __lll_robust_timedlock_wait (futex, abstime, private);
|
||||
return result;
|
||||
}
|
||||
#define lll_robust_mutex_timedlock(futex, abstime, id) \
|
||||
__lll_robust_mutex_timedlock (&(futex), abstime, id)
|
||||
#define lll_robust_timedlock(futex, abstime, id, private) \
|
||||
__lll_robust_timedlock (&(futex), abstime, id, private)
|
||||
|
||||
|
||||
static inline void
|
||||
__attribute__ ((always_inline))
|
||||
__lll_mutex_unlock (int *futex)
|
||||
__lll_unlock (int *futex, int private)
|
||||
{
|
||||
int oldval;
|
||||
int newval = 0;
|
||||
|
||||
lll_compare_and_swap (futex, oldval, newval, "slr %2,%2");
|
||||
if (oldval > 1)
|
||||
lll_futex_wake (futex, 1, LLL_SHARED);
|
||||
if (__builtin_expect (oldval > 1, 0))
|
||||
lll_futex_wake (futex, 1, private);
|
||||
}
|
||||
#define lll_mutex_unlock(futex) \
|
||||
__lll_mutex_unlock(&(futex))
|
||||
#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
|
||||
|
||||
|
||||
static inline void
|
||||
__attribute__ ((always_inline))
|
||||
__lll_robust_mutex_unlock (int *futex, int mask)
|
||||
__lll_robust_unlock (int *futex, int private)
|
||||
{
|
||||
int oldval;
|
||||
int newval = 0;
|
||||
|
||||
lll_compare_and_swap (futex, oldval, newval, "slr %2,%2");
|
||||
if (oldval & mask)
|
||||
lll_futex_wake (futex, 1, LLL_SHARED);
|
||||
if (__builtin_expect (oldval & FUTEX_WAITERS, 0))
|
||||
lll_futex_wake (futex, 1, private);
|
||||
}
|
||||
#define lll_robust_mutex_unlock(futex) \
|
||||
__lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS)
|
||||
#define lll_robust_unlock(futex, private) \
|
||||
__lll_robust_unlock(&(futex), private)
|
||||
|
||||
|
||||
static inline void
|
||||
__attribute__ ((always_inline))
|
||||
__lll_mutex_unlock_force (int *futex)
|
||||
{
|
||||
*futex = 0;
|
||||
lll_futex_wake (futex, 1, LLL_SHARED);
|
||||
}
|
||||
#define lll_mutex_unlock_force(futex) \
|
||||
__lll_mutex_unlock_force(&(futex))
|
||||
|
||||
#define lll_mutex_islocked(futex) \
|
||||
#define lll_islocked(futex) \
|
||||
(futex != 0)
|
||||
|
||||
|
||||
/* We have a separate internal lock implementation which is not tied
|
||||
to binary compatibility. We can use the lll_mutex_*. */
|
||||
|
||||
/* Type for lock object. */
|
||||
typedef int lll_lock_t;
|
||||
|
||||
/* Initializers for lock. */
|
||||
#define LLL_LOCK_INITIALIZER (0)
|
||||
#define LLL_LOCK_INITIALIZER_LOCKED (1)
|
||||
|
||||
#define lll_trylock(futex) lll_mutex_trylock (futex)
|
||||
#define lll_lock(futex) lll_mutex_lock (futex)
|
||||
#define lll_unlock(futex) lll_mutex_unlock (futex)
|
||||
#define lll_islocked(futex) lll_mutex_islocked (futex)
|
||||
|
||||
/* The states of a lock are:
|
||||
1 - untaken
|
||||
0 - taken by one user
|
||||
<0 - taken by more users */
|
||||
|
||||
|
||||
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
|
||||
wakeup when the clone terminates. The memory location contains the
|
||||
thread ID while the clone is running and is reset to zero
|
||||
@ -373,25 +350,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
|
||||
__res; \
|
||||
})
|
||||
|
||||
/* Conditional variable handling. */
|
||||
|
||||
extern void __lll_cond_wait (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
extern int __lll_cond_timedwait (pthread_cond_t *cond,
|
||||
const struct timespec *abstime)
|
||||
attribute_hidden;
|
||||
extern void __lll_cond_wake (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
extern void __lll_cond_broadcast (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
|
||||
#define lll_cond_wait(cond) \
|
||||
__lll_cond_wait (cond)
|
||||
#define lll_cond_timedwait(cond, abstime) \
|
||||
__lll_cond_timedwait (cond, abstime)
|
||||
#define lll_cond_wake(cond) \
|
||||
__lll_cond_wake (cond)
|
||||
#define lll_cond_broadcast(cond) \
|
||||
__lll_cond_broadcast (cond)
|
||||
|
||||
#endif /* lowlevellock.h */
|
||||
|
@ -36,8 +36,7 @@ __new_sem_post (sem_t *sem)
|
||||
if (isem->nwaiters > 0)
|
||||
{
|
||||
int err = lll_futex_wake (&isem->value, 1,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
isem->private ^ FUTEX_PRIVATE_FLAG);
|
||||
if (__builtin_expect (err, 0) < 0)
|
||||
{
|
||||
__set_errno (-err);
|
||||
|
@ -85,8 +85,7 @@ sem_timedwait (sem_t *sem, const struct timespec *abstime)
|
||||
int oldtype = __pthread_enable_asynccancel ();
|
||||
|
||||
err = lll_futex_timed_wait (&isem->value, 0, &rt,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
isem->private ^ FUTEX_PRIVATE_FLAG);
|
||||
|
||||
/* Disable asynchronous cancellation. */
|
||||
__pthread_disable_asynccancel (oldtype);
|
||||
|
@ -57,8 +57,7 @@ __new_sem_wait (sem_t *sem)
|
||||
int oldtype = __pthread_enable_asynccancel ();
|
||||
|
||||
err = lll_futex_wait (&isem->value, 0,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
isem->private ^ FUTEX_PRIVATE_FLAG);
|
||||
|
||||
/* Disable asynchronous cancellation. */
|
||||
__pthread_disable_asynccancel (oldtype);
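The wait side is the mirror image of sem_post: grab a token if value is positive, otherwise advertise yourself in nwaiters and FUTEX_WAIT on value == 0, retrying after every wakeup. A sketch of the blocking loop with cancellation handling left out; it covers the process-private case only and uses a trimmed-down version of the toy struct from the post sketch:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

struct toy_sem2
{
  int value;               /* Semaphore count, also the futex word.  */
  unsigned long nwaiters;  /* Threads blocked here.  */
};

static int
toy_sem_wait (struct toy_sem2 *sem)
{
  for (;;)
    {
      int val = __atomic_load_n (&sem->value, __ATOMIC_RELAXED);

      /* Fast path: take a token if one is available.  */
      while (val > 0)
        if (__atomic_compare_exchange_n (&sem->value, &val, val - 1, 0,
                                         __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
          return 0;

      /* No tokens: register as a waiter, then sleep until the futex
         word stops being zero.  The kernel rechecks the value, so a
         post that slips in here just makes the wait return at once.  */
      __atomic_fetch_add (&sem->nwaiters, 1, __ATOMIC_RELAXED);
      syscall (SYS_futex, &sem->value, FUTEX_WAIT_PRIVATE, 0,
               NULL, NULL, 0);
      __atomic_fetch_sub (&sem->nwaiters, 1, __ATOMIC_RELAXED);
    }
}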
nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h (new file, 18 lines)
@@ -0,0 +1,18 @@
#ifndef _INTERNALTYPES_H
#include "../internaltypes.h"

union sparc_pthread_barrier
{
  struct pthread_barrier b;
  struct sparc_pthread_barrier_s
  {
    unsigned int curr_event;
    int lock;
    unsigned int left;
    unsigned int init_count;
    unsigned char left_lock;
    unsigned char pshared;
  } s;
};

#endif
@@ -70,9 +70,6 @@
|
||||
#endif
|
||||
|
||||
|
||||
/* Initializer for compatibility lock. */
|
||||
#define LLL_MUTEX_LOCK_INITIALIZER (0)
|
||||
|
||||
#define lll_futex_wait(futexp, val, private) \
|
||||
lll_futex_timed_wait (futexp, val, NULL, private)
|
||||
|
||||
@ -110,12 +107,12 @@
|
||||
INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
|
||||
})
|
||||
|
||||
#define lll_robust_mutex_dead(futexv) \
|
||||
#define lll_robust_dead(futexv, private) \
|
||||
do \
|
||||
{ \
|
||||
int *__futexp = &(futexv); \
|
||||
atomic_or (__futexp, FUTEX_OWNER_DIED); \
|
||||
lll_futex_wake (__futexp, 1, LLL_SHARED); \
|
||||
lll_futex_wake (__futexp, 1, private); \
|
||||
} \
|
||||
while (0)
|
||||
|
||||
@ -139,146 +136,132 @@
|
||||
|
||||
static inline int
|
||||
__attribute__ ((always_inline))
|
||||
__lll_mutex_trylock (int *futex)
|
||||
__lll_trylock (int *futex)
|
||||
{
|
||||
return atomic_compare_and_exchange_val_24_acq (futex, 1, 0) != 0;
|
||||
}
|
||||
#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex))
|
||||
#define lll_trylock(futex) __lll_trylock (&(futex))
|
||||
|
||||
static inline int
|
||||
__attribute__ ((always_inline))
|
||||
__lll_mutex_cond_trylock (int *futex)
|
||||
__lll_cond_trylock (int *futex)
|
||||
{
|
||||
return atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0;
|
||||
}
|
||||
#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
|
||||
#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex))
|
||||
|
||||
static inline int
|
||||
__attribute__ ((always_inline))
|
||||
__lll_robust_mutex_trylock (int *futex, int id)
|
||||
__lll_robust_trylock (int *futex, int id)
|
||||
{
|
||||
return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
|
||||
}
|
||||
#define lll_robust_mutex_trylock(futex, id) \
|
||||
__lll_robust_mutex_trylock (&(futex), id)
|
||||
#define lll_robust_trylock(futex, id) \
|
||||
__lll_robust_trylock (&(futex), id)
|
||||
|
||||
|
||||
extern void __lll_lock_wait (int *futex) attribute_hidden;
|
||||
extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
|
||||
extern void __lll_lock_wait_private (int *futex) attribute_hidden;
|
||||
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
|
||||
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
|
||||
|
||||
static inline void
|
||||
__attribute__ ((always_inline))
|
||||
__lll_mutex_lock (int *futex)
|
||||
__lll_lock (int *futex, int private)
|
||||
{
|
||||
int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
|
||||
|
||||
if (__builtin_expect (val != 0, 0))
|
||||
__lll_lock_wait (futex);
|
||||
{
|
||||
if (__builtin_constant_p (private) && private == LLL_PRIVATE)
|
||||
__lll_lock_wait_private (futex);
|
||||
else
|
||||
__lll_lock_wait (futex, private);
|
||||
}
|
||||
}
|
||||
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
|
||||
#define lll_lock(futex, private) __lll_lock (&(futex), private)
|
||||
|
||||
static inline int
|
||||
__attribute__ ((always_inline))
|
||||
__lll_robust_mutex_lock (int *futex, int id)
|
||||
__lll_robust_lock (int *futex, int id, int private)
|
||||
{
|
||||
int result = 0;
|
||||
if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
|
||||
result = __lll_robust_lock_wait (futex);
|
||||
result = __lll_robust_lock_wait (futex, private);
|
||||
return result;
|
||||
}
|
||||
#define lll_robust_mutex_lock(futex, id) \
|
||||
__lll_robust_mutex_lock (&(futex), id)
|
||||
#define lll_robust_lock(futex, id, private) \
|
||||
__lll_robust_lock (&(futex), id, private)
|
||||
|
||||
static inline void
|
||||
__attribute__ ((always_inline))
|
||||
__lll_mutex_cond_lock (int *futex)
|
||||
__lll_cond_lock (int *futex, int private)
|
||||
{
|
||||
int val = atomic_compare_and_exchange_val_24_acq (futex, 2, 0);
|
||||
|
||||
if (__builtin_expect (val != 0, 0))
|
||||
__lll_lock_wait (futex);
|
||||
__lll_lock_wait (futex, private);
|
||||
}
|
||||
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
|
||||
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
|
||||
|
||||
#define lll_robust_mutex_cond_lock(futex, id) \
|
||||
__lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
|
||||
#define lll_robust_cond_lock(futex, id, private) \
|
||||
__lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
|
||||
|
||||
|
||||
extern int __lll_timedlock_wait (int *futex, const struct timespec *)
|
||||
attribute_hidden;
|
||||
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
|
||||
attribute_hidden;
|
||||
extern int __lll_timedlock_wait (int *futex, const struct timespec *,
|
||||
int private) attribute_hidden;
|
||||
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
|
||||
int private) attribute_hidden;
|
||||
|
||||
static inline int
|
||||
__attribute__ ((always_inline))
|
||||
__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
|
||||
__lll_timedlock (int *futex, const struct timespec *abstime, int private)
|
||||
{
|
||||
int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
|
||||
int result = 0;
|
||||
|
||||
if (__builtin_expect (val != 0, 0))
|
||||
result = __lll_timedlock_wait (futex, abstime);
|
||||
result = __lll_timedlock_wait (futex, abstime, private);
|
||||
return result;
|
||||
}
|
||||
#define lll_mutex_timedlock(futex, abstime) \
|
||||
__lll_mutex_timedlock (&(futex), abstime)
|
||||
#define lll_timedlock(futex, abstime, private) \
|
||||
__lll_timedlock (&(futex), abstime, private)
|
||||
|
||||
static inline int
|
||||
__attribute__ ((always_inline))
|
||||
__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
|
||||
int id)
|
||||
__lll_robust_timedlock (int *futex, const struct timespec *abstime,
|
||||
int id, int private)
|
||||
{
|
||||
int result = 0;
|
||||
if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
|
||||
result = __lll_robust_timedlock_wait (futex, abstime);
|
||||
result = __lll_robust_timedlock_wait (futex, abstime, private);
|
||||
return result;
|
||||
}
|
||||
#define lll_robust_mutex_timedlock(futex, abstime, id) \
|
||||
__lll_robust_mutex_timedlock (&(futex), abstime, id)
|
||||
#define lll_robust_timedlock(futex, abstime, id, private) \
|
||||
__lll_robust_timedlock (&(futex), abstime, id, private)
|
||||
|
||||
#define lll_mutex_unlock(lock) \
|
||||
#define lll_unlock(lock, private) \
|
||||
((void) ({ \
|
||||
int *__futex = &(lock); \
|
||||
int __val = atomic_exchange_24_rel (__futex, 0); \
|
||||
if (__builtin_expect (__val > 1, 0)) \
|
||||
lll_futex_wake (__futex, 1, LLL_SHARED); \
|
||||
lll_futex_wake (__futex, 1, private); \
|
||||
}))
|
||||
|
||||
#define lll_robust_mutex_unlock(lock) \
|
||||
#define lll_robust_unlock(lock, private) \
|
||||
((void) ({ \
|
||||
int *__futex = &(lock); \
|
||||
int __val = atomic_exchange_rel (__futex, 0); \
|
||||
if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
|
||||
lll_futex_wake (__futex, 1, LLL_SHARED); \
|
||||
lll_futex_wake (__futex, 1, private); \
|
||||
}))
|
||||
|
||||
#define lll_mutex_unlock_force(lock) \
|
||||
((void) ({ \
|
||||
int *__futex = &(lock); \
|
||||
(void) atomic_exchange_24_rel (__futex, 0); \
|
||||
lll_futex_wake (__futex, 1, LLL_SHARED); \
|
||||
}))
|
||||
|
||||
#define lll_mutex_islocked(futex) \
|
||||
#define lll_islocked(futex) \
|
||||
(futex != 0)
|
||||
|
||||
|
||||
/* We have a separate internal lock implementation which is not tied
|
||||
to binary compatibility. We can use the lll_mutex_*. */
|
||||
|
||||
/* Type for lock object. */
|
||||
typedef int lll_lock_t;
|
||||
|
||||
/* Initializers for lock. */
|
||||
#define LLL_LOCK_INITIALIZER (0)
|
||||
#define LLL_LOCK_INITIALIZER_LOCKED (1)
|
||||
|
||||
#define lll_trylock(futex) lll_mutex_trylock (futex)
|
||||
#define lll_lock(futex) lll_mutex_lock (futex)
|
||||
#define lll_unlock(futex) lll_mutex_unlock (futex)
|
||||
#define lll_islocked(futex) lll_mutex_islocked (futex)
|
||||
|
||||
|
||||
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
|
||||
wakeup when the clone terminates. The memory location contains the
|
||||
thread ID while the clone is running and is reset to zero
|
||||
@ -303,26 +286,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
|
||||
__res; \
|
||||
})
|
||||
|
||||
|
||||
/* Conditional variable handling. */
|
||||
|
||||
extern void __lll_cond_wait (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
extern int __lll_cond_timedwait (pthread_cond_t *cond,
|
||||
const struct timespec *abstime)
|
||||
attribute_hidden;
|
||||
extern void __lll_cond_wake (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
extern void __lll_cond_broadcast (pthread_cond_t *cond)
|
||||
attribute_hidden;
|
||||
|
||||
#define lll_cond_wait(cond) \
|
||||
__lll_cond_wait (cond)
|
||||
#define lll_cond_timedwait(cond, abstime) \
|
||||
__lll_cond_timedwait (cond, abstime)
|
||||
#define lll_cond_wake(cond) \
|
||||
__lll_cond_wake (cond)
|
||||
#define lll_cond_broadcast(cond) \
|
||||
__lll_cond_broadcast (cond)
|
||||
|
||||
#endif /* lowlevellock.h */
|
||||
|
nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
The GNU C Library is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU Lesser General Public
|
||||
License as published by the Free Software Foundation; either
|
||||
version 2.1 of the License, or (at your option) any later version.
|
||||
|
||||
The GNU C Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public
|
||||
License along with the GNU C Library; if not, write to the Free
|
||||
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <errno.h>
|
||||
#include "pthreadP.h"
|
||||
#include <lowlevellock.h>
|
||||
|
||||
int
|
||||
pthread_barrier_destroy (barrier)
|
||||
pthread_barrier_t *barrier;
|
||||
{
|
||||
union sparc_pthread_barrier *ibarrier;
|
||||
int result = EBUSY;
|
||||
|
||||
ibarrier = (union sparc_pthread_barrier *) barrier;
|
||||
|
||||
int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE;
|
||||
|
||||
lll_lock (ibarrier->b.lock, private);
|
||||
|
||||
if (__builtin_expect (ibarrier->b.left == ibarrier->b.init_count, 1))
|
||||
/* The barrier is not used anymore. */
|
||||
result = 0;
|
||||
else
|
||||
/* Still used, return with an error. */
|
||||
lll_unlock (ibarrier->b.lock, private);
|
||||
|
||||
return result;
|
||||
}
|
nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c (new file, 55 lines)
@@ -0,0 +1,55 @@
|
||||
/* Copyright (C) 2002, 2006, 2007 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
The GNU C Library is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU Lesser General Public
|
||||
License as published by the Free Software Foundation; either
|
||||
version 2.1 of the License, or (at your option) any later version.
|
||||
|
||||
The GNU C Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public
|
||||
License along with the GNU C Library; if not, write to the Free
|
||||
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <errno.h>
|
||||
#include "pthreadP.h"
|
||||
#include <lowlevellock.h>
|
||||
|
||||
int
|
||||
pthread_barrier_init (barrier, attr, count)
|
||||
pthread_barrier_t *barrier;
|
||||
const pthread_barrierattr_t *attr;
|
||||
unsigned int count;
|
||||
{
|
||||
union sparc_pthread_barrier *ibarrier;
|
||||
|
||||
if (__builtin_expect (count == 0, 0))
|
||||
return EINVAL;
|
||||
|
||||
struct pthread_barrierattr *iattr = (struct pthread_barrierattr *) attr;
|
||||
if (iattr != NULL)
|
||||
{
|
||||
if (iattr->pshared != PTHREAD_PROCESS_PRIVATE
|
||||
&& __builtin_expect (iattr->pshared != PTHREAD_PROCESS_SHARED, 0))
|
||||
/* Invalid attribute. */
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
ibarrier = (union sparc_pthread_barrier *) barrier;
|
||||
|
||||
/* Initialize the individual fields. */
|
||||
ibarrier->b.lock = LLL_LOCK_INITIALIZER;
|
||||
ibarrier->b.left = count;
|
||||
ibarrier->b.init_count = count;
|
||||
ibarrier->b.curr_event = 0;
|
||||
ibarrier->s.left_lock = 0;
|
||||
ibarrier->s.pshared = (iattr && iattr->pshared == PTHREAD_PROCESS_SHARED);
|
||||
|
||||
return 0;
|
||||
}
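pthread_barrier_init records the pshared attribute so that the wait and destroy paths can later choose LLL_PRIVATE or LLL_SHARED futex operations. From the application's point of view that attribute is the only knob; a minimal usage example of the API implemented here (thread count and names are arbitrary):

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_barrier_t barrier;

static void *
worker (void *arg)
{
  long id = (long) arg;
  /* ... per-thread setup work ... */
  if (pthread_barrier_wait (&barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
    printf ("thread %ld finished the serialization\n", id);
  return NULL;
}

int
main (void)
{
  pthread_barrierattr_t attr;
  pthread_barrierattr_init (&attr);
  /* PTHREAD_PROCESS_PRIVATE lets the implementation use private futexes.  */
  pthread_barrierattr_setpshared (&attr, PTHREAD_PROCESS_PRIVATE);

  pthread_barrier_init (&barrier, &attr, NTHREADS);
  pthread_barrierattr_destroy (&attr);

  pthread_t th[NTHREADS];
  for (long i = 0; i < NTHREADS; ++i)
    pthread_create (&th[i], NULL, worker, (void *) i);
  for (int i = 0; i < NTHREADS; ++i)
    pthread_join (th[i], NULL);

  pthread_barrier_destroy (&barrier);
  return 0;
}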
nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c (new file, 78 lines)
@@ -0,0 +1,78 @@
|
||||
/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
|
||||
|
||||
The GNU C Library is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU Lesser General Public
|
||||
License as published by the Free Software Foundation; either
|
||||
version 2.1 of the License, or (at your option) any later version.
|
||||
|
||||
The GNU C Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public
|
||||
License along with the GNU C Library; if not, write to the Free
|
||||
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <errno.h>
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <pthreadP.h>
|
||||
|
||||
/* Wait on barrier. */
|
||||
int
|
||||
pthread_barrier_wait (barrier)
|
||||
pthread_barrier_t *barrier;
|
||||
{
|
||||
union sparc_pthread_barrier *ibarrier
|
||||
= (union sparc_pthread_barrier *) barrier;
|
||||
int result = 0;
|
||||
int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE;
|
||||
|
||||
/* Make sure we are alone. */
|
||||
lll_lock (ibarrier->b.lock, private);
|
||||
|
||||
/* One more arrival. */
|
||||
--ibarrier->b.left;
|
||||
|
||||
/* Are these all? */
|
||||
if (ibarrier->b.left == 0)
|
||||
{
|
||||
/* Yes. Increment the event counter to avoid invalid wake-ups and
|
||||
tell the current waiters that it is their turn. */
|
||||
++ibarrier->b.curr_event;
|
||||
|
||||
/* Wake up everybody. */
|
||||
lll_futex_wake (&ibarrier->b.curr_event, INT_MAX, private);
|
||||
|
||||
/* This is the thread which finished the serialization. */
|
||||
result = PTHREAD_BARRIER_SERIAL_THREAD;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* The number of the event we are waiting for. The barrier's event
|
||||
number must be bumped before we continue. */
|
||||
unsigned int event = ibarrier->b.curr_event;
|
||||
|
||||
/* Before suspending, make the barrier available to others. */
|
||||
lll_unlock (ibarrier->b.lock, private);
|
||||
|
||||
/* Wait for the event counter of the barrier to change. */
|
||||
do
|
||||
lll_futex_wait (&ibarrier->b.curr_event, event, private);
|
||||
while (event == ibarrier->b.curr_event);
|
||||
}
|
||||
|
||||
/* Make sure the init_count is stored locally or in a register. */
|
||||
unsigned int init_count = ibarrier->b.init_count;
|
||||
|
||||
/* If this was the last woken thread, unlock. */
|
||||
if (atomic_increment_val (&ibarrier->b.left) == init_count)
|
||||
/* We are done. */
|
||||
lll_unlock (ibarrier->b.lock, private);
|
||||
|
||||
return result;
|
||||
}
|
@@ -25,20 +25,35 @@
|
||||
|
||||
|
||||
void
|
||||
__lll_lock_wait (int *futex)
|
||||
__lll_lock_wait_private (int *futex)
|
||||
{
|
||||
do
|
||||
{
|
||||
int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
|
||||
if (oldval != 0)
|
||||
lll_futex_wait (futex, 2);
|
||||
lll_futex_wait (futex, 2, LLL_PRIVATE);
|
||||
}
|
||||
while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
|
||||
}
|
||||
|
||||
#ifdef IS_IN_libpthread
|
||||
/* These functions don't get included in libc.so */
|
||||
|
||||
void
|
||||
__lll_lock_wait (int *futex, int private)
|
||||
{
|
||||
do
|
||||
{
|
||||
int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
|
||||
if (oldval != 0)
|
||||
lll_futex_wait (futex, 2, private);
|
||||
}
|
||||
while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
|
||||
}
|
||||
|
||||
|
||||
int
|
||||
__lll_timedlock_wait (int *futex, const struct timespec *abstime)
|
||||
__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
|
||||
{
|
||||
/* Reject invalid timeouts. */
|
||||
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
|
||||
@ -68,7 +83,7 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)
|
||||
/* Wait. */
|
||||
int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
|
||||
if (oldval != 0)
|
||||
lll_futex_timed_wait (futex, 2, &rt);
|
||||
lll_futex_timed_wait (futex, 2, &rt, private);
|
||||
}
|
||||
while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
|
||||
|
||||
@ -76,8 +91,6 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)
|
||||
}
|
||||
|
||||
|
||||
/* This function doesn't get included in libc.so */
|
||||
#ifdef IS_IN_libpthread
|
||||
int
|
||||
__lll_timedwait_tid (int *tidp, const struct timespec *abstime)
|
||||
{
|
||||
|
@ -22,24 +22,18 @@
|
||||
#include <lowlevellock.h>
|
||||
#include <pthreadP.h>
|
||||
|
||||
struct sparc_pthread_barrier
|
||||
{
|
||||
struct pthread_barrier b;
|
||||
unsigned char left_lock;
|
||||
unsigned char pshared;
|
||||
};
|
||||
|
||||
/* Wait on barrier. */
|
||||
int
|
||||
pthread_barrier_wait (barrier)
|
||||
pthread_barrier_t *barrier;
|
||||
{
|
||||
struct sparc_pthread_barrier *ibarrier
|
||||
= (struct sparc_pthread_barrier *) barrier;
|
||||
union sparc_pthread_barrier *ibarrier
|
||||
= (union sparc_pthread_barrier *) barrier;
|
||||
int result = 0;
|
||||
int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE;
|
||||
|
||||
/* Make sure we are alone. */
|
||||
lll_lock (ibarrier->b.lock);
|
||||
lll_lock (ibarrier->b.lock, private);
|
||||
|
||||
/* One more arrival. */
|
||||
--ibarrier->b.left;
|
||||
@ -52,9 +46,7 @@ pthread_barrier_wait (barrier)
|
||||
++ibarrier->b.curr_event;
|
||||
|
||||
/* Wake up everybody. */
|
||||
lll_futex_wake (&ibarrier->b.curr_event, INT_MAX,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
lll_futex_wake (&ibarrier->b.curr_event, INT_MAX, private);
|
||||
|
||||
/* This is the thread which finished the serialization. */
|
||||
result = PTHREAD_BARRIER_SERIAL_THREAD;
|
||||
@ -66,13 +58,11 @@ pthread_barrier_wait (barrier)
|
||||
unsigned int event = ibarrier->b.curr_event;
|
||||
|
||||
/* Before suspending, make the barrier available to others. */
|
||||
lll_unlock (ibarrier->b.lock);
|
||||
lll_unlock (ibarrier->b.lock, private);
|
||||
|
||||
/* Wait for the event counter of the barrier to change. */
|
||||
do
|
||||
lll_futex_wait (&ibarrier->b.curr_event, event,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
lll_futex_wait (&ibarrier->b.curr_event, event, private);
|
||||
while (event == ibarrier->b.curr_event);
|
||||
}
|
||||
|
||||
@ -80,11 +70,11 @@ pthread_barrier_wait (barrier)
|
||||
unsigned int init_count = ibarrier->b.init_count;
|
||||
|
||||
/* If this was the last woken thread, unlock. */
|
||||
if (__atomic_is_v9 || ibarrier->pshared == 0)
|
||||
if (__atomic_is_v9 || ibarrier->s.pshared == 0)
|
||||
{
|
||||
if (atomic_increment_val (&ibarrier->b.left) == init_count)
|
||||
/* We are done. */
|
||||
lll_unlock (ibarrier->b.lock);
|
||||
lll_unlock (ibarrier->b.lock, private);
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -92,12 +82,12 @@ pthread_barrier_wait (barrier)
|
||||
/* Slightly more complicated. On pre-v9 CPUs, atomic_increment_val
|
||||
is only atomic for threads within the same process, not for
|
||||
multiple processes. */
|
||||
__sparc32_atomic_do_lock24 (&ibarrier->left_lock);
|
||||
__sparc32_atomic_do_lock24 (&ibarrier->s.left_lock);
|
||||
left = ++ibarrier->b.left;
|
||||
__sparc32_atomic_do_unlock24 (&ibarrier->left_lock);
|
||||
__sparc32_atomic_do_unlock24 (&ibarrier->s.left_lock);
|
||||
if (left == init_count)
|
||||
/* We are done. */
|
||||
lll_unlock (ibarrier->b.lock);
|
||||
lll_unlock (ibarrier->b.lock, private);
|
||||
}
|
||||
|
||||
return result;
|
||||
|
@@ -1 +1 @@
|
||||
#include "../../../../../../../pthread_barrier_wait.c"
|
||||
#include "../../pthread_barrier_wait.c"
|
||||
|
@@ -54,7 +54,7 @@ __unregister_atfork (dso_handle)
|
||||
that there couldn't have been another thread deleting something.
|
||||
The __unregister_atfork function is only called from the
|
||||
dlclose() code which itself serializes the operations. */
|
||||
lll_lock (__fork_lock);
|
||||
lll_lock (__fork_lock, LLL_PRIVATE);
|
||||
|
||||
/* We have to create a new list with all the entries we don't remove. */
|
||||
struct deleted_handler
|
||||
@ -89,7 +89,7 @@ __unregister_atfork (dso_handle)
|
||||
while (runp != NULL);
|
||||
|
||||
/* Release the lock. */
|
||||
lll_unlock (__fork_lock);
|
||||
lll_unlock (__fork_lock, LLL_PRIVATE);
|
||||
|
||||
/* Walk the list of all entries which have to be deleted. */
|
||||
while (deleted != NULL)
|
||||
|
@ -17,19 +17,4 @@
|
||||
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <kernel-features.h>
|
||||
|
||||
/* All locks in libc are private. Use the kernel feature if possible. */
|
||||
#define FUTEX_PRIVATE_FLAG 128
|
||||
#ifdef __ASSUME_PRIVATE_FUTEX
|
||||
# define FUTEX_WAIT (0 | FUTEX_PRIVATE_FLAG)
|
||||
# define FUTEX_WAKE (1 | FUTEX_PRIVATE_FLAG)
|
||||
#else
|
||||
# define LOAD_FUTEX_WAIT(reg) \
|
||||
movl %fs:PRIVATE_FUTEX, reg
|
||||
# define LOAD_FUTEX_WAKE(reg) \
|
||||
movl %fs:PRIVATE_FUTEX, reg ; \
|
||||
orl $FUTEX_WAKE, reg
|
||||
#endif
|
||||
|
||||
#include "lowlevellock.S"
|
||||
|
@ -19,33 +19,46 @@
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <pthread-errnos.h>
|
||||
#include <kernel-features.h>
|
||||
#include <lowlevellock.h>
|
||||
|
||||
.text
|
||||
|
||||
#ifndef LOCK
|
||||
# ifdef UP
|
||||
# define LOCK
|
||||
#ifdef __ASSUME_PRIVATE_FUTEX
|
||||
# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
|
||||
movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
|
||||
# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
|
||||
movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
|
||||
# define LOAD_FUTEX_WAIT(reg) \
|
||||
xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
|
||||
# define LOAD_FUTEX_WAKE(reg) \
|
||||
xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
|
||||
#else
|
||||
# if FUTEX_WAIT == 0
|
||||
# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
|
||||
movl %fs:PRIVATE_FUTEX, reg
|
||||
# else
|
||||
# define LOCK lock
|
||||
# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
|
||||
movl %fs:PRIVATE_FUTEX, reg ; \
|
||||
orl $FUTEX_WAIT, reg
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#define SYS_futex 202
|
||||
#ifndef FUTEX_WAIT
|
||||
# define FUTEX_WAIT 0
|
||||
# define FUTEX_WAKE 1
|
||||
#endif
|
||||
|
||||
#ifndef LOAD_FUTEX_WAIT
|
||||
# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
|
||||
movl %fs:PRIVATE_FUTEX, reg ; \
|
||||
orl $FUTEX_WAKE, reg
|
||||
# if FUTEX_WAIT == 0
|
||||
# define LOAD_FUTEX_WAIT(reg) \
|
||||
xorl reg, reg
|
||||
xorl $FUTEX_PRIVATE_FLAG, reg ; \
|
||||
andl %fs:PRIVATE_FUTEX, reg
|
||||
# else
|
||||
# define LOAD_FUTEX_WAIT(reg) \
|
||||
movl $FUTEX_WAIT, reg
|
||||
xorl $FUTEX_PRIVATE_FLAG, reg ; \
|
||||
andl %fs:PRIVATE_FUTEX, reg ; \
|
||||
orl $FUTEX_WAIT, reg
|
||||
# endif
|
||||
# define LOAD_FUTEX_WAKE(reg) \
|
||||
movl $FUTEX_WAKE, reg
|
||||
xorl $FUTEX_PRIVATE_FLAG, reg ; \
|
||||
andl %fs:PRIVATE_FUTEX, reg ; \
|
||||
orl $FUTEX_WAKE, reg
|
||||
#endif
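The assembler macros above build the futex operation word in a register: the caller's private argument (0 or FUTEX_PRIVATE_FLAG) is XORed with FUTEX_PRIVATE_FLAG, ANDed with the per-thread PRIVATE_FUTEX field (which holds the flag only when the kernel actually supports private futexes), and finally ORed with the opcode. The same computation written out in C for readability; the function and parameter names are assumptions of the sketch:

#define FUTEX_WAIT         0
#define FUTEX_WAKE         1
#define FUTEX_PRIVATE_FLAG 128

#define LLL_PRIVATE 0
#define LLL_SHARED  FUTEX_PRIVATE_FLAG

/* 'supported' mirrors %fs:PRIVATE_FUTEX: FUTEX_PRIVATE_FLAG when the
   kernel implements private futexes, 0 on older kernels.  */
static inline int
load_futex_op (int opcode, int private, int supported)
{
  return ((private ^ FUTEX_PRIVATE_FLAG) & supported) | opcode;
}

/* load_futex_op (FUTEX_WAIT, LLL_PRIVATE, 128) == FUTEX_WAIT | FUTEX_PRIVATE_FLAG
   load_futex_op (FUTEX_WAIT, LLL_SHARED, 128)  == FUTEX_WAIT
   load_futex_op (FUTEX_WAKE, LLL_PRIVATE, 0)   == FUTEX_WAKE   (old kernel)  */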
|
||||
|
||||
|
||||
@ -53,11 +66,50 @@
|
||||
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
|
||||
|
||||
|
||||
.globl __lll_mutex_lock_wait
|
||||
.type __lll_mutex_lock_wait,@function
|
||||
.hidden __lll_mutex_lock_wait
|
||||
.globl __lll_lock_wait_private
|
||||
.type __lll_lock_wait_private,@function
|
||||
.hidden __lll_lock_wait_private
|
||||
.align 16
|
||||
__lll_mutex_lock_wait:
|
||||
__lll_lock_wait_private:
|
||||
cfi_startproc
|
||||
pushq %r10
|
||||
cfi_adjust_cfa_offset(8)
|
||||
pushq %rdx
|
||||
cfi_adjust_cfa_offset(8)
|
||||
cfi_offset(%r10, -16)
|
||||
cfi_offset(%rdx, -24)
|
||||
xorq %r10, %r10 /* No timeout. */
|
||||
movl $2, %edx
|
||||
LOAD_PRIVATE_FUTEX_WAIT (%esi)
|
||||
|
||||
cmpl %edx, %eax /* NB: %edx == 2 */
|
||||
jne 2f
|
||||
|
||||
1: movl $SYS_futex, %eax
|
||||
syscall
|
||||
|
||||
2: movl %edx, %eax
|
||||
xchgl %eax, (%rdi) /* NB: lock is implied */
|
||||
|
||||
testl %eax, %eax
|
||||
jnz 1b
|
||||
|
||||
popq %rdx
|
||||
cfi_adjust_cfa_offset(-8)
|
||||
cfi_restore(%rdx)
|
||||
popq %r10
|
||||
cfi_adjust_cfa_offset(-8)
|
||||
cfi_restore(%r10)
|
||||
retq
|
||||
cfi_endproc
|
||||
.size __lll_lock_wait_private,.-__lll_lock_wait_private
|
||||
|
||||
#ifdef NOT_IN_libc
|
||||
.globl __lll_lock_wait
|
||||
.type __lll_lock_wait,@function
|
||||
.hidden __lll_lock_wait
|
||||
.align 16
|
||||
__lll_lock_wait:
|
||||
cfi_startproc
|
||||
pushq %r10
|
||||
cfi_adjust_cfa_offset(8)
|
||||
@ -89,15 +141,13 @@ __lll_mutex_lock_wait:
|
||||
cfi_restore(%r10)
|
||||
retq
|
||||
cfi_endproc
|
||||
.size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
|
||||
.size __lll_lock_wait,.-__lll_lock_wait
|
||||
|
||||
|
||||
#ifdef NOT_IN_libc
|
||||
.globl __lll_mutex_timedlock_wait
|
||||
.type __lll_mutex_timedlock_wait,@function
|
||||
.hidden __lll_mutex_timedlock_wait
|
||||
.globl __lll_timedlock_wait
|
||||
.type __lll_timedlock_wait,@function
|
||||
.hidden __lll_timedlock_wait
|
||||
.align 16
|
||||
__lll_mutex_timedlock_wait:
|
||||
__lll_timedlock_wait:
|
||||
cfi_startproc
|
||||
/* Check for a valid timeout value. */
|
||||
cmpq $1000000000, 8(%rdx)
|
||||
@ -118,10 +168,12 @@ __lll_mutex_timedlock_wait:
|
||||
cfi_offset(%r12, -32)
|
||||
cfi_offset(%r13, -40)
|
||||
cfi_offset(%r14, -48)
|
||||
pushq %rsi
|
||||
cfi_adjust_cfa_offset(8)
|
||||
|
||||
/* Stack frame for the timespec and timeval structs. */
|
||||
subq $16, %rsp
|
||||
cfi_adjust_cfa_offset(16)
|
||||
subq $24, %rsp
|
||||
cfi_adjust_cfa_offset(24)
|
||||
|
||||
movq %rdi, %r12
|
||||
movq %rdx, %r13
|
||||
@ -162,6 +214,7 @@ __lll_mutex_timedlock_wait:
|
||||
je 8f
|
||||
|
||||
movq %rsp, %r10
|
||||
movl 24(%rsp), %esi
|
||||
LOAD_FUTEX_WAIT (%esi)
|
||||
movq %r12, %rdi
|
||||
movl $SYS_futex, %eax
|
||||
@ -174,8 +227,8 @@ __lll_mutex_timedlock_wait:
|
||||
cmpxchgl %edx, (%r12)
|
||||
jnz 7f
|
||||
|
||||
6: addq $16, %rsp
|
||||
cfi_adjust_cfa_offset(-16)
|
||||
6: addq $32, %rsp
|
||||
cfi_adjust_cfa_offset(-32)
|
||||
popq %r14
|
||||
cfi_adjust_cfa_offset(-8)
|
||||
cfi_restore(%r14)
|
||||
@ -196,7 +249,7 @@ __lll_mutex_timedlock_wait:
|
||||
3: movl $EINVAL, %eax
|
||||
retq
|
||||
|
||||
cfi_adjust_cfa_offset(56)
|
||||
cfi_adjust_cfa_offset(72)
|
||||
cfi_offset(%r8, -16)
|
||||
cfi_offset(%r9, -24)
|
||||
cfi_offset(%r12, -32)
|
||||
@ -216,15 +269,45 @@ __lll_mutex_timedlock_wait:
|
||||
5: movl $ETIMEDOUT, %eax
|
||||
jmp 6b
|
||||
cfi_endproc
|
||||
.size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
|
||||
.size __lll_timedlock_wait,.-__lll_timedlock_wait
|
||||
#endif
|
||||
|
||||
|
||||
.globl __lll_mutex_unlock_wake
|
||||
.type __lll_mutex_unlock_wake,@function
|
||||
.hidden __lll_mutex_unlock_wake
|
||||
.globl __lll_unlock_wake_private
|
||||
.type __lll_unlock_wake_private,@function
|
||||
.hidden __lll_unlock_wake_private
|
||||
.align 16
|
||||
__lll_mutex_unlock_wake:
|
||||
__lll_unlock_wake_private:
|
||||
cfi_startproc
|
||||
pushq %rsi
|
||||
cfi_adjust_cfa_offset(8)
|
||||
pushq %rdx
|
||||
cfi_adjust_cfa_offset(8)
|
||||
cfi_offset(%rsi, -16)
|
||||
cfi_offset(%rdx, -24)
|
||||
|
||||
movl $0, (%rdi)
|
||||
LOAD_PRIVATE_FUTEX_WAKE (%esi)
|
||||
movl $1, %edx /* Wake one thread. */
|
||||
movl $SYS_futex, %eax
|
||||
syscall
|
||||
|
||||
popq %rdx
|
||||
cfi_adjust_cfa_offset(-8)
|
||||
cfi_restore(%rdx)
|
||||
popq %rsi
|
||||
cfi_adjust_cfa_offset(-8)
|
||||
cfi_restore(%rsi)
|
||||
retq
|
||||
cfi_endproc
|
||||
.size __lll_unlock_wake_private,.-__lll_unlock_wake_private
|
||||
|
||||
#ifdef NOT_IN_libc
|
||||
.globl __lll_unlock_wake
|
||||
.type __lll_unlock_wake,@function
|
||||
.hidden __lll_unlock_wake
|
||||
.align 16
|
||||
__lll_unlock_wake:
|
||||
cfi_startproc
|
||||
pushq %rsi
|
||||
cfi_adjust_cfa_offset(8)
|
||||
@ -247,10 +330,8 @@ __lll_mutex_unlock_wake:
|
||||
cfi_restore(%rsi)
|
||||
retq
|
||||
cfi_endproc
|
||||
.size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
|
||||
.size __lll_unlock_wake,.-__lll_unlock_wake
|
||||
|
||||
|
||||
#ifdef NOT_IN_libc
|
||||
.globl __lll_timedwait_tid
|
||||
.type __lll_timedwait_tid,@function
|
||||
.hidden __lll_timedwait_tid
|
||||
|
@@ -20,17 +20,27 @@
|
||||
#ifndef _LOWLEVELLOCK_H
|
||||
#define _LOWLEVELLOCK_H 1
|
||||
|
||||
#include <time.h>
|
||||
#include <sys/param.h>
|
||||
#include <bits/pthreadtypes.h>
|
||||
#include <kernel-features.h>
|
||||
#include <tcb-offsets.h>
|
||||
#ifndef __ASSEMBLER__
|
||||
# include <time.h>
|
||||
# include <sys/param.h>
|
||||
# include <bits/pthreadtypes.h>
|
||||
# include <kernel-features.h>
|
||||
# include <tcb-offsets.h>
|
||||
|
||||
#ifndef LOCK_INSTR
|
||||
# ifdef UP
|
||||
# define LOCK_INSTR /* nothing */
|
||||
# else
|
||||
# define LOCK_INSTR "lock;"
|
||||
# ifndef LOCK_INSTR
|
||||
# ifdef UP
|
||||
# define LOCK_INSTR /* nothing */
|
||||
# else
|
||||
# define LOCK_INSTR "lock;"
|
||||
# endif
|
||||
# endif
|
||||
#else
|
||||
# ifndef LOCK
|
||||
# ifdef UP
|
||||
# define LOCK
|
||||
# else
|
||||
# define LOCK lock
|
||||
# endif
|
||||
# endif
|
||||
#endif
|
||||
|
||||
@@ -38,11 +48,13 @@
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_CMP_REQUEUE 4
|
||||
#define FUTEX_WAKE_OP 5
|
||||
#define FUTEX_LOCK_PI 6
|
||||
#define FUTEX_UNLOCK_PI 7
|
||||
#define FUTEX_TRYLOCK_PI 8
|
||||
#define FUTEX_PRIVATE_FLAG 128
|
||||
|
||||
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
|
||||
|
||||
/* Values for 'private' parameter of locking macros. Yes, the
|
||||
definition seems to be backwards. But it is not. The bit will be
|
||||
@@ -50,6 +62,8 @@
|
||||
#define LLL_PRIVATE 0
|
||||
#define LLL_SHARED FUTEX_PRIVATE_FLAG
|
||||
|
||||
#ifndef __ASSEMBLER__
|
||||
|
||||
#if !defined NOT_IN_libc || defined IS_IN_rtld
|
||||
/* In libc.so or ld.so all futexes are private. */
|
||||
# ifdef __ASSUME_PRIVATE_FUTEX
|
||||
@@ -76,13 +90,13 @@
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* Initializer for compatibility lock. */
|
||||
#define LLL_MUTEX_LOCK_INITIALIZER (0)
|
||||
#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
|
||||
#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2)
|
||||
/* Initializer for lock. */
|
||||
#define LLL_LOCK_INITIALIZER (0)
|
||||
#define LLL_LOCK_INITIALIZER_LOCKED (1)
|
||||
#define LLL_LOCK_INITIALIZER_WAITERS (2)
|
||||
|
||||
/* Delay in spinlock loop. */
|
||||
#define BUSY_WAIT_NOP asm ("rep; nop")
|
||||
#define BUSY_WAIT_NOP asm ("rep; nop")
|
||||
|
||||
|
||||
#define LLL_STUB_UNWIND_INFO_START \
|
||||
@@ -196,7 +210,7 @@ LLL_STUB_UNWIND_INFO_END
|
||||
: "=a" (__status) \
|
||||
: "0" (SYS_futex), "D" (futex), \
|
||||
"S" (__lll_private_flag (FUTEX_WAIT, private)), \
|
||||
"d" (_val), "r" (__to) \
|
||||
"d" (_val), "r" (__to) \
|
||||
: "memory", "cc", "r11", "cx"); \
|
||||
__status; \
|
||||
})
|
||||
@@ -215,242 +229,308 @@ LLL_STUB_UNWIND_INFO_END
|
||||
} while (0)
|
||||
|
||||
|
||||
|
||||
/* Does not preserve %eax and %ecx. */
extern int __lll_mutex_lock_wait (int *__futex, int __val) attribute_hidden;
/* Does not preserver %eax, %ecx, and %edx. */
extern int __lll_mutex_timedlock_wait (int *__futex, int __val,
const struct timespec *__abstime)
attribute_hidden;
/* Preserves all registers but %eax. */
extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;


/* NB: in the lll_mutex_trylock macro we simply return the value in %eax
/* NB: in the lll_trylock macro we simply return the value in %eax
after the cmpxchg instruction. In case the operation succeded this
value is zero. In case the operation failed, the cmpxchg instruction
has loaded the current value of the memory work which is guaranteed
to be nonzero. */
#define lll_mutex_trylock(futex) \
#if defined NOT_IN_libc || defined UP
# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
#else
# define __lll_trylock_asm "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
"je 0f\n\t" \
"lock; cmpxchgl %2, %1\n\t" \
"jmp 1f\n\t" \
"0:\tcmpxchgl %2, %1\n\t" \
"1:"
#endif

#define lll_trylock(futex) \
({ int ret; \
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
__asm __volatile (__lll_trylock_asm \
: "=a" (ret), "=m" (futex) \
: "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
"0" (LLL_MUTEX_LOCK_INITIALIZER) \
: "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
"0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })

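The fast path of lll_trylock above is a single (possibly lock-prefixed) cmpxchg on the futex word. For readers more comfortable with C than with the constraint strings, here is a minimal sketch of the same operation using the GCC __sync builtin; the function name is illustrative, not a glibc symbol.

/* Illustrative only: try to move the futex word from 0 (unlocked) to
   1 (locked, no waiters).  __sync_val_compare_and_swap returns the old
   value, so the result is 0 on success and the nonzero current state on
   failure -- the same %eax convention described in the comment above.  */
static inline int example_trylock (int *futex)
{
  return __sync_val_compare_and_swap (futex, 0, 1);
}
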
#define lll_robust_mutex_trylock(futex, id) \
|
||||
#define lll_robust_trylock(futex, id) \
|
||||
({ int ret; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
|
||||
: "=a" (ret), "=m" (futex) \
|
||||
: "r" (id), "m" (futex), \
|
||||
"0" (LLL_MUTEX_LOCK_INITIALIZER) \
|
||||
: "r" (id), "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
|
||||
: "memory"); \
|
||||
ret; })
|
||||
|
||||
|
||||
#define lll_mutex_cond_trylock(futex) \
|
||||
#define lll_cond_trylock(futex) \
|
||||
({ int ret; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
|
||||
: "=a" (ret), "=m" (futex) \
|
||||
: "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \
|
||||
"m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER) \
|
||||
: "r" (LLL_LOCK_INITIALIZER_WAITERS), \
|
||||
"m" (futex), "0" (LLL_LOCK_INITIALIZER) \
|
||||
: "memory"); \
|
||||
ret; })
|
||||
|
||||
|
||||
#define lll_mutex_lock(futex) \
|
||||
(void) ({ int ignore1, ignore2, ignore3; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
|
||||
#if defined NOT_IN_libc || defined UP
|
||||
# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %4, %2\n\t" \
|
||||
"jnz 1f\n\t"
|
||||
#else
|
||||
# define __lll_lock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
|
||||
"je 0f\n\t" \
|
||||
"lock; cmpxchgl %4, %2\n\t" \
|
||||
"jnz 1f\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_mutex_lock_%=, @function\n" \
|
||||
"_L_mutex_lock_%=:\n" \
|
||||
"1:\tleaq %2, %%rdi\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_mutex_lock_wait\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_mutex_lock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_5 \
|
||||
"24:" \
|
||||
: "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
|
||||
"=a" (ignore3) \
|
||||
: "0" (1), "m" (futex), "3" (0) \
|
||||
: "cx", "r11", "cc", "memory"); })
|
||||
"jmp 24f\n" \
|
||||
"0:\tcmpxchgl %4, %2\n\t" \
|
||||
"jnz 1f\n\t"
|
||||
#endif
|
||||
|
||||
#define lll_lock(futex, private) \
|
||||
(void) \
|
||||
({ int ignore1, ignore2, ignore3; \
|
||||
if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
|
||||
__asm __volatile (__lll_lock_asm_start \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_lock_%=, @function\n" \
|
||||
"_L_lock_%=:\n" \
|
||||
"1:\tleaq %2, %%rdi\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_lock_wait_private\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_lock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_5 \
|
||||
"24:" \
|
||||
: "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \
|
||||
"=a" (ignore3) \
|
||||
: "0" (1), "m" (futex), "3" (0) \
|
||||
: "cx", "r11", "cc", "memory"); \
|
||||
else \
|
||||
__asm __volatile (__lll_lock_asm_start \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_lock_%=, @function\n" \
|
||||
"_L_lock_%=:\n" \
|
||||
"1:\tleaq %2, %%rdi\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_lock_wait\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_lock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_5 \
|
||||
"24:" \
|
||||
: "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
|
||||
"=a" (ignore3) \
|
||||
: "1" (1), "m" (futex), "3" (0), "0" (private) \
|
||||
: "cx", "r11", "cc", "memory"); \
|
||||
}) \
|
||||
|
||||
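The lll_lock macro just above dispatches at expansion time: when the private argument is the compile-time constant LLL_PRIVATE it calls __lll_lock_wait_private, otherwise it passes the flag in %esi and calls __lll_lock_wait. A reduced C sketch of that __builtin_constant_p dispatch follows; slow_lock_private and slow_lock are placeholders, not the glibc stubs.

/* Placeholder slow paths standing in for the __lll_lock_wait* stubs.  */
extern void slow_lock_private (int *futex);
extern void slow_lock (int *futex, int private_flag);

#define EXAMPLE_LLL_PRIVATE 0

/* Because this is a macro, __builtin_constant_p sees the literal
   argument, so the private branch is chosen with no runtime test when
   the caller writes EXAMPLE_LLL_PRIVATE directly.  */
#define example_lock(futex, private)                                  \
  (__builtin_constant_p (private) && (private) == EXAMPLE_LLL_PRIVATE \
   ? slow_lock_private (&(futex))                                     \
   : slow_lock (&(futex), (private)))
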
#define lll_robust_mutex_lock(futex, id) \
|
||||
#define lll_robust_lock(futex, id, private) \
|
||||
({ int result, ignore1, ignore2; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
|
||||
"jnz 1f\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_robust_mutex_lock_%=, @function\n" \
|
||||
"_L_robust_mutex_lock_%=:\n" \
|
||||
".type _L_robust_lock_%=, @function\n" \
|
||||
"_L_robust_lock_%=:\n" \
|
||||
"1:\tleaq %2, %%rdi\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_robust_mutex_lock_wait\n" \
|
||||
"3:\tcallq __lll_robust_lock_wait\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_robust_mutex_lock_%=, 6b-1b\n\t" \
|
||||
"6:\t.size _L_robust_lock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_5 \
|
||||
"24:" \
|
||||
: "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \
|
||||
: "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
|
||||
"=a" (result) \
|
||||
: "0" (id), "m" (futex), "3" (0) \
|
||||
: "1" (id), "m" (futex), "3" (0), "0" (private) \
|
||||
: "cx", "r11", "cc", "memory"); \
|
||||
result; })
|
||||
|
||||
#define lll_cond_lock(futex, private) \
|
||||
(void) \
|
||||
({ int ignore1, ignore2, ignore3; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
|
||||
"jnz 1f\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_cond_lock_%=, @function\n" \
|
||||
"_L_cond_lock_%=:\n" \
|
||||
"1:\tleaq %2, %%rdi\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_lock_wait\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_cond_lock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_5 \
|
||||
"24:" \
|
||||
: "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
|
||||
"=a" (ignore3) \
|
||||
: "1" (2), "m" (futex), "3" (0), "0" (private) \
|
||||
: "cx", "r11", "cc", "memory"); \
|
||||
})
|
||||
|
||||
#define lll_mutex_cond_lock(futex) \
|
||||
(void) ({ int ignore1, ignore2, ignore3; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
|
||||
"jnz 1f\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_mutex_cond_lock_%=, @function\n" \
|
||||
"_L_mutex_cond_lock_%=:\n" \
|
||||
"1:\tleaq %2, %%rdi\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_mutex_lock_wait\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_mutex_cond_lock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_5 \
|
||||
"24:" \
|
||||
: "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
|
||||
"=a" (ignore3) \
|
||||
: "0" (2), "m" (futex), "3" (0) \
|
||||
: "cx", "r11", "cc", "memory"); })
|
||||
|
||||
|
||||
#define lll_robust_mutex_cond_lock(futex, id) \
|
||||
#define lll_robust_cond_lock(futex, id, private) \
|
||||
({ int result, ignore1, ignore2; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
|
||||
"jnz 1f\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_robust_mutex_cond_lock_%=, @function\n" \
|
||||
"_L_robust_mutex_cond_lock_%=:\n" \
|
||||
".type _L_robust_cond_lock_%=, @function\n" \
|
||||
"_L_robust_cond_lock_%=:\n" \
|
||||
"1:\tleaq %2, %%rdi\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_robust_mutex_lock_wait\n" \
|
||||
"3:\tcallq __lll_robust_lock_wait\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_robust_mutex_cond_lock_%=, 6b-1b\n\t" \
|
||||
"6:\t.size _L_robust_cond_lock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_5 \
|
||||
"24:" \
|
||||
: "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \
|
||||
: "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
|
||||
"=a" (result) \
|
||||
: "0" (id | FUTEX_WAITERS), "m" (futex), "3" (0) \
|
||||
: "1" (id | FUTEX_WAITERS), "m" (futex), "3" (0), \
|
||||
"0" (private) \
|
||||
: "cx", "r11", "cc", "memory"); \
|
||||
result; })
|
||||
|
||||
|
||||
#define lll_mutex_timedlock(futex, timeout) \
|
||||
#define lll_timedlock(futex, timeout, private) \
|
||||
({ int result, ignore1, ignore2, ignore3; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t" \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
|
||||
"jnz 1f\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_mutex_timedlock_%=, @function\n" \
|
||||
"_L_mutex_timedlock_%=:\n" \
|
||||
".type _L_timedlock_%=, @function\n" \
|
||||
"_L_timedlock_%=:\n" \
|
||||
"1:\tleaq %4, %%rdi\n" \
|
||||
"0:\tmovq %8, %%rdx\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_mutex_timedlock_wait\n" \
|
||||
"3:\tcallq __lll_timedlock_wait\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_mutex_timedlock_%=, 6b-1b\n\t" \
|
||||
"6:\t.size _L_timedlock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_6 \
|
||||
"24:" \
|
||||
: "=a" (result), "=&D" (ignore1), "=S" (ignore2), \
|
||||
: "=a" (result), "=D" (ignore1), "=S" (ignore2), \
|
||||
"=&d" (ignore3), "=m" (futex) \
|
||||
: "0" (0), "2" (1), "m" (futex), "m" (timeout) \
|
||||
: "0" (0), "1" (1), "m" (futex), "m" (timeout), \
|
||||
"2" (private) \
|
||||
: "memory", "cx", "cc", "r10", "r11"); \
|
||||
result; })
|
||||
|
||||
|
||||
#define lll_robust_mutex_timedlock(futex, timeout, id) \
|
||||
#define lll_robust_timedlock(futex, timeout, id, private) \
|
||||
({ int result, ignore1, ignore2, ignore3; \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t" \
|
||||
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
|
||||
"jnz 1f\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_robust_mutex_timedlock_%=, @function\n" \
|
||||
"_L_robust_mutex_timedlock_%=:\n" \
|
||||
".type _L_robust_timedlock_%=, @function\n" \
|
||||
"_L_robust_timedlock_%=:\n" \
|
||||
"1:\tleaq %4, %%rdi\n" \
|
||||
"0:\tmovq %8, %%rdx\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_robust_mutex_timedlock_wait\n" \
|
||||
"3:\tcallq __lll_robust_timedlock_wait\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_robust_mutex_timedlock_%=, 6b-1b\n\t" \
|
||||
"6:\t.size _L_robust_timedlock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_6 \
|
||||
"24:" \
|
||||
: "=a" (result), "=&D" (ignore1), "=S" (ignore2), \
|
||||
: "=a" (result), "=D" (ignore1), "=S" (ignore2), \
|
||||
"=&d" (ignore3), "=m" (futex) \
|
||||
: "0" (0), "2" (id), "m" (futex), "m" (timeout) \
|
||||
: "0" (0), "1" (id), "m" (futex), "m" (timeout), \
|
||||
"2" (private) \
|
||||
: "memory", "cx", "cc", "r10", "r11"); \
|
||||
result; })
|
||||
|
||||
#if defined NOT_IN_libc || defined UP
|
||||
# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t" \
|
||||
"jne 1f\n\t"
|
||||
#else
|
||||
# define __lll_unlock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
|
||||
"je 0f\n\t" \
|
||||
"lock; decl %0\n\t" \
|
||||
"jne 1f\n\t" \
|
||||
"jmp 24f\n\t" \
|
||||
"0:\tdecl %0\n\t" \
|
||||
"jne 1f\n\t"
|
||||
#endif
|
||||
|
||||
#define lll_mutex_unlock(futex) \
|
||||
(void) ({ int ignore; \
|
||||
__asm __volatile (LOCK_INSTR "decl %0\n\t" \
|
||||
"jne 1f\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_mutex_unlock_%=, @function\n" \
|
||||
"_L_mutex_unlock_%=:\n" \
|
||||
"1:\tleaq %0, %%rdi\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_mutex_unlock_wake\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_mutex_unlock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_5 \
|
||||
"24:" \
|
||||
: "=m" (futex), "=&D" (ignore) \
|
||||
: "m" (futex) \
|
||||
: "ax", "cx", "r11", "cc", "memory"); })
|
||||
#define lll_unlock(futex, private) \
|
||||
(void) \
|
||||
({ int ignore; \
|
||||
if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
|
||||
__asm __volatile (__lll_unlock_asm_start \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_unlock_%=, @function\n" \
|
||||
"_L_unlock_%=:\n" \
|
||||
"1:\tleaq %0, %%rdi\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_unlock_wake_private\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_unlock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_5 \
|
||||
"24:" \
|
||||
: "=m" (futex), "=&D" (ignore) \
|
||||
: "m" (futex) \
|
||||
: "ax", "cx", "r11", "cc", "memory"); \
|
||||
else \
|
||||
__asm __volatile (__lll_unlock_asm_start \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_unlock_%=, @function\n" \
|
||||
"_L_unlock_%=:\n" \
|
||||
"1:\tleaq %0, %%rdi\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_unlock_wake\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_unlock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_5 \
|
||||
"24:" \
|
||||
: "=m" (futex), "=&D" (ignore) \
|
||||
: "m" (futex), "S" (private) \
|
||||
: "ax", "cx", "r11", "cc", "memory"); \
|
||||
})
|
||||
|
||||
#define lll_robust_unlock(futex, private) \
|
||||
do \
|
||||
{ \
|
||||
int ignore; \
|
||||
__asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \
|
||||
"jne 1f\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_robust_unlock_%=, @function\n" \
|
||||
"_L_robust_unlock_%=:\n" \
|
||||
"1:\tleaq %0, %%rdi\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_unlock_wake\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_robust_unlock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_5 \
|
||||
"24:" \
|
||||
: "=m" (futex), "=&D" (ignore) \
|
||||
: "i" (FUTEX_WAITERS), "m" (futex), \
|
||||
"S" (private) \
|
||||
: "ax", "cx", "r11", "cc", "memory"); \
|
||||
} \
|
||||
while (0)
|
||||
|
||||
#define lll_robust_mutex_unlock(futex) \
|
||||
(void) ({ int ignore; \
|
||||
__asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \
|
||||
"jne 1f\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_robust_mutex_unlock_%=, @function\n" \
|
||||
"_L_robust_mutex_unlock_%=:\n" \
|
||||
"1:\tleaq %0, %%rdi\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_mutex_unlock_wake\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_robust_mutex_unlock_%=, 6b-1b\n\t"\
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_5 \
|
||||
"24:" \
|
||||
: "=m" (futex), "=&D" (ignore) \
|
||||
: "i" (FUTEX_WAITERS), "m" (futex) \
|
||||
: "ax", "cx", "r11", "cc", "memory"); })
|
||||
|
||||
|
||||
#define lll_robust_mutex_dead(futex) \
|
||||
(void) ({ int ignore; \
|
||||
__asm __volatile (LOCK_INSTR "orl %3, (%2)\n\t" \
|
||||
"syscall" \
|
||||
: "=m" (futex), "=a" (ignore) \
|
||||
: "D" (&(futex)), "i" (FUTEX_OWNER_DIED), \
|
||||
"S" (FUTEX_WAKE), "1" (__NR_futex), \
|
||||
"d" (1) \
|
||||
: "cx", "r11", "cc", "memory"); })
|
||||
|
||||
#define lll_robust_dead(futex, private) \
|
||||
do \
|
||||
{ \
|
||||
int ignore; \
|
||||
__asm __volatile (LOCK_INSTR "orl %3, (%2)\n\t" \
|
||||
"syscall" \
|
||||
: "=m" (futex), "=a" (ignore) \
|
||||
: "D" (&(futex)), "i" (FUTEX_OWNER_DIED), \
|
||||
"S" (__lll_private_flag (FUTEX_WAKE, private)), \
|
||||
"1" (__NR_futex), "d" (1) \
|
||||
: "cx", "r11", "cc", "memory"); \
|
||||
} \
|
||||
while (0)
|
||||
|
||||
/* Returns non-zero if error happened, zero if success. */
|
||||
#define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val) \
|
||||
@@ -461,117 +541,13 @@ extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;
|
||||
__asm __volatile ("syscall" \
|
||||
: "=a" (__res) \
|
||||
: "0" (__NR_futex), "D" ((void *) ftx), \
|
||||
"S" (FUTEX_CMP_REQUEUE), "d" (nr_wake), \
|
||||
"r" (__nr_move), "r" (__mutex), "r" (__val) \
|
||||
"S" (FUTEX_CMP_REQUEUE), "d" (nr_wake), \
|
||||
"r" (__nr_move), "r" (__mutex), "r" (__val) \
|
||||
: "cx", "r11", "cc", "memory"); \
|
||||
__res < 0; })
|
||||
|
||||
|
||||
#define lll_mutex_islocked(futex) \
(futex != LLL_MUTEX_LOCK_INITIALIZER)


/* We have a separate internal lock implementation which is not tied
to binary compatibility. */

/* Type for lock object. */
typedef int lll_lock_t;

/* Initializers for lock. */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)


/* The states of a lock are:
0 - untaken
1 - taken by one user
2 - taken by more users */

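These three states are what the lock and unlock macros in this header manipulate. A rough C rendering of the protocol is below, assuming futex_wait/futex_wake are thin wrappers around the futex syscall; it is a sketch of the idea, not the glibc implementation, which lives in the inline asm and the __lll_*_wait stubs.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Thin wrappers around the futex syscall (error handling omitted).  */
static void futex_wait (int *addr, int val)
{ syscall (SYS_futex, addr, FUTEX_WAIT, val, NULL, NULL, 0); }

static void futex_wake (int *addr, int nr)
{ syscall (SYS_futex, addr, FUTEX_WAKE, nr, NULL, NULL, 0); }

static void example_lock (int *futex)
{
  /* Fast path: 0 -> 1 takes an uncontended lock.  */
  if (__sync_val_compare_and_swap (futex, 0, 1) != 0)
    /* Contended: mark the lock as "taken by more users" (2) and sleep
       until the exchange finds it free again.  */
    while (__sync_lock_test_and_set (futex, 2) != 0)
      futex_wait (futex, 2);
}

static void example_unlock (int *futex)
{
  /* If the old value was 2, a waiter may be sleeping: reset to 0 and
     wake exactly one of them.  */
  if (__sync_fetch_and_sub (futex, 1) != 1)
    {
      *futex = 0;
      futex_wake (futex, 1);
    }
}
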
#if defined NOT_IN_libc || defined UP
|
||||
# define lll_trylock(futex) lll_mutex_trylock (futex)
|
||||
# define lll_lock(futex) lll_mutex_lock (futex)
|
||||
# define lll_unlock(futex) lll_mutex_unlock (futex)
|
||||
#else
|
||||
/* Special versions of the macros for use in libc itself. They avoid
|
||||
the lock prefix when the thread library is not used.
|
||||
|
||||
The code sequence to avoid unnecessary lock prefixes is what the AMD
|
||||
guys suggested. If you do not like it, bring it up with AMD.
|
||||
|
||||
XXX In future we might even want to avoid it on UP machines. */
|
||||
|
||||
# define lll_trylock(futex) \
|
||||
({ unsigned char ret; \
|
||||
__asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
|
||||
"je 0f\n\t" \
|
||||
"lock; cmpxchgl %2, %1\n\t" \
|
||||
"jmp 1f\n" \
|
||||
"0:\tcmpxchgl %2, %1\n\t" \
|
||||
"1:setne %0" \
|
||||
: "=a" (ret), "=m" (futex) \
|
||||
: "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
|
||||
"0" (LLL_MUTEX_LOCK_INITIALIZER) \
|
||||
: "memory"); \
|
||||
ret; })
|
||||
|
||||
|
||||
# define lll_lock(futex) \
|
||||
(void) ({ int ignore1, ignore2, ignore3; \
|
||||
__asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
|
||||
"je 0f\n\t" \
|
||||
"lock; cmpxchgl %0, %2\n\t" \
|
||||
"jnz 1f\n\t" \
|
||||
"jmp 24f\n" \
|
||||
"0:\tcmpxchgl %0, %2\n\t" \
|
||||
"jnz 1f\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_lock_%=, @function\n" \
|
||||
"_L_lock_%=:\n" \
|
||||
"1:\tleaq %2, %%rdi\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_mutex_lock_wait\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_lock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_5 \
|
||||
"24:" \
|
||||
: "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
|
||||
"=a" (ignore3) \
|
||||
: "0" (1), "m" (futex), "3" (0) \
|
||||
: "cx", "r11", "cc", "memory"); })
|
||||
|
||||
|
||||
# define lll_unlock(futex) \
|
||||
(void) ({ int ignore; \
|
||||
__asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
|
||||
"je 0f\n\t" \
|
||||
"lock; decl %0\n\t" \
|
||||
"jne 1f\n\t" \
|
||||
"jmp 24f\n" \
|
||||
"0:\tdecl %0\n\t" \
|
||||
"jne 1f\n\t" \
|
||||
".subsection 1\n\t" \
|
||||
".type _L_unlock_%=, @function\n" \
|
||||
"_L_unlock_%=:\n" \
|
||||
"1:\tleaq %0, %%rdi\n" \
|
||||
"2:\tsubq $128, %%rsp\n" \
|
||||
"3:\tcallq __lll_mutex_unlock_wake\n" \
|
||||
"4:\taddq $128, %%rsp\n" \
|
||||
"5:\tjmp 24f\n" \
|
||||
"6:\t.size _L_unlock_%=, 6b-1b\n\t" \
|
||||
".previous\n" \
|
||||
LLL_STUB_UNWIND_INFO_5 \
|
||||
"24:" \
|
||||
: "=m" (futex), "=&D" (ignore) \
|
||||
: "m" (futex) \
|
||||
: "ax", "cx", "r11", "cc", "memory"); })
|
||||
#endif
|
||||
|
||||
|
||||
#define lll_islocked(futex) \
|
||||
(futex != LLL_MUTEX_LOCK_INITIALIZER)
|
||||
(futex != LLL_LOCK_INITIALIZER)
|
||||
|
||||
|
||||
/* The kernel notifies a process with uses CLONE_CLEARTID via futex
|
||||
@@ -610,25 +586,6 @@ extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
|
||||
} \
|
||||
__result; })
|
||||
|
||||
|
||||
/* Conditional variable handling. */
|
||||
|
||||
extern void __lll_cond_wait (pthread_cond_t *cond) attribute_hidden;
|
||||
extern int __lll_cond_timedwait (pthread_cond_t *cond,
|
||||
const struct timespec *abstime)
|
||||
attribute_hidden;
|
||||
extern void __lll_cond_wake (pthread_cond_t *cond) attribute_hidden;
|
||||
extern void __lll_cond_broadcast (pthread_cond_t *cond) attribute_hidden;
|
||||
|
||||
|
||||
#define lll_cond_wait(cond) \
|
||||
__lll_cond_wait (cond)
|
||||
#define lll_cond_timedwait(cond, abstime) \
|
||||
__lll_cond_timedwait (cond, abstime)
|
||||
#define lll_cond_wake(cond) \
|
||||
__lll_cond_wake (cond)
|
||||
#define lll_cond_broadcast(cond) \
|
||||
__lll_cond_broadcast (cond)
|
||||
|
||||
#endif /* !__ASSEMBLER__ */
|
||||
|
||||
#endif /* lowlevellock.h */
|
||||
|
@ -1,4 +1,5 @@
|
||||
/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
|
||||
/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
|
||||
Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
@ -19,33 +20,40 @@
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <pthread-errnos.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelrobustlock.h>
|
||||
#include <kernel-features.h>
|
||||
|
||||
.text
|
||||
|
||||
#ifndef LOCK
|
||||
# ifdef UP
|
||||
# define LOCK
|
||||
# else
|
||||
# define LOCK lock
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#define SYS_futex 202
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_WAITERS 0x80000000
|
||||
#define FUTEX_OWNER_DIED 0x40000000
|
||||
|
||||
#ifdef __ASSUME_PRIVATE_FUTEX
# define LOAD_FUTEX_WAIT(reg) \
xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
#else
# if FUTEX_WAIT == 0
# define LOAD_FUTEX_WAIT(reg) \
xorl $FUTEX_PRIVATE_FLAG, reg ; \
andl %fs:PRIVATE_FUTEX, reg
# else
# define LOAD_FUTEX_WAIT(reg) \
xorl $FUTEX_PRIVATE_FLAG, reg ; \
andl %fs:PRIVATE_FUTEX, reg ; \
orl $FUTEX_WAIT, reg
# endif
#endif

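LOAD_FUTEX_WAIT turns the private argument (LLL_PRIVATE, i.e. 0, or LLL_SHARED, i.e. FUTEX_PRIVATE_FLAG) into the futex operation word; in the non-__ASSUME_PRIVATE_FUTEX case it also masks with the per-thread %fs:PRIVATE_FUTEX field so the flag is dropped on kernels without private-futex support. The same computation in C, as a sketch -- tcb_private_futex is an assumed stand-in for that TCB field:

#define FUTEX_WAIT 0
#define FUTEX_PRIVATE_FLAG 128

static int example_load_futex_wait (int private_arg, int tcb_private_futex)
{
  int reg = private_arg;
  reg ^= FUTEX_PRIVATE_FLAG;   /* LLL_PRIVATE -> flag set, LLL_SHARED -> 0 */
  reg &= tcb_private_futex;    /* clear the flag if the kernel lacks it    */
  return reg | FUTEX_WAIT;     /* finally select the WAIT operation        */
}
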
/* For the calculation see asm/vsyscall.h. */
|
||||
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
|
||||
|
||||
|
||||
.globl __lll_robust_mutex_lock_wait
|
||||
.type __lll_robust_mutex_lock_wait,@function
|
||||
.hidden __lll_robust_mutex_lock_wait
|
||||
.globl __lll_robust_lock_wait
|
||||
.type __lll_robust_lock_wait,@function
|
||||
.hidden __lll_robust_lock_wait
|
||||
.align 16
|
||||
__lll_robust_mutex_lock_wait:
|
||||
__lll_robust_lock_wait:
|
||||
cfi_startproc
|
||||
pushq %r10
|
||||
cfi_adjust_cfa_offset(8)
|
||||
@ -55,11 +63,7 @@ __lll_robust_mutex_lock_wait:
|
||||
cfi_offset(%rdx, -24)
|
||||
|
||||
xorq %r10, %r10 /* No timeout. */
|
||||
#if FUTEX_WAIT == 0
|
||||
xorl %esi, %esi
|
||||
#else
|
||||
movl $FUTEX_WAIT, %esi
|
||||
#endif
|
||||
LOAD_FUTEX_WAIT (%esi)
|
||||
|
||||
4: movl %eax, %edx
|
||||
orl $FUTEX_WAITERS, %edx
|
||||
@ -97,14 +101,14 @@ __lll_robust_mutex_lock_wait:
|
||||
cfi_restore(%r10)
|
||||
retq
|
||||
cfi_endproc
|
||||
.size __lll_robust_mutex_lock_wait,.-__lll_robust_mutex_lock_wait
|
||||
.size __lll_robust_lock_wait,.-__lll_robust_lock_wait
|
||||
|
||||
|
||||
.globl __lll_robust_mutex_timedlock_wait
|
||||
.type __lll_robust_mutex_timedlock_wait,@function
|
||||
.hidden __lll_robust_mutex_timedlock_wait
|
||||
.globl __lll_robust_timedlock_wait
|
||||
.type __lll_robust_timedlock_wait,@function
|
||||
.hidden __lll_robust_timedlock_wait
|
||||
.align 16
|
||||
__lll_robust_mutex_timedlock_wait:
|
||||
__lll_robust_timedlock_wait:
|
||||
cfi_startproc
|
||||
/* Check for a valid timeout value. */
|
||||
cmpq $1000000000, 8(%rdx)
|
||||
@ -122,10 +126,12 @@ __lll_robust_mutex_timedlock_wait:
|
||||
cfi_offset(%r9, -24)
|
||||
cfi_offset(%r12, -32)
|
||||
cfi_offset(%r13, -40)
|
||||
pushq %rsi
|
||||
cfi_adjust_cfa_offset(8)
|
||||
|
||||
/* Stack frame for the timespec and timeval structs. */
|
||||
subq $24, %rsp
|
||||
cfi_adjust_cfa_offset(24)
|
||||
subq $32, %rsp
|
||||
cfi_adjust_cfa_offset(32)
|
||||
|
||||
movq %rdi, %r12
|
||||
movq %rdx, %r13
|
||||
@ -174,11 +180,8 @@ __lll_robust_mutex_timedlock_wait:
|
||||
jnz 5f
|
||||
|
||||
2: movq %rsp, %r10
|
||||
#if FUTEX_WAIT == 0
|
||||
xorl %esi, %esi
|
||||
#else
|
||||
movl $FUTEX_WAIT, %esi
|
||||
#endif
|
||||
movl 32(%rsp), %esi
|
||||
LOAD_FUTEX_WAIT (%esi)
|
||||
movq %r12, %rdi
|
||||
movl $SYS_futex, %eax
|
||||
syscall
|
||||
@ -195,8 +198,8 @@ __lll_robust_mutex_timedlock_wait:
|
||||
cmpxchgl %edx, (%r12)
|
||||
jnz 7f
|
||||
|
||||
6: addq $24, %rsp
|
||||
cfi_adjust_cfa_offset(-24)
|
||||
6: addq $40, %rsp
|
||||
cfi_adjust_cfa_offset(-40)
|
||||
popq %r13
|
||||
cfi_adjust_cfa_offset(-8)
|
||||
cfi_restore(%r13)
|
||||
@ -214,7 +217,7 @@ __lll_robust_mutex_timedlock_wait:
|
||||
3: movl $EINVAL, %eax
|
||||
retq
|
||||
|
||||
cfi_adjust_cfa_offset(56)
|
||||
cfi_adjust_cfa_offset(72)
|
||||
cfi_offset(%r8, -16)
|
||||
cfi_offset(%r9, -24)
|
||||
cfi_offset(%r12, -32)
|
||||
@ -226,4 +229,4 @@ __lll_robust_mutex_timedlock_wait:
|
||||
8: movl $ETIMEDOUT, %eax
|
||||
jmp 6b
|
||||
cfi_endproc
|
||||
.size __lll_robust_mutex_timedlock_wait,.-__lll_robust_mutex_timedlock_wait
|
||||
.size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
|
||||
|
@ -18,18 +18,9 @@
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelbarrier.h>
|
||||
|
||||
#define SYS_futex 202
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define LOCK
|
||||
#endif
|
||||
|
||||
|
||||
.text
|
||||
|
||||
@@ -142,21 +133,29 @@ pthread_barrier_wait:

retq

1: addq $MUTEX, %rdi
callq __lll_mutex_lock_wait
1: movl PRIVATE(%rdi), %esi
addq $MUTEX, %rdi
xorl $LLL_SHARED, %esi
callq __lll_lock_wait
subq $MUTEX, %rdi
jmp 2b

4: addq $MUTEX, %rdi
callq __lll_mutex_unlock_wake
4: movl PRIVATE(%rdi), %esi
addq $MUTEX, %rdi
xorl $LLL_SHARED, %esi
callq __lll_unlock_wake
jmp 5b

6: addq $MUTEX, %rdi
callq __lll_mutex_unlock_wake
6: movl PRIVATE(%rdi), %esi
addq $MUTEX, %rdi
xorl $LLL_SHARED, %esi
callq __lll_unlock_wake
subq $MUTEX, %rdi
jmp 7b

9: addq $MUTEX, %rdi
callq __lll_mutex_unlock_wake
9: movl PRIVATE(%rdi), %esi
addq $MUTEX, %rdi
xorl $LLL_SHARED, %esi
callq __lll_unlock_wake
jmp 10b
.size pthread_barrier_wait,.-pthread_barrier_wait

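Each slow-path call in pthread_barrier_wait now loads the barrier's private field and XORs it with LLL_SHARED before calling the generic stub, converting the stored flag into the private/shared argument the __lll_* routines expect. Roughly, in C, assuming the private field holds FUTEX_PRIVATE_FLAG for process-private barriers and 0 for process-shared ones:

#define FUTEX_PRIVATE_FLAG 128
#define LLL_SHARED FUTEX_PRIVATE_FLAG

/* private_field ^ LLL_SHARED yields LLL_PRIVATE (0) for a
   process-private barrier and LLL_SHARED for a process-shared one.  */
static int example_barrier_flag (int private_field)
{
  return private_field ^ LLL_SHARED;
}
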
@ -1,4 +1,5 @@
|
||||
/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
|
||||
/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
|
||||
Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
@ -19,23 +20,11 @@
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <shlib-compat.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelcond.h>
|
||||
#include <kernel-features.h>
|
||||
#include <pthread-pi-defines.h>
|
||||
|
||||
#ifdef UP
|
||||
# define LOCK
|
||||
#else
|
||||
# define LOCK lock
|
||||
#endif
|
||||
|
||||
#define SYS_futex 202
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_REQUEUE 3
|
||||
#define FUTEX_CMP_REQUEUE 4
|
||||
|
||||
#define EINVAL 22
|
||||
#include <pthread-errnos.h>
|
||||
|
||||
|
||||
.text
|
||||
@ -115,7 +104,9 @@ __pthread_cond_broadcast:
|
||||
#if cond_lock != 0
|
||||
addq $cond_lock, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_lock_wait
|
||||
#if cond_lock != 0
|
||||
subq $cond_lock, %rdi
|
||||
#endif
|
||||
@ -123,12 +114,16 @@ __pthread_cond_broadcast:
|
||||
|
||||
/* Unlock in loop requires wakeup. */
|
||||
5: addq $cond_lock-cond_futex, %rdi
|
||||
callq __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_unlock_wake
|
||||
jmp 6b
|
||||
|
||||
/* Unlock in loop requires wakeup. */
|
||||
7: addq $cond_lock-cond_futex, %rdi
|
||||
callq __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_unlock_wake
|
||||
subq $cond_lock-cond_futex, %rdi
|
||||
jmp 8b
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
|
||||
/* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
@ -19,23 +19,10 @@
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <shlib-compat.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelcond.h>
|
||||
#include <kernel-features.h>
|
||||
|
||||
#ifdef UP
|
||||
# define LOCK
|
||||
#else
|
||||
# define LOCK lock
|
||||
#endif
|
||||
|
||||
#define SYS_futex 202
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_WAKE_OP 5
|
||||
|
||||
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
|
||||
|
||||
#define EINVAL 22
|
||||
#include <pthread-errnos.h>
|
||||
|
||||
|
||||
.text
|
||||
@ -111,7 +98,9 @@ __pthread_cond_signal:
|
||||
#if cond_lock != 0
|
||||
addq $cond_lock, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_lock_wait
|
||||
#if cond_lock != 0
|
||||
subq $cond_lock, %rdi
|
||||
#endif
|
||||
@ -120,7 +109,9 @@ __pthread_cond_signal:
|
||||
/* Unlock in loop requires wakeup. */
|
||||
5:
|
||||
movq %r8, %rdi
|
||||
callq __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_unlock_wake
|
||||
jmp 6b
|
||||
.size __pthread_cond_signal, .-__pthread_cond_signal
|
||||
versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
|
||||
|
@ -19,19 +19,10 @@
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <shlib-compat.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelcond.h>
|
||||
#include <pthread-errnos.h>
|
||||
|
||||
#ifdef UP
|
||||
# define LOCK
|
||||
#else
|
||||
# define LOCK lock
|
||||
#endif
|
||||
|
||||
#define SYS_futex 202
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
|
||||
/* For the calculation see asm/vsyscall.h. */
|
||||
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
|
||||
|
||||
@ -301,7 +292,9 @@ __pthread_cond_timedwait:
|
||||
#if cond_lock != 0
|
||||
addq $cond_lock, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_lock_wait
|
||||
jmp 2b
|
||||
|
||||
/* Unlock in loop requires wakeup. */
|
||||
@ -309,7 +302,9 @@ __pthread_cond_timedwait:
|
||||
#if cond_lock != 0
|
||||
addq $cond_lock, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_unlock_wake
|
||||
jmp 4b
|
||||
|
||||
/* Locking in loop failed. */
|
||||
@ -317,7 +312,9 @@ __pthread_cond_timedwait:
|
||||
#if cond_lock != 0
|
||||
addq $cond_lock, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_lock_wait
|
||||
#if cond_lock != 0
|
||||
subq $cond_lock, %rdi
|
||||
#endif
|
||||
@ -328,7 +325,9 @@ __pthread_cond_timedwait:
|
||||
#if cond_lock != 0
|
||||
addq $cond_lock, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_unlock_wake
|
||||
jmp 11b
|
||||
|
||||
/* The initial unlocking of the mutex failed. */
|
||||
@ -345,7 +344,9 @@ __pthread_cond_timedwait:
|
||||
#if cond_lock != 0
|
||||
addq $cond_lock, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_unlock_wake
|
||||
|
||||
17: movq (%rsp), %rax
|
||||
jmp 18b
|
||||
|
@ -19,19 +19,10 @@
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <shlib-compat.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelcond.h>
|
||||
#include <tcb-offsets.h>
|
||||
|
||||
#ifdef UP
|
||||
# define LOCK
|
||||
#else
|
||||
# define LOCK lock
|
||||
#endif
|
||||
|
||||
#define SYS_futex 202
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
|
||||
|
||||
.text
|
||||
|
||||
@ -58,7 +49,9 @@ __condvar_cleanup:
|
||||
#if cond_lock != 0
|
||||
addq $cond_lock, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_lock_wait
|
||||
#if cond_lock != 0
|
||||
subq $cond_lock, %rdi
|
||||
#endif
|
||||
@ -105,7 +98,9 @@ __condvar_cleanup:
|
||||
#if cond_lock != 0
|
||||
addq $cond_lock, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_unlock_wake
|
||||
|
||||
/* Wake up all waiters to make sure no signal gets lost. */
|
||||
2: testq %r12, %r12
|
||||
@ -307,7 +302,9 @@ __pthread_cond_wait:
|
||||
#if cond_lock != 0
|
||||
addq $cond_lock, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_lock_wait
|
||||
jmp 2b
|
||||
|
||||
/* Unlock in loop requires wakeup. */
|
||||
@ -315,7 +312,9 @@ __pthread_cond_wait:
|
||||
#if cond_lock != 0
|
||||
addq $cond_lock, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_unlock_wake
|
||||
jmp 4b
|
||||
|
||||
/* Locking in loop failed. */
|
||||
@ -323,7 +322,9 @@ __pthread_cond_wait:
|
||||
#if cond_lock != 0
|
||||
addq $cond_lock, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_lock_wait
|
||||
#if cond_lock != 0
|
||||
subq $cond_lock, %rdi
|
||||
#endif
|
||||
@ -334,7 +335,9 @@ __pthread_cond_wait:
|
||||
#if cond_lock != 0
|
||||
addq $cond_lock, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_unlock_wake
|
||||
jmp 11b
|
||||
|
||||
/* The initial unlocking of the mutex failed. */
|
||||
@ -351,7 +354,9 @@ __pthread_cond_wait:
|
||||
#if cond_lock != 0
|
||||
addq $cond_lock, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
/* XYZ */
|
||||
movl $LLL_SHARED, %esi
|
||||
callq __lll_unlock_wake
|
||||
|
||||
13: movq %r10, %rax
|
||||
jmp 14b
|
||||
|
@ -19,17 +19,8 @@
|
||||
|
||||
#include <kernel-features.h>
|
||||
#include <tcb-offsets.h>
|
||||
#include <lowlevellock.h>
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define LOCK
|
||||
#endif
|
||||
|
||||
#define SYS_futex 202
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_PRIVATE_FLAG 128
|
||||
|
||||
.comm __fork_generation, 4, 4
|
||||
|
||||
|
@ -18,23 +18,12 @@
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelrwlock.h>
|
||||
#include <pthread-errnos.h>
|
||||
#include <kernel-features.h>
|
||||
|
||||
|
||||
#define SYS_futex 202
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_PRIVATE_FLAG 128
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define LOCK
|
||||
#endif
|
||||
|
||||
|
||||
.text
|
||||
|
||||
.globl __pthread_rwlock_rdlock
|
||||
@ -123,11 +112,11 @@ __pthread_rwlock_rdlock:
|
||||
movq %rdx, %rax
|
||||
retq
|
||||
|
||||
1:
|
||||
1: movl PSHARED(%rdi), %esi
|
||||
#if MUTEX != 0
|
||||
addq $MUTEX, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
callq __lll_lock_wait
|
||||
#if MUTEX != 0
|
||||
subq $MUTEX, %rdi
|
||||
#endif
|
||||
@ -139,11 +128,11 @@ __pthread_rwlock_rdlock:
|
||||
movl $EDEADLK, %edx
|
||||
jmp 9b
|
||||
|
||||
6:
|
||||
6: movl PSHARED(%rdi), %esi
|
||||
#if MUTEX != 0
|
||||
addq $MUTEX, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
callq __lll_unlock_wake
|
||||
#if MUTEX != 0
|
||||
subq $MUTEX, %rdi
|
||||
#endif
|
||||
@ -159,21 +148,21 @@ __pthread_rwlock_rdlock:
|
||||
movl $EAGAIN, %edx
|
||||
jmp 9b
|
||||
|
||||
10:
|
||||
10: movl PSHARED(%rdi), %esi
|
||||
#if MUTEX != 0
|
||||
addq $MUTEX, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
callq __lll_unlock_wake
|
||||
#if MUTEX != 0
|
||||
subq $MUTEX, %rdi
|
||||
#endif
|
||||
jmp 11b
|
||||
|
||||
12:
|
||||
12: movl PSHARED(%rdi), %esi
|
||||
#if MUTEX == 0
|
||||
addq $MUTEX, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
callq __lll_lock_wait
|
||||
#if MUTEX != 0
|
||||
subq $MUTEX, %rdi
|
||||
#endif
|
||||
|
@ -18,27 +18,15 @@
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelrwlock.h>
|
||||
#include <pthread-errnos.h>
|
||||
#include <kernel-features.h>
|
||||
|
||||
|
||||
#define SYS_futex 202
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_PRIVATE_FLAG 128
|
||||
|
||||
/* For the calculation see asm/vsyscall.h. */
|
||||
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
|
||||
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define LOCK
|
||||
#endif
|
||||
|
||||
|
||||
.text
|
||||
|
||||
.globl pthread_rwlock_timedrdlock
|
||||
@ -172,11 +160,11 @@ pthread_rwlock_timedrdlock:
|
||||
popq %r12
|
||||
retq
|
||||
|
||||
1:
|
||||
1: movl PSHARED(%rdi), %esi
|
||||
#if MUTEX != 0
|
||||
addq $MUTEX, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
callq __lll_lock_wait
|
||||
jmp 2b
|
||||
|
||||
14: cmpl %fs:TID, %eax
|
||||
@ -184,13 +172,13 @@ pthread_rwlock_timedrdlock:
|
||||
movl $EDEADLK, %edx
|
||||
jmp 9b
|
||||
|
||||
6:
|
||||
6: movl PSHARED(%r12), %esi
|
||||
#if MUTEX == 0
|
||||
movq %r12, %rdi
|
||||
#else
|
||||
leal MUTEX(%r12), %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
callq __lll_unlock_wake
|
||||
jmp 7b
|
||||
|
||||
/* Overflow. */
|
||||
@ -203,22 +191,22 @@ pthread_rwlock_timedrdlock:
|
||||
movl $EAGAIN, %edx
|
||||
jmp 9b
|
||||
|
||||
10:
|
||||
10: movl PSHARED(%r12), %esi
|
||||
#if MUTEX == 0
|
||||
movq %r12, %rdi
|
||||
#else
|
||||
leaq MUTEX(%r12), %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
callq __lll_unlock_wake
|
||||
jmp 11b
|
||||
|
||||
12:
|
||||
12: movl PSHARED(%r12), %esi
|
||||
#if MUTEX == 0
|
||||
movq %r12, %rdi
|
||||
#else
|
||||
leaq MUTEX(%r12), %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
callq __lll_lock_wait
|
||||
jmp 13b
|
||||
|
||||
16: movq $-ETIMEDOUT, %rdx
|
||||
|
@ -18,26 +18,15 @@
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelrwlock.h>
|
||||
#include <pthread-errnos.h>
|
||||
#include <kernel-features.h>
|
||||
|
||||
|
||||
#define SYS_futex 202
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_PRIVATE_FLAG 128
|
||||
|
||||
/* For the calculation see asm/vsyscall.h. */
|
||||
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define LOCK
|
||||
#endif
|
||||
|
||||
|
||||
.text
|
||||
|
||||
.globl pthread_rwlock_timedwrlock
|
||||
@ -168,11 +157,11 @@ pthread_rwlock_timedwrlock:
|
||||
popq %r12
|
||||
retq
|
||||
|
||||
1:
|
||||
1: movl PSHARED(%rdi), %esi
|
||||
#if MUTEX != 0
|
||||
addq $MUTEX, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
callq __lll_lock_wait
|
||||
jmp 2b
|
||||
|
||||
14: cmpl %fs:TID, %eax
|
||||
@ -180,13 +169,13 @@ pthread_rwlock_timedwrlock:
|
||||
20: movl $EDEADLK, %edx
|
||||
jmp 9b
|
||||
|
||||
6:
|
||||
6: movl PSHARED(%r12), %esi
|
||||
#if MUTEX == 0
|
||||
movq %r12, %rdi
|
||||
#else
|
||||
leal MUTEX(%r12), %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
callq __lll_unlock_wake
|
||||
jmp 7b
|
||||
|
||||
/* Overflow. */
|
||||
@ -194,22 +183,22 @@ pthread_rwlock_timedwrlock:
|
||||
movl $EAGAIN, %edx
|
||||
jmp 9b
|
||||
|
||||
10:
|
||||
10: movl PSHARED(%r12), %esi
|
||||
#if MUTEX == 0
|
||||
movq %r12, %rdi
|
||||
#else
|
||||
leaq MUTEX(%r12), %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
callq __lll_unlock_wake
|
||||
jmp 11b
|
||||
|
||||
12:
|
||||
12: movl PSHARED(%r12), %esi
|
||||
#if MUTEX == 0
|
||||
movq %r12, %rdi
|
||||
#else
|
||||
leaq MUTEX(%r12), %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
callq __lll_lock_wait
|
||||
jmp 13b
|
||||
|
||||
16: movq $-ETIMEDOUT, %rdx
|
||||
|
@ -18,22 +18,11 @@
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelrwlock.h>
|
||||
#include <kernel-features.h>
|
||||
|
||||
|
||||
#define SYS_futex 202
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_PRIVATE_FLAG 128
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define LOCK
|
||||
#endif
|
||||
|
||||
|
||||
.text
|
||||
|
||||
.globl __pthread_rwlock_unlock
|
||||
@ -107,28 +96,28 @@ __pthread_rwlock_unlock:
|
||||
4: xorl %eax, %eax
|
||||
retq
|
||||
|
||||
1:
|
||||
1: movl PSHARED(%rdi), %esi
|
||||
#if MUTEX != 0
|
||||
addq $MUTEX, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
callq __lll_lock_wait
|
||||
#if MUTEX != 0
|
||||
subq $MUTEX, %rdi
|
||||
#endif
|
||||
jmp 2b
|
||||
|
||||
3:
|
||||
3: movl PSHARED(%rdi), %esi
|
||||
#if MUTEX != 0
|
||||
addq $MUTEX, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
callq __lll_unlock_wake
|
||||
jmp 4b
|
||||
|
||||
7:
|
||||
7: movl PSHARED(%rdi), %esi
|
||||
#if MUTEX != 0
|
||||
addq $MUTEX, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
callq __lll_unlock_wake
|
||||
jmp 8b
|
||||
|
||||
.size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
|
||||
|
@ -18,23 +18,12 @@
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <lowlevelrwlock.h>
|
||||
#include <pthread-errnos.h>
|
||||
#include <kernel-features.h>
|
||||
|
||||
|
||||
#define SYS_futex 202
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
#define FUTEX_PRIVATE_FLAG 128
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define LOCK
|
||||
#endif
|
||||
|
||||
|
||||
.text
|
||||
|
||||
.globl __pthread_rwlock_wrlock
|
||||
@ -121,11 +110,11 @@ __pthread_rwlock_wrlock:
|
||||
movq %rdx, %rax
|
||||
retq
|
||||
|
||||
1:
|
||||
1: movl PSHARED(%rdi), %esi
|
||||
#if MUTEX != 0
|
||||
addq $MUTEX, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
callq __lll_lock_wait
|
||||
#if MUTEX != 0
|
||||
subq $MUTEX, %rdi
|
||||
#endif
|
||||
@ -136,32 +125,32 @@ __pthread_rwlock_wrlock:
|
||||
movl $EDEADLK, %edx
|
||||
jmp 9b
|
||||
|
||||
6:
|
||||
6: movl PSHARED(%rdi), %esi
|
||||
#if MUTEX != 0
|
||||
addq $MUTEX, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
callq __lll_unlock_wake
|
||||
jmp 7b
|
||||
|
||||
4: decl WRITERS_QUEUED(%rdi)
|
||||
movl $EAGAIN, %edx
|
||||
jmp 9b
|
||||
|
||||
10:
|
||||
10: movl PSHARED(%rdi), %esi
|
||||
#if MUTEX != 0
|
||||
addq $MUTEX, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_unlock_wake
|
||||
callq __lll_unlock_wake
|
||||
#if MUTEX != 0
|
||||
subq $MUTEX, %rdi
|
||||
#endif
|
||||
jmp 11b
|
||||
|
||||
12:
|
||||
12: movl PSHARED(%rdi), %esi
|
||||
#if MUTEX != 0
|
||||
addq $MUTEX, %rdi
|
||||
#endif
|
||||
callq __lll_mutex_lock_wait
|
||||
callq __lll_lock_wait
|
||||
#if MUTEX != 0
|
||||
subq $MUTEX, %rdi
|
||||
#endif
|
||||
|
@ -18,19 +18,11 @@
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <shlib-compat.h>
|
||||
#include <pthread-errnos.h>
|
||||
#include <structsem.h>
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define
|
||||
#endif
|
||||
|
||||
#define SYS_futex 202
|
||||
#define FUTEX_WAKE 1
|
||||
|
||||
|
||||
.text
|
||||
|
||||
|
@ -18,23 +18,15 @@
|
||||
02111-1307 USA. */
|
||||
|
||||
#include <sysdep.h>
|
||||
#include <lowlevellock.h>
|
||||
#include <shlib-compat.h>
|
||||
#include <pthread-errnos.h>
|
||||
#include <structsem.h>
|
||||
|
||||
#ifndef UP
|
||||
# define LOCK lock
|
||||
#else
|
||||
# define
|
||||
#endif
|
||||
|
||||
#define SYS_futex 202
|
||||
#define FUTEX_WAIT 0
|
||||
|
||||
/* For the calculation see asm/vsyscall.h. */
|
||||
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
|
||||
|
||||
|
||||
.text
|
||||
|
||||
.globl sem_timedwait
|
||||
|