glibc/nptl/pthread_mutex_lock.c
Commit e51deae7f6 by Ulrich Drepper, 2007-08-01 04:47:26 +00:00:

	* sysdeps/unix/sysv/linux/powerpc/sem_post.c (__new_sem_post):
	Use __asm __volatile (__lll_acq_instr ::: "memory") instead of
	atomic_full_barrier.

2007-07-31  Jakub Jelinek  <jakub@redhat.com>

	* allocatestack.c (stack_cache_lock): Change type to int.
	(get_cached_stack, allocate_stack, __deallocate_stack,
	__make_stacks_executable, __find_thread_by_id, __nptl_setxid,
	__pthread_init_static_tls, __wait_lookup_done): Add LLL_PRIVATE
	as second argument to lll_lock and lll_unlock macros on
	stack_cache_lock.
	* pthread_create.c (__find_in_stack_list): Likewise.
	(start_thread): Similarly with pd->lock.  Use lll_robust_dead
	macro instead of lll_robust_mutex_dead, pass LLL_SHARED to it
	as second argument.
	* descr.h (struct pthread): Change lock and setxid_futex field
	type to int.
	* old_pthread_cond_broadcast.c (__pthread_cond_broadcast_2_0): Use
	LLL_LOCK_INITIALIZER instead of LLL_MUTEX_LOCK_INITIALIZER.
	* old_pthread_cond_signal.c (__pthread_cond_signal_2_0): Likewise.
	* old_pthread_cond_timedwait.c (__pthread_cond_timedwait_2_0):
	Likewise.
	* old_pthread_cond_wait.c (__pthread_cond_wait_2_0): Likewise.
	* pthread_cond_init.c (__pthread_cond_init): Likewise.
	* pthreadP.h (__attr_list_lock): Change type to int.
	* pthread_attr_init.c (__attr_list_lock): Likewise.
	* pthread_barrier_destroy.c (pthread_barrier_destroy): Pass
	ibarrier->private ^ FUTEX_PRIVATE_FLAG as second argument to
	lll_{,un}lock.
	* pthread_barrier_wait.c (pthread_barrier_wait): Likewise and
	also for lll_futex_{wake,wait}.
	* pthread_barrier_init.c (pthread_barrier_init): Make iattr
	a pointer to const.
	* pthread_cond_broadcast.c (__pthread_cond_broadcast): Pass
	LLL_SHARED as second argument to lll_{,un}lock.
	* pthread_cond_destroy.c (__pthread_cond_destroy): Likewise.
	* pthread_cond_signal.c (__pthread_cond_signal): Likewise.
	* pthread_cond_timedwait.c (__pthread_cond_timedwait): Likewise.
	* pthread_cond_wait.c (__condvar_cleanup, __pthread_cond_wait):
	Likewise.
	* pthread_getattr_np.c (pthread_getattr_np): Add LLL_PRIVATE
	as second argument to lll_{,un}lock macros on pd->lock.
	* pthread_getschedparam.c (__pthread_getschedparam): Likewise.
	* pthread_setschedparam.c (__pthread_setschedparam): Likewise.
	* pthread_setschedprio.c (pthread_setschedprio): Likewise.
	* tpp.c (__pthread_tpp_change_priority, __pthread_current_priority):
	Likewise.
	* sysdeps/pthread/createthread.c (do_clone, create_thread):
	Likewise.
	* pthread_once.c (once_lock): Change type to int.
	(__pthread_once): Pass LLL_PRIVATE as second argument to
	lll_{,un}lock macros on once_lock.
	* pthread_rwlock_rdlock.c (__pthread_rwlock_rdlock): Use
	lll_{,un}lock macros instead of lll_mutex_{,un}lock, pass
	rwlock->__data.__shared as second argument to them and similarly
	for lll_futex_w*.
	* pthread_rwlock_timedrdlock.c (pthread_rwlock_timedrdlock):
	Likewise.
	* pthread_rwlock_timedwrlock.c (pthread_rwlock_timedwrlock):
	Likewise.
	* pthread_rwlock_tryrdlock.c (__pthread_rwlock_tryrdlock): Likewise.
	* pthread_rwlock_trywrlock.c (__pthread_rwlock_trywrlock): Likewise.
	* pthread_rwlock_unlock.c (__pthread_rwlock_unlock): Likewise.
	* pthread_rwlock_wrlock.c (__pthread_rwlock_wrlock): Likewise.
	* sem_close.c (sem_close): Pass LLL_PRIVATE as second argument
	to lll_{,un}lock macros on __sem_mappings_lock.
	* sem_open.c (check_add_mapping): Likewise.
	(__sem_mappings_lock): Change type to int.
	* semaphoreP.h (__sem_mappings_lock): Likewise.
	* pthread_mutex_lock.c (LLL_MUTEX_LOCK, LLL_MUTEX_TRYLOCK,
	LLL_ROBUST_MUTEX_LOCK): Use lll_{,try,robust_}lock macros
	instead of lll_*mutex_*, pass LLL_SHARED as last
	argument.
	(__pthread_mutex_lock): Use lll_unlock instead of lll_mutex_unlock,
	pass LLL_SHARED as last argument.
	* sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c (LLL_MUTEX_LOCK,
	LLL_MUTEX_TRYLOCK, LLL_ROBUST_MUTEX_LOCK): Use
	lll_{cond_,cond_try,robust_cond}lock macros instead of lll_*mutex_*,
	pass LLL_SHARED as last argument.
	* pthread_mutex_timedlock.c (pthread_mutex_timedlock): Use
	lll_{timed,try,robust_timed,un}lock instead of lll_*mutex*, pass
	LLL_SHARED as last argument.
	* pthread_mutex_trylock.c (__pthread_mutex_trylock): Similarly.
	* pthread_mutex_unlock.c (__pthread_mutex_unlock_usercnt):
	Similarly.
	* sysdeps/pthread/bits/libc-lock.h (__libc_lock_lock,
	__libc_lock_lock_recursive, __libc_lock_unlock,
	__libc_lock_unlock_recursive): Pass LLL_PRIVATE as second
	argument to lll_{,un}lock.
	* sysdeps/pthread/bits/stdio-lock.h (_IO_lock_lock,
	_IO_lock_unlock): Likewise.
	* sysdeps/unix/sysv/linux/fork.c (__libc_fork): Don't use
	compound literal.
	* sysdeps/unix/sysv/linux/unregister-atfork.c (__unregister_atfork):
	Pass LLL_PRIVATE as second argument to lll_{,un}lock macros on
	__fork_lock.
	* sysdeps/unix/sysv/linux/register-atfork.c (__register_atfork,
	free_mem): Likewise.
	(__fork_lock): Change type to int.
	* sysdeps/unix/sysv/linux/fork.h (__fork_lock): Likewise.
	* sysdeps/unix/sysv/linux/sem_post.c (__new_sem_post): Pass
	isem->private ^ FUTEX_PRIVATE_FLAG as second argument to
	lll_futex_wake.
	* sysdeps/unix/sysv/linux/sem_timedwait.c (sem_timedwait): Likewise.
	* sysdeps/unix/sysv/linux/sem_wait.c (__new_sem_wait): Likewise.
	* sysdeps/unix/sysv/linux/lowlevellock.c (__lll_lock_wait_private):
	New function.
	(__lll_lock_wait, __lll_timedlock_wait): Add private argument and
	pass it through to lll_futex_*wait, only compile in when
	IS_IN_libpthread.
	* sysdeps/unix/sysv/linux/lowlevelrobustlock.c
	(__lll_robust_lock_wait, __lll_robust_timedlock_wait): Add private
	argument and pass it through to lll_futex_*wait.
	* sysdeps/unix/sysv/linux/alpha/lowlevellock.h: Renamed all
	lll_mutex_* resp. lll_robust_mutex_* macros to lll_* resp.
	lll_robust_*.  Renamed all __lll_mutex_* resp. __lll_robust_mutex_*
	inline functions to __lll_* resp. __lll_robust_*.
	(LLL_MUTEX_LOCK_INITIALIZER): Remove.
	(lll_mutex_dead): Add private argument.
	(__lll_lock_wait_private): New prototype.
	(__lll_lock_wait, __lll_robust_lock_wait, __lll_lock_timedwait,
	__lll_robust_lock_timedwait): Add private argument to prototypes.
	(__lll_lock): Add private argument, if it is constant LLL_PRIVATE,
	call __lll_lock_wait_private, otherwise pass private to
	__lll_lock_wait.
	(__lll_robust_lock, __lll_cond_lock, __lll_timedlock,
	__lll_robust_timedlock): Add private argument, pass it to
	__lll_*wait functions.
	(__lll_unlock): Add private argument, if it is constant LLL_PRIVATE,
	call __lll_unlock_wake_private, otherwise pass private to
	__lll_unlock_wake.
	(__lll_robust_unlock): Add private argument, pass it to
	__lll_robust_unlock_wake.
	(lll_lock, lll_robust_lock, lll_cond_lock, lll_timedlock,
	lll_robust_timedlock, lll_unlock, lll_robust_unlock): Add private
	argument, pass it through to __lll_* inline function.
	(__lll_mutex_unlock_force, lll_mutex_unlock_force): Remove.
	(lll_lock_t): Remove.
	(__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake,
	__lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait,
	lll_cond_wake, lll_cond_broadcast): Remove.
	* sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
	* sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
	* sysdeps/unix/sysv/linux/sparc/lowlevellock.h: Likewise.
	* sysdeps/unix/sysv/linux/i386/lowlevellock.h: Allow including
	the header from assembler.  Renamed all lll_mutex_* resp.
	lll_robust_mutex_* macros to lll_* resp. lll_robust_*.
	(LOCK, FUTEX_CMP_REQUEUE, FUTEX_WAKE_OP,
	FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): Define.
	(LLL_MUTEX_LOCK_INITIALIZER, LLL_MUTEX_LOCK_INITIALIZER_LOCKED,
	LLL_MUTEX_LOCK_INITIALIZER_WAITERS): Remove.
	(__lll_mutex_lock_wait, __lll_mutex_timedlock_wait,
	__lll_mutex_unlock_wake, __lll_lock_wait, __lll_unlock_wake):
	Remove prototype.
	(__lll_trylock_asm, __lll_lock_asm_start, __lll_unlock_asm): Define.
	(lll_robust_trylock, lll_cond_trylock): Use LLL_LOCK_INITIALIZER*
	rather than LLL_MUTEX_LOCK_INITIALIZER* macros.
	(lll_trylock): Likewise, use __lll_trylock_asm, pass
	MULTIPLE_THREADS_OFFSET as another asm operand.
	(lll_lock): Add private argument, use __lll_lock_asm_start, pass
	MULTIPLE_THREADS_OFFSET as last asm operand, call
	__lll_lock_wait_private if private is constant LLL_PRIVATE,
	otherwise pass private as another argument to __lll_lock_wait.
	(lll_robust_lock, lll_cond_lock, lll_robust_cond_lock,
	lll_timedlock, lll_robust_timedlock): Add private argument, pass
	private as another argument to __lll_*lock_wait call.
	(lll_unlock): Add private argument, use __lll_unlock_asm, pass
	MULTIPLE_THREADS_OFFSET as another asm operand, call
	__lll_unlock_wake_private if private is constant LLL_PRIVATE,
	otherwise pass private as another argument to __lll_unlock_wake.
	(lll_robust_unlock): Add private argument, pass private as another
	argument to __lll_unlock_wake.
	(lll_robust_dead): Add private argument, use __lll_private_flag
	macro.
	(lll_islocked): Use LLL_LOCK_INITIALIZER instead of
	LLL_MUTEX_LOCK_INITIALIZER.
	(lll_lock_t): Remove.
	(LLL_LOCK_INITIALIZER_WAITERS): Define.
	(__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake,
	__lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait,
	lll_cond_wake, lll_cond_broadcast): Remove.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
	* sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S: Revert
	2007-05-2{3,9} changes.
	* sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Include
	kernel-features.h and lowlevellock.h.
	(LOAD_PRIVATE_FUTEX_WAIT): Define.
	(LOAD_FUTEX_WAIT): Rewritten.
	(LOCK, SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't
	define.
	(__lll_lock_wait_private, __lll_unlock_wake_private): New functions.
	(__lll_mutex_lock_wait): Rename to ...
	(__lll_lock_wait): ... this.  Take futex addr from %edx instead of
	%ecx, %ecx is now private argument.  Don't compile in for libc.so.
	(__lll_mutex_timedlock_wait): Rename to ...
	(__lll_timedlock_wait): ... this.  Use __NR_gettimeofday.  %esi
	contains private argument.  Don't compile in for libc.so.
	(__lll_mutex_unlock_wake): Rename to ...
	(__lll_unlock_wake): ... this.  %ecx contains private argument.
	Don't compile in for libc.so.
	(__lll_timedwait_tid): Use __NR_gettimeofday.
	* sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S: Include
	kernel-features.h and lowlevellock.h.
	(LOAD_FUTEX_WAIT): Define.
	(LOCK, SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't
	define.
	(__lll_robust_mutex_lock_wait): Rename to ...
	(__lll_robust_lock_wait): ... this.  Futex addr is now in %edx
	argument, %ecx argument contains private.  Use LOAD_FUTEX_WAIT
	macro.
	(__lll_robust_mutex_timedlock_wait): Rename to ...
	(__lll_robust_timedlock_wait): ... this.  Use __NR_gettimeofday.
	%esi argument contains private, use LOAD_FUTEX_WAIT macro.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S: Include
	lowlevellock.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
	(pthread_barrier_wait): Rename __lll_mutex_* to __lll_*, pass
	PRIVATE(%ebx) ^ LLL_SHARED as private argument in %ecx to
	__lll_lock_wait and __lll_unlock_wake, pass MUTEX(%ebx) address
	to __lll_lock_wait in %edx.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S:
	Include lowlevellock.h and pthread-errnos.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE,
	FUTEX_CMP_REQUEUE, EINVAL, LOCK): Don't define.
	(__pthread_cond_broadcast): Rename __lll_mutex_* to __lll_*, pass
	cond_lock address in %edx rather than %ecx to __lll_lock_wait,
	pass LLL_SHARED in %ecx to both __lll_lock_wait and
	__lll_unlock_wake.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S:
	Include lowlevellock.h and pthread-errnos.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_WAKE_OP,
	FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, EINVAL, LOCK): Don't define.
	(__pthread_cond_signal): Rename __lll_mutex_* to __lll_*, pass
	cond_lock address in %edx rather than %ecx to __lll_lock_wait,
	pass LLL_SHARED in %ecx to both __lll_lock_wait and
	__lll_unlock_wake.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S:
	Include lowlevellock.h.
	(SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK):
	Don't define.
	(__pthread_cond_timedwait): Rename __lll_mutex_* to __lll_*, pass
	cond_lock address in %edx rather than %ecx to __lll_lock_wait,
	pass LLL_SHARED in %ecx to both __lll_lock_wait and
	__lll_unlock_wake.  Use __NR_gettimeofday.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S:
	Include lowlevellock.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
	(__pthread_cond_wait, __condvar_w_cleanup): Rename __lll_mutex_*
	to __lll_*, pass cond_lock address in %edx rather than %ecx to
	__lll_lock_wait, pass LLL_SHARED in %ecx to both __lll_lock_wait
	and __lll_unlock_wake.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S:
	Include lowlevellock.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
	(__pthread_rwlock_rdlock): Rename __lll_mutex_* to __lll_*, pass
	MUTEX(%ebx) address in %edx rather than %ecx to
	__lll_lock_wait, pass PSHARED(%ebx) in %ecx to both __lll_lock_wait
	and __lll_unlock_wake.  Move return value from %ecx to %edx
	register.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S:
	Include lowlevellock.h.
	(SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK):
	Don't define.
	(pthread_rwlock_timedrdlock): Rename __lll_mutex_* to __lll_*, pass
	MUTEX(%ebp) address in %edx rather than %ecx to
	__lll_lock_wait, pass PSHARED(%ebp) in %ecx to both __lll_lock_wait
	and __lll_unlock_wake.  Move return value from %ecx to %edx
	register.  Use __NR_gettimeofday.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S:
	Include lowlevellock.h.
	(SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK):
	Don't define.
	(pthread_rwlock_timedwrlock): Rename __lll_mutex_* to __lll_*, pass
	MUTEX(%ebp) address in %edx rather than %ecx to
	__lll_lock_wait, pass PSHARED(%ebp) in %ecx to both __lll_lock_wait
	and __lll_unlock_wake.  Move return value from %ecx to %edx
	register.  Use __NR_gettimeofday.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S:
	Include lowlevellock.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
	(__pthread_rwlock_unlock): Rename __lll_mutex_* to __lll_*, pass
	MUTEX(%edi) address in %edx rather than %ecx to
	__lll_lock_wait, pass PSHARED(%edi) in %ecx to both __lll_lock_wait
	and __lll_unlock_wake.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S:
	Include lowlevellock.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
	(__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass
	MUTEX(%ebx) address in %edx rather than %ecx to
	__lll_lock_wait, pass PSHARED(%ebx) in %ecx to both __lll_lock_wait
	and __lll_unlock_wake.  Move return value from %ecx to %edx
	register.
	* sysdeps/unix/sysv/linux/i386/pthread_once.S: Include
	lowlevellock.h.
	(LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Don't
	define.
	* sysdeps/unix/sysv/linux/i386/i486/sem_post.S: Include lowlevellock.h.
	(LOCK, SYS_futex, FUTEX_WAKE): Don't define.
	* sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S: Include
	lowlevellock.h.
	(LOCK, SYS_futex, SYS_gettimeofday, FUTEX_WAIT): Don't define.
	(sem_timedwait): Use __NR_gettimeofday.
	* sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S: Include
	lowlevellock.h.
	(LOCK): Don't define.
	* sysdeps/unix/sysv/linux/i386/i486/sem_wait.S: Include
	lowlevellock.h.
	(LOCK, SYS_futex, FUTEX_WAIT): Don't define.
	* sysdeps/unix/sysv/linux/powerpc/sem_post.c: Wake only when there
	are waiters.
	* sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S: Revert
	2007-05-2{3,9} changes.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Include
	kernel-features.h and lowlevellock.h.
	(LOAD_PRIVATE_FUTEX_WAIT): Define.
	(LOAD_FUTEX_WAIT): Rewritten.
	(LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define.
	(__lll_lock_wait_private, __lll_unlock_wake_private): New functions.
	(__lll_mutex_lock_wait): Rename to ...
	(__lll_lock_wait): ... this.  %esi is now private argument.
	Don't compile in for libc.so.
	(__lll_mutex_timedlock_wait): Rename to ...
	(__lll_timedlock_wait): ... this.  %esi contains private argument.
	Don't compile in for libc.so.
	(__lll_mutex_unlock_wake): Rename to ...
	(__lll_unlock_wake): ... this.  %esi contains private argument.
	Don't compile in for libc.so.
	* sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: Include
	kernel-features.h and lowlevellock.h.
	(LOAD_FUTEX_WAIT): Define.
	(LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define.
	(__lll_robust_mutex_lock_wait): Rename to ...
	(__lll_robust_lock_wait): ... this.  %esi argument contains private.
	Use LOAD_FUTEX_WAIT macro.
	(__lll_robust_mutex_timedlock_wait): Rename to ...
	(__lll_robust_timedlock_wait): ... this. %esi argument contains
	private, use LOAD_FUTEX_WAIT macro.
	* sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S: Include
	lowlevellock.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
	(pthread_barrier_wait): Rename __lll_mutex_* to __lll_*, pass
	PRIVATE(%rdi) ^ LLL_SHARED as private argument in %esi to
	__lll_lock_wait and __lll_unlock_wake.
	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S:
	Include lowlevellock.h and pthread-errnos.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE,
	FUTEX_CMP_REQUEUE, EINVAL, LOCK): Don't define.
	(__pthread_cond_broadcast): Rename __lll_mutex_* to __lll_*,
	pass LLL_SHARED in %esi to both __lll_lock_wait and
	__lll_unlock_wake.
	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S:
	Include lowlevellock.h and pthread-errnos.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_WAKE_OP,
	FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, EINVAL, LOCK): Don't define.
	(__pthread_cond_signal): Rename __lll_mutex_* to __lll_*,
	pass LLL_SHARED in %esi to both __lll_lock_wait and
	__lll_unlock_wake.
	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S:
	Include lowlevellock.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
	(__pthread_cond_timedwait): Rename __lll_mutex_* to __lll_*,
	pass LLL_SHARED in %esi to both __lll_lock_wait and
	__lll_unlock_wake.
	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S:
	Include lowlevellock.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
	(__pthread_cond_wait, __condvar_cleanup): Rename __lll_mutex_*
	to __lll_*, pass LLL_SHARED in %esi to both __lll_lock_wait
	and __lll_unlock_wake.
	* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S:
	Include lowlevellock.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
	Don't define.
	(__pthread_rwlock_rdlock): Rename __lll_mutex_* to __lll_*,
	pass PSHARED(%rdi) in %esi to both __lll_lock_wait
	and __lll_unlock_wake.
	* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S:
	Include lowlevellock.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
	Don't define.
	(pthread_rwlock_timedrdlock): Rename __lll_mutex_* to __lll_*,
	pass PSHARED(%rdi) in %esi to both __lll_lock_wait
	and __lll_unlock_wake.
	* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S:
	Include lowlevellock.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
	Don't define.
	(pthread_rwlock_timedwrlock): Rename __lll_mutex_* to __lll_*,
	pass PSHARED(%rdi) in %esi to both __lll_lock_wait
	and __lll_unlock_wake.
	* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S:
	Include lowlevellock.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
	Don't define.
	(__pthread_rwlock_unlock): Rename __lll_mutex_* to __lll_*,
	pass PSHARED(%rdi) in %esi to both __lll_lock_wait
	and __lll_unlock_wake.
	* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S:
	Include lowlevellock.h.
	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
	Don't define.
	(__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*,
	pass PSHARED(%rdi) in %esi to both __lll_lock_wait
	and __lll_unlock_wake.
	* sysdeps/unix/sysv/linux/x86_64/pthread_once.S: Include
	lowlevellock.h.
	(LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Don't
	define.
	* sysdeps/unix/sysv/linux/x86_64/sem_post.S: Include lowlevellock.h.
	(LOCK, SYS_futex, FUTEX_WAKE): Don't define.
	* sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S: Include
	lowlevellock.h.
	(LOCK, SYS_futex, FUTEX_WAIT): Don't define.
	* sysdeps/unix/sysv/linux/x86_64/sem_trywait.S: Include
	lowlevellock.h.
	(LOCK): Don't define.
	* sysdeps/unix/sysv/linux/x86_64/sem_wait.S: Include
	lowlevellock.h.
	(LOCK, SYS_futex, FUTEX_WAIT): Don't define.
	* sysdeps/unix/sysv/linux/sparc/internaltypes.h: New file.
	* sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c: New file.
	* sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c: New file.
	* sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c: New file.
	* sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c
	(__lll_lock_wait_private): New function.
	(__lll_lock_wait, __lll_timedlock_wait): Add private argument, pass
	it to lll_futex_*wait.  Don't compile in for libc.so.
	* sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_init.c:
	Remove.
	* sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c
	(struct sparc_pthread_barrier): Remove.
	(pthread_barrier_wait): Use union sparc_pthread_barrier instead of
	struct sparc_pthread_barrier.  Pass
	ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE to lll_{,un}lock
	and lll_futex_wait macros.
	* sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_init.c:
	Remove.
	* sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c:
	Include sparc pthread_barrier_wait.c instead of generic one.
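
In outline, the change threads a private-vs-shared flag through the whole
low-level lock API, as the entries above describe file by file.  A
hypothetical call site changes roughly like this (an illustration of the
renaming, not a hunk from the commit):

	- lll_mutex_lock (pd->lock);
	+ lll_lock (pd->lock, LLL_PRIVATE);  /* process-private futex */

	- lll_mutex_lock (rwlock->__data.__lock);
	+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

Locks that can never be shared across processes pass LLL_PRIVATE, which
lets the futex syscalls use the cheaper FUTEX_PRIVATE_FLAG forms; locks
that may live in shared memory pass LLL_SHARED or a per-object shared
field, as above.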

/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>


#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock (mutex, /* XYZ */ LLL_SHARED)
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock (mutex)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock (mutex, id, /* XYZ */ LLL_SHARED)
#endif
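
/* Other compilation units can pre-define the three macros above before
   including this file.  Per the ChangeLog above,
   sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c maps them to the
   lll_cond_lock, lll_cond_trylock and lll_robust_cond_lock variants
   (again passing LLL_SHARED as the last argument); that wrapper also
   appears to be what compiles this file with NO_INCR defined, which is
   why the NO_INCR conditionals occur below.  */
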
int
__pthread_mutex_lock (mutex)
     pthread_mutex_t *mutex;
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  int retval = 0;
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex->__data.__lock);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex->__data.__lock);
      assert (mutex->__data.__owner == 0);
      break;
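
      /* Adaptive mutexes spin in user space for a bounded number of
         iterations before falling back to the kernel, on the assumption
         that on SMP machines the current owner is running and will
         release the lock shortly.  The budget adapts: __spins tracks a
         smoothed average of past spin counts (the "+= (cnt - __spins) / 8"
         update below is an exponential moving average) and the cap grows
         as 2 * __spins + 10, bounded by MAX_ADAPTIVE_COUNT.  */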
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex->__data.__lock);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
      break;
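
      /* Robust mutexes must remain usable if the owner dies.  Before
         the lock word is touched the mutex is published in
         robust_head.list_op_pending so that on an unexpected thread
         death the kernel and the library can still find a half-acquired
         lock; FUTEX_OWNER_DIED, set by the kernel in the lock word,
         tells the next locker to take the mutex over and report
         EOWNERDEAD.  */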
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = LLL_ROBUST_MUTEX_LOCK (mutex->__data.__lock, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
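
      /* Priority-inheritance mutexes leave contention handling to the
         kernel: an uncontended acquisition is a single CAS storing the
         TID into the lock word, and on contention the FUTEX_LOCK_PI
         operation used below blocks the caller while boosting the
         current owner's priority.  */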
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      FUTEX_LOCK_PI, 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              FUTEX_UNLOCK_PI, 0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
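
      /* Priority-protection (priority ceiling) mutexes keep the ceiling
         in the PTHREAD_MUTEX_PRIO_CEILING_MASK bits of the lock word and
         the lock state in the low bits: ceilval means unlocked,
         ceilval | 1 locked, and ceilval | 2 locked with waiters.  The
         loop below first raises the thread's own priority to the
         ceiling via __pthread_tpp_change_priority, then attempts the
         CAS.  */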
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  // XYZ check mutex flag
                                  LLL_SHARED);
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return retval;
}
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
#endif
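
As a usage-level illustration of the error returns implemented above (a
minimal sketch, not part of the original file; the program is hypothetical
but uses only standard pthreads calls):

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	int
	main (void)
	{
	  pthread_mutexattr_t attr;
	  pthread_mutex_t m;

	  pthread_mutexattr_init (&attr);
	  /* PTHREAD_MUTEX_ERRORCHECK corresponds to the
	     PTHREAD_MUTEX_ERRORCHECK_NP case above: relocking from the
	     owning thread must fail instead of deadlocking.  */
	  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ERRORCHECK);
	  pthread_mutex_init (&m, &attr);

	  pthread_mutex_lock (&m);
	  int r = pthread_mutex_lock (&m);	/* second lock fails */
	  printf ("relock: %s\n", r == EDEADLK ? "EDEADLK" : "unexpected");

	  pthread_mutex_unlock (&m);
	  pthread_mutex_destroy (&m);
	  pthread_mutexattr_destroy (&attr);
	  return 0;
	}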