glibc/sysdeps/unix/sysv/linux/i386/pthread_barrier_wait.S
commit f6d1d86d0c by H.J. Lu: Move sysdeps/unix/sysv/linux/i386/i486/*.? to i386

Since glibc no longer supports i386, we can move
sysdeps/unix/sysv/linux/i386/i486/*.? to i386.

	[BZ #19006]
	* sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S: Moved
	to ...
	* sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S: Here.
	* sysdeps/unix/sysv/linux/i386/i486/lll_timedlock_wait.c: Moved
	to ...
	* sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c: Here.
	* sysdeps/unix/sysv/linux/i386/i486/lll_timedwait_tid.c: Moved
	to ...
	* sysdeps/unix/sysv/linux/i386/lll_timedwait_tid.c: Here.
	* sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Moved to ...
	* sysdeps/unix/sysv/linux/i386/lowlevellock.S: Here.
	* sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S: Moved
	to ...
	* sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S: Here.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S:
	Moved to ...
	* sysdeps/unix/sysv/linux/i386/pthread_barrier_wait.S: Here.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S:
	Moved to ...
	* sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.S: Here.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S: Moved
	to ...
	* sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S: Here.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S:
	Moved to ...
	* sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.S: Here.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Moved
	to ...
	* sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S: Here.
	* sysdeps/unix/sysv/linux/i386/i586/libc-lowlevellock.S:
	Removed.
	* sysdeps/unix/sysv/linux/i386/i586/lll_timedlock_wait.c:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/lll_timedwait_tid.c:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/lowlevellock.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/pthread_barrier_wait.S:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/pthread_cond_broadcast.S:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/pthread_cond_signal.S:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/pthread_cond_timedwait.S:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/pthread_cond_wait.S:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/libc-lowlevellock.S:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/lll_timedlock_wait.c:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/lll_timedwait_tid.c:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/lowlevellock.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/pthread_barrier_wait.S:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/pthread_cond_broadcast.S:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/pthread_cond_signal.S:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/pthread_cond_wait.S:
	Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S:
	Replace ../i486/pthread_cond_timedwait.S with
	../pthread_cond_timedwait.S.
2015-09-30 10:12:44 -07:00


/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <lowlevellock.h>
#include <lowlevelbarrier.h>
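
/* Outline of the algorithm implemented below.  The barrier object is
   accessed through the offsets defined in lowlevelbarrier.h:
   CURR_EVENT (the event counter the futex calls operate on), MUTEX
   (the internal low-level lock), LEFT (arrivals still needed),
   INIT_COUNT (the count the barrier was initialized with) and PRIVATE
   (selects process-private vs. process-shared futex operations).

   Each thread takes MUTEX and decrements LEFT.  The thread that brings
   LEFT to zero completes the round: it increments CURR_EVENT, wakes all
   waiters with FUTEX_WAKE and returns PTHREAD_BARRIER_SERIAL_THREAD.
   Every other thread records CURR_EVENT, drops MUTEX and FUTEX_WAITs
   until the counter changes.  On the way out each thread re-increments
   LEFT; MUTEX stays held from the last arrival until LEFT is back at
   INIT_COUNT, so a new round cannot start while threads of the current
   one are still inside.  */
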
	.text

	.globl	__pthread_barrier_wait
	.type	__pthread_barrier_wait,@function
	.align	16
__pthread_barrier_wait:
	cfi_startproc
	pushl	%ebx
	cfi_adjust_cfa_offset(4)
	cfi_offset(%ebx, -8)

	movl	8(%esp), %ebx
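	/* %ebx is callee-saved, hence the push above; it now holds the
	   barrier pointer (the function's only argument) and is also the
	   register that carries the first futex syscall argument below.  */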
	/* Get the mutex.  */
	movl	$1, %edx
	xorl	%eax, %eax
	LOCK
	cmpxchgl %edx, MUTEX(%ebx)
	jnz	1f
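	/* The cmpxchg tried to move MUTEX from 0 to 1.  If the mutex was
	   not free, block in __lll_lock_wait via the stub at 1: and
	   resume at 2: once it is owned.  */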
	/* One less waiter.  If this was the last one needed wake
	   everybody.  */
2:	subl	$1, LEFT(%ebx)
	je	3f

	/* There are more threads to come.  */
	pushl	%esi
	cfi_adjust_cfa_offset(4)
	cfi_offset(%esi, -12)

#if CURR_EVENT == 0
	movl	(%ebx), %edx
#else
	movl	CURR_EVENT(%ebx), %edx
#endif
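	/* %edx now holds the event counter of the current round; the
	   futex wait below uses it to detect whether the round has
	   already completed by the time we reach the kernel.  */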
	/* Release the mutex.  */
	LOCK
	subl	$1, MUTEX(%ebx)
	jne	6f
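	/* A nonzero result means other threads are blocked on MUTEX;
	   the stub at 6: calls __lll_unlock_wake to complete the unlock
	   and wake them, then resumes at 7:.  */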
	/* Wait for the remaining threads.  The call will return immediately
	   if the CURR_EVENT memory has meanwhile been changed.  */
7:
#if FUTEX_WAIT == 0
	movl	PRIVATE(%ebx), %ecx
#else
	movl	$FUTEX_WAIT, %ecx
	orl	PRIVATE(%ebx), %ecx
#endif
	xorl	%esi, %esi
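	/* i386 syscall convention: %ebx = uaddr (the barrier pointer
	   itself is passed), %ecx = FUTEX_WAIT or'ed with the private
	   flag, %edx = the CURR_EVENT value recorded above, %esi =
	   timeout pointer, NULL here so the wait is unbounded.  */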
8:	movl	$SYS_futex, %eax
	ENTER_KERNEL

	/* Don't return on spurious wakeups.  The syscall does not change
	   any register except %eax so there is no need to reload any of
	   them.  */
#if CURR_EVENT == 0
	cmpl	%edx, (%ebx)
#else
	cmpl	%edx, CURR_EVENT(%ebx)
#endif
	je	8b
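	/* CURR_EVENT changed, so the round we were part of has been
	   completed by the last arriving thread.  */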
	/* Increment LEFT.  If this brings the count back to the
	   initial count unlock the object.  */
	movl	$1, %edx
	movl	INIT_COUNT(%ebx), %ecx
	LOCK
	xaddl	%edx, LEFT(%ebx)
	subl	$1, %ecx
	cmpl	%ecx, %edx
	jne	10f
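	/* The xaddl left the old value of LEFT in %edx.  If it equals
	   INIT_COUNT - 1, this thread is the last of the round to leave
	   and must release the mutex that has been kept held since the
	   last arrival; everyone else skips ahead to 10:.  */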
	/* Release the mutex.  We cannot release the lock before
	   waking the waiting threads since otherwise a new thread might
	   arrive and get woken up, too.  */
	LOCK
	subl	$1, MUTEX(%ebx)
	jne	9f

	/* Note: %esi is still zero.  */
10:	movl	%esi, %eax		/* != PTHREAD_BARRIER_SERIAL_THREAD */

	popl	%esi
	cfi_adjust_cfa_offset(-4)
	cfi_restore(%esi)
	popl	%ebx
	cfi_adjust_cfa_offset(-4)
	cfi_restore(%ebx)
	ret

	cfi_adjust_cfa_offset(4)
	cfi_offset(%ebx, -8)
	/* The necessary number of threads arrived.  */
3:
#if CURR_EVENT == 0
	addl	$1, (%ebx)
#else
	addl	$1, CURR_EVENT(%ebx)
#endif
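	/* Bumping the event counter is what logically releases the
	   waiters: even if the FUTEX_WAKE below races with a thread that
	   has not entered the kernel yet, that thread's value check
	   against the old CURR_EVENT will fail and it will not block.  */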
	/* Wake up all waiters.  The count is a signed number in the kernel
	   so 0x7fffffff is the highest value.  */
	movl	$0x7fffffff, %edx
	movl	$FUTEX_WAKE, %ecx
	orl	PRIVATE(%ebx), %ecx
	movl	$SYS_futex, %eax
	ENTER_KERNEL

	/* Increment LEFT.  If this brings the count back to the
	   initial count unlock the object.  */
	movl	$1, %edx
	movl	INIT_COUNT(%ebx), %ecx
	LOCK
	xaddl	%edx, LEFT(%ebx)
	subl	$1, %ecx
	cmpl	%ecx, %edx
	jne	5f
	/* Release the mutex.  We cannot release the lock before
	   waking the waiting threads since otherwise a new thread might
	   arrive and get woken up, too.  */
	LOCK
	subl	$1, MUTEX(%ebx)
	jne	4f
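	/* If LEFT is not yet back at INIT_COUNT, the jne 5f above has
	   already skipped this unlock and MUTEX stays held; the last
	   thread of the round to re-increment LEFT releases it in the
	   waiter path instead.  Label 4 handles a contended unlock
	   here.  */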
5:	orl	$-1, %eax		/* == PTHREAD_BARRIER_SERIAL_THREAD */
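	/* PTHREAD_BARRIER_SERIAL_THREAD is -1, hence the orl; exactly one
	   thread per round returns it, all others return 0 via label 10
	   above.  */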
	popl	%ebx
	cfi_adjust_cfa_offset(-4)
	cfi_restore(%ebx)
	ret

	cfi_adjust_cfa_offset(4)
	cfi_offset(%ebx, -8)
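	/* Out-of-line slow paths.  1: blocks on the internal mutex via
	   __lll_lock_wait; 4:, 6: and 9: hand a contended unlock to
	   __lll_unlock_wake.  Each stub jumps back to the point it came
	   from.  */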
1:	movl	PRIVATE(%ebx), %ecx
	leal	MUTEX(%ebx), %edx
	xorl	$LLL_SHARED, %ecx
	call	__lll_lock_wait
	jmp	2b

4:	movl	PRIVATE(%ebx), %ecx
	leal	MUTEX(%ebx), %eax
	xorl	$LLL_SHARED, %ecx
	call	__lll_unlock_wake
	jmp	5b

	cfi_adjust_cfa_offset(4)
	cfi_offset(%esi, -12)
6:	movl	PRIVATE(%ebx), %ecx
	leal	MUTEX(%ebx), %eax
	xorl	$LLL_SHARED, %ecx
	call	__lll_unlock_wake
	jmp	7b

9:	movl	PRIVATE(%ebx), %ecx
	leal	MUTEX(%ebx), %eax
	xorl	$LLL_SHARED, %ecx
	call	__lll_unlock_wake
	jmp	10b
	cfi_endproc
	.size	__pthread_barrier_wait,.-__pthread_barrier_wait
weak_alias (__pthread_barrier_wait, pthread_barrier_wait)