Mirror of https://sourceware.org/git/glibc.git
c50e1c263e
This patch removes the arch-specific x86 assembly implementation of the low-level locking primitives and consolidates the 64-bit and 32-bit variants into a single implementation. Unlike other architectures, the x86 lll_trylock, lll_lock, and lll_unlock implement a single-thread optimization that avoids atomic operations, using cmpxchgl instead. This patch implements the optimization in a generic way, using the new single-thread.h definitions while keeping the previous semantics. lll_cond_trylock, lll_cond_lock, and lll_timedlock just use atomic operations plus calls to lll_lock_wait*. For __lll_lock_wait_private and __lll_lock_wait there is no indication that an assembly implementation outperforms the generic one.

Checked on x86_64-linux-gnu and i686-linux-gnu.

	* sysdeps/nptl/lowlevellock.h (__lll_trylock): New macro.
	(lll_trylock): Call __lll_trylock.
	* sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S: Remove file.
	* sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c: Likewise.
	* sysdeps/unix/sysv/linux/i386/lowlevellock.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/lowlevellock.h: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/lll_timedlock_wait.c: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
	* sysdeps/unix/sysv/linux/x86/lowlevellock.h: New file.
	* sysdeps/unix/sysv/linux/x86_64/cancellation.S: Include
	lowlevellock-futex.h.
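As context for the change, here is a minimal C sketch of the single-thread optimization described above. This is not the actual glibc code; sketch_lll_trylock is a hypothetical name, and the sketch only assumes the SINGLE_THREAD_P predicate from single-thread.h and the generic atomic_compare_and_exchange_bool_acq from glibc's atomic.h:

#include <atomic.h>		/* atomic_compare_and_exchange_bool_acq.  */
#include <single-thread.h>	/* SINGLE_THREAD_P.  */

/* Hypothetical sketch of the single-thread trylock optimization: when
   the process is known to be single-threaded, no other thread can race
   with us, so a plain load/store pair stands in for the lock-prefixed
   compare-and-exchange.  */
static inline int
sketch_lll_trylock (int *futex)
{
  if (SINGLE_THREAD_P)
    {
      if (*futex == 0)
	{
	  *futex = 1;	/* Lock taken without an atomic operation.  */
	  return 0;
	}
      return 1;		/* Already locked.  */
    }
  /* Multi-threaded: fall back to the generic atomic path, as the
     generic __lll_trylock does.  */
  return atomic_compare_and_exchange_bool_acq (futex, 1, 0);
}

lll_cond_trylock, lll_cond_lock, and lll_timedlock take no such shortcut: they always go through the atomic path plus the lll_lock_wait* calls, since a futex shared across processes cannot rely on the single-thread predicate.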
105 lines
2.9 KiB
x86-64 Assembly
/* Copyright (C) 2009-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <tcb-offsets.h>
#include <kernel-features.h>
#include <lowlevellock-futex.h>

#define PTHREAD_UNWIND JUMPTARGET(__pthread_unwind)
#if IS_IN (libpthread)
# if defined SHARED && !defined NO_HIDDEN
#  undef PTHREAD_UNWIND
#  define PTHREAD_UNWIND __GI___pthread_unwind
# endif
#else
# ifndef SHARED
	.weak	__pthread_unwind
# endif
#endif


#define LOAD_PRIVATE_FUTEX_WAIT(reg) \
	movl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg

/* It is crucial that the functions in this file don't modify registers
   other than %rax and %r11.  The syscall wrapper code depends on this
   because it doesn't explicitly save the other registers which hold
   relevant values.  */
	.text

	.hidden __pthread_enable_asynccancel
ENTRY(__pthread_enable_asynccancel)
	movl	%fs:CANCELHANDLING, %eax
2:	movl	%eax, %r11d
	orl	$TCB_CANCELTYPE_BITMASK, %r11d
	cmpl	%eax, %r11d
	je	1f

	lock
	cmpxchgl %r11d, %fs:CANCELHANDLING
	jnz	2b

	andl	$(TCB_CANCELSTATE_BITMASK|TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK|TCB_EXITING_BITMASK|TCB_CANCEL_RESTMASK|TCB_TERMINATED_BITMASK), %r11d
	cmpl	$(TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK), %r11d
	je	3f

1:	ret

3:	subq	$8, %rsp
	cfi_adjust_cfa_offset(8)
	LP_OP(mov) $TCB_PTHREAD_CANCELED, %fs:RESULT
	lock
	orl	$TCB_EXITING_BITMASK, %fs:CANCELHANDLING
	mov	%fs:CLEANUP_JMP_BUF, %RDI_LP
	call	PTHREAD_UNWIND
	hlt
END(__pthread_enable_asynccancel)


	.hidden __pthread_disable_asynccancel
ENTRY(__pthread_disable_asynccancel)
	testl	$TCB_CANCELTYPE_BITMASK, %edi
	jnz	1f

	movl	%fs:CANCELHANDLING, %eax
2:	movl	%eax, %r11d
	andl	$~TCB_CANCELTYPE_BITMASK, %r11d
	lock
	cmpxchgl %r11d, %fs:CANCELHANDLING
	jnz	2b

	movl	%r11d, %eax
3:	andl	$(TCB_CANCELING_BITMASK|TCB_CANCELED_BITMASK), %eax
	cmpl	$TCB_CANCELING_BITMASK, %eax
	je	4f

1:	ret

	/* Performance doesn't matter in this loop.  We will
	   delay until the thread is canceled.  And we will unlikely
	   enter the loop twice.  */
4:	mov	%fs:0, %RDI_LP
	movl	$__NR_futex, %eax
	xorq	%r10, %r10
	addq	$CANCELHANDLING, %rdi
	LOAD_PRIVATE_FUTEX_WAIT (%esi)
	syscall
	movl	%fs:CANCELHANDLING, %eax
	jmp	3b
END(__pthread_disable_asynccancel)
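The register-preservation constraint noted in the file's comment exists because these entry points are called from cancellable syscall wrappers. The C sketch below is illustrative only (cancellable_read is a hypothetical name, not glibc's actual wrapper, which goes through macros such as LIBC_CANCEL_ASYNC); it shows how the two entry points defined above bracket a blocking system call:

#include <unistd.h>

extern int __pthread_enable_asynccancel (void);
extern void __pthread_disable_asynccancel (int oldtype);

/* Hypothetical sketch: the value __pthread_enable_asynccancel returns
   (in %eax above) is handed back to __pthread_disable_asynccancel
   (in %edi above) so the previous cancellation type is restored.  */
ssize_t
cancellable_read (int fd, void *buf, size_t len)
{
  int old = __pthread_enable_asynccancel ();	/* May unwind if already
						   canceled.  */
  ssize_t ret = read (fd, buf, len);		/* Async cancellation may
						   fire during the call.  */
  __pthread_disable_asynccancel (old);
  return ret;
}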