glibc/sysdeps/x86_64/nptl/pthread_spin_lock.S
Noah Goldstein 653c12c7d8 x86: Cleanup pthread_spin_{try}lock.S
Save a jmp on the lock path coming from an initial failure in
pthread_spin_lock.S.  This costs 4 bytes of code, but since the
function still fits in the same number of 16-byte blocks (the default
function alignment), it does not affect the total binary size of
libc.so (unchanged after this commit).

pthread_spin_trylock was using a CAS where a simple xchg suffices;
the CAS is often the more expensive of the two.
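
As a minimal C sketch of the two trylock strategies (an illustration
using GCC atomic builtins, not the glibc source; the helper names are
hypothetical), with this lock's convention of 1 = free and <= 0 =
held:

    #include <errno.h>

    /* CAS version: succeeds only when the lock still holds the
       expected "free" value.  The compare-and-swap is often more
       expensive than a plain swap on x86.  */
    static int
    trylock_cas (int *lock)
    {
      int expected = 1;
      return __atomic_compare_exchange_n (lock, &expected, 0,
                                          0 /* strong */,
                                          __ATOMIC_ACQUIRE,
                                          __ATOMIC_RELAXED)
             ? 0 : EBUSY;
    }

    /* xchg version: unconditionally swap in "held" and inspect the
       old value; on x86 an xchg with a memory operand is implicitly
       LOCKed, so no LOCK prefix or compare step is needed.  */
    static int
    trylock_xchg (int *lock)
    {
      return __atomic_exchange_n (lock, 0, __ATOMIC_ACQUIRE) == 1
             ? 0 : EBUSY;
    }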

Full check passes on x86-64.
2022-10-03 14:13:49 -07:00

/* Copyright (C) 2012-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <sysdep.h>
#include <shlib-compat.h>

ENTRY(__pthread_spin_lock)
	/* Always return zero.  */
	xor	%eax, %eax
	LOCK
	decl	0(%rdi)
	/* The lock is free when it holds 1; `lock decl` moving it
	   from 1 to 0 sets ZF, so a nonzero result means the lock was
	   already held and we must spin.  */
	jne	1f
	ret

	.align	16
1:
	/* `rep nop` == `pause`.  */
	rep
	nop
	/* %eax is still zero: spin on plain loads until the value
	   turns positive (free) again.  */
	cmpl	%eax, 0(%rdi)
	jle	1b

	/* Just repeat the `lock decl` logic here.  The code size save
	   of jumping back to entry doesn't change how many 16-byte
	   chunks (default function alignment) that the code fits in.  */
	LOCK
	decl	0(%rdi)
	jne	1b
	ret
END(__pthread_spin_lock)

versioned_symbol (libc, __pthread_spin_lock, pthread_spin_lock, GLIBC_2_34)
#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_2, GLIBC_2_34)
compat_symbol (libpthread, __pthread_spin_lock, pthread_spin_lock, GLIBC_2_2)
#endif
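
For reference, a minimal C sketch of the lock path above (an
illustration, not the glibc source; the helper name is hypothetical
and GCC builtins are assumed): the `lock decl` maps to an atomic
fetch-and-decrement that acquires only when it moves the lock from 1
(free) to 0 (held), and the `rep nop` loop maps to a read-only pause
spin:

    /* Lock convention: 1 = free, <= 0 = held.  */
    static int
    spin_lock_sketch (int *lock)
    {
      /* Acquire when the decrement moves the lock from 1 to 0,
         i.e. when the previous value was 1.  */
      while (__atomic_fetch_sub (lock, 1, __ATOMIC_ACQUIRE) != 1)
        {
          /* Spin on plain loads (keeping the cache line shared)
             until the lock looks free, then retry the decrement.  */
          while (__atomic_load_n (lock, __ATOMIC_RELAXED) <= 0)
            __builtin_ia32_pause ();  /* `rep nop` == `pause`.  */
        }
      return 0;  /* Always return zero.  */
    }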