glibc/sysdeps/powerpc/powerpc64/sysdep.h

/* Assembly macros for 64-bit PowerPC.
Copyright (C) 2002-2021 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sysdeps/powerpc/sysdep.h>
#include <tls.h>
#ifdef __ASSEMBLER__
/* Stack frame offsets. */
#define FRAME_BACKCHAIN 0
#define FRAME_CR_SAVE 8
#define FRAME_LR_SAVE 16
#if _CALL_ELF != 2
#define FRAME_MIN_SIZE 112
#define FRAME_MIN_SIZE_PARM 112
#define FRAME_TOC_SAVE 40
#define FRAME_PARM_SAVE 48
#else
#define FRAME_MIN_SIZE 32
#define FRAME_MIN_SIZE_PARM 96
#define FRAME_TOC_SAVE 24
#define FRAME_PARM_SAVE 32
#endif
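/* A minimal sketch (hypothetical code, not part of this header) of how these
   offsets are used by a non-leaf function: the prologue saves LR into the
   caller's frame and allocates its own frame, the epilogue undoes both.

	mflr	r0
	std	r0,FRAME_LR_SAVE(r1)
	stdu	r1,-FRAME_MIN_SIZE(r1)
	...
	ld	r0,FRAME_MIN_SIZE+FRAME_LR_SAVE(r1)
	mtlr	r0
	addi	r1,r1,FRAME_MIN_SIZE
	blr  */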
/* Support macros for CALL_MCOUNT. */
.macro SAVE_ARG NARG
.if \NARG
SAVE_ARG \NARG-1
std 2+\NARG,-FRAME_MIN_SIZE_PARM+FRAME_PARM_SAVE-8+8*(\NARG)(1)
.endif
.endm
.macro REST_ARG NARG
.if \NARG
REST_ARG \NARG-1
ld 2+\NARG,FRAME_PARM_SAVE-8+8*(\NARG)(1)
.endif
.endm
.macro CFI_SAVE_ARG NARG
.if \NARG
CFI_SAVE_ARG \NARG-1
cfi_offset(2+\NARG,-FRAME_MIN_SIZE_PARM+FRAME_PARM_SAVE-8+8*(\NARG))
.endif
.endm
.macro CFI_REST_ARG NARG
.if \NARG
CFI_REST_ARG \NARG-1
cfi_restore(2+\NARG)
.endif
.endm
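/* Worked example (assuming the ELFv2 values above, FRAME_MIN_SIZE_PARM = 96
   and FRAME_PARM_SAVE = 32): "SAVE_ARG 2" recurses down through NARG=1 and
   emits

	std	3,-64(1)
	std	4,-56(1)

   i.e. stores of the first two argument registers below the stack pointer,
   landing in the parameter save area of the frame CALL_MCOUNT is about to
   create with stdu.  REST_ARG uses the matching positive offsets
   (FRAME_PARM_SAVE-8+8*NARG) once that frame exists.  */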
/* If compiled for profiling, call `_mcount' at the start of each function.
See ppc-mcount.S for more details. */
.macro CALL_MCOUNT NARG
#ifdef PROF
mflr r0
SAVE_ARG \NARG
std r0,FRAME_LR_SAVE(r1)
stdu r1,-FRAME_MIN_SIZE_PARM(r1)
cfi_adjust_cfa_offset(FRAME_MIN_SIZE_PARM)
cfi_offset(lr,FRAME_LR_SAVE)
CFI_SAVE_ARG \NARG
bl JUMPTARGET (_mcount)
#ifndef SHARED
nop
#endif
ld r0,FRAME_MIN_SIZE_PARM+FRAME_LR_SAVE(r1)
REST_ARG \NARG
mtlr r0
addi r1,r1,FRAME_MIN_SIZE_PARM
cfi_adjust_cfa_offset(-FRAME_MIN_SIZE_PARM)
cfi_restore(lr)
CFI_REST_ARG \NARG
#endif
.endm
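/* Typical use in a profiled assembly routine (a sketch): CALL_MCOUNT follows
   the entry point with the number of live argument registers to preserve,

	ENTRY (memcpy)
	CALL_MCOUNT 3
	...

   which saves r3..r5 around the _mcount call and expands to nothing unless
   PROF is defined.  */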
#if _CALL_ELF != 2
/* Macro to prepare for calling via a function pointer. */
.macro PPC64_LOAD_FUNCPTR PTR
ld r12,0(\PTR)
ld r2,8(\PTR)
mtctr r12
ld r11,16(\PTR)
.endm
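/* Under ELFv1 a function pointer addresses a 3-doubleword descriptor:
   entry point, TOC pointer, environment pointer.  The loads above fill
   r12/r2/r11 from those slots, so an indirect call looks like (sketch):

	PPC64_LOAD_FUNCPTR r3
	bctrl  */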
#ifdef USE_PPC64_OVERLAPPING_OPD
# define OPD_ENT(name) .quad BODY_LABEL (name), .TOC.@tocbase
#else
# define OPD_ENT(name) .quad BODY_LABEL (name), .TOC.@tocbase, 0
#endif
#define ENTRY_1(name) \
.type BODY_LABEL(name),@function; \
.globl name; \
.section ".opd","aw"; \
.p2align 3; \
FUNC_LABEL(name): \
OPD_ENT (name); \
.previous
#define FUNC_LABEL(X) X
#define BODY_LABEL(X) .LY##X
#define ENTRY_2(name) \
.type name,@function; \
ENTRY_1(name)
#define END_2(name) \
.size name,.-BODY_LABEL(name); \
.size BODY_LABEL(name),.-BODY_LABEL(name)
#define LOCALENTRY(name)
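/* For reference, with the definitions above "ENTRY_2(foo)" (hypothetical
   name, non-overlapping OPD_ENT shown) expands to an official procedure
   descriptor in .opd pointing at the code label:

	.type	foo,@function
	.type	.LYfoo,@function
	.globl	foo
	.section ".opd","aw"
	.p2align 3
foo:	.quad	.LYfoo, .TOC.@tocbase, 0
	.previous  */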
#else /* _CALL_ELF == 2 */
/* Macro to prepare for calling via a function pointer. */
.macro PPC64_LOAD_FUNCPTR PTR
mr r12,\PTR
mtctr r12
.endm
#define FUNC_LABEL(X) X
#define BODY_LABEL(X) X
#define ENTRY_2(name) \
.globl name; \
.type name,@function
#define END_2(name) \
.size name,.-name
#define LOCALENTRY(name) \
1: addis r2,r12,.TOC.-1b@ha; \
addi r2,r2,.TOC.-1b@l; \
.localentry name,.-name
#endif /* _CALL_ELF */
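/* Under ELFv2 there is no descriptor: "name" labels the code itself, and
   LOCALENTRY(foo) (hypothetical name) expands to the dual-entry preamble

1:	addis	r2,r12,.TOC.-1b@ha
	addi	r2,r2,.TOC.-1b@l
	.localentry foo,.-foo

   Callers that cannot assume a shared TOC enter at the global entry with
   r12 holding the function's own address, from which the addis/addi pair
   rebuilds r2; same-TOC callers branch straight to the local entry.  */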
.macro NOPS NARG
.if \NARG
NOPS \NARG-1
nop
.endif
.endm
.macro ENTRY_3 name, alignp2=2, nopwords=0
.text
ENTRY_2(\name)
.p2align \alignp2
NOPS \nopwords
BODY_LABEL(\name):
.endm
/* Use ENTRY_TOCLESS for functions that make no use of r2 and
guarantee r2 is unchanged on exit. Any function that has @toc or
@got relocs uses r2. Functions that call other functions via the
PLT use r2. Use ENTRY for functions that may use or change r2.
The first argument is the function name.
The optional second argument specifies alignment of the function's
code, as the logarithm base two of the byte alignment. For
example, a value of four aligns to a sixteen byte boundary.
The optional third argument specifies the number of NOPs to emit
before the start of the function's code. */
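/* For example (sketch; the name is hypothetical), a leaf routine that
   touches neither the TOC nor the GOT can be declared

	ENTRY_TOCLESS (__foo, 4)
	blr
	END (__foo)

   aligning its code to 16 bytes, while anything with @toc/@got relocs or
   PLT calls must use ENTRY so LOCALENTRY can set up r2.  */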
#ifndef PROF
#define ENTRY_TOCLESS(name, ...) \
ENTRY_3 name, ## __VA_ARGS__; \
cfi_startproc
#define ENTRY(name, ...) \
ENTRY_TOCLESS(name, ## __VA_ARGS__); \
LOCALENTRY(name)
#else
/* The call to _mcount is potentially via the plt, so profiling code
is never free of an r2 use. */
#define ENTRY_TOCLESS(name, ...) \
ENTRY_3 name, ## __VA_ARGS__; \
cfi_startproc; \
LOCALENTRY(name)
#define ENTRY(name, ...) \
ENTRY_TOCLESS(name, ## __VA_ARGS__)
#endif
/* Local labels stripped out by the linker. */
#undef L
#define L(x) .L##x
#define tostring(s) #s
#define stringify(s) tostring(s)
#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)
#define LT_LABEL(name) GLUE(.LT,name)
#define LT_LABELSUFFIX(name,suffix) GLUE(GLUE(.LT,name),suffix)
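/* E.g. LT_LABEL(memcpy) glues to ".LTmemcpy" and
   LT_LABELSUFFIX(memcpy,_name_end) to ".LTmemcpy_name_end"; the TRACEBACK
   macros below use these labels to bracket the function name they embed.  */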
/* Support Traceback tables */
#define TB_ASM 0x000c000000000000
#define TB_GLOBALLINK 0x0000800000000000
#define TB_IS_EPROL 0x0000400000000000
#define TB_HAS_TBOFF 0x0000200000000000
#define TB_INT_PROC 0x0000100000000000
#define TB_HAS_CTL 0x0000080000000000
#define TB_TOCLESS 0x0000040000000000
#define TB_FP_PRESENT 0x0000020000000000
#define TB_LOG_ABORT 0x0000010000000000
#define TB_INT_HANDL 0x0000008000000000
#define TB_NAME_PRESENT 0x0000004000000000
#define TB_USES_ALLOCA 0x0000002000000000
#define TB_SAVES_CR 0x0000000200000000
#define TB_SAVES_LR 0x0000000100000000
#define TB_STORES_BC 0x0000000080000000
#define TB_FIXUP 0x0000000040000000
#define TB_FP_SAVED(fprs) (((fprs) & 0x3f) << 24)
#define TB_GPR_SAVED(gprs) (((gprs) & 0x3f) << 16)
#define TB_FIXEDPARMS(parms) (((parms) & 0xff) << 8)
#define TB_FLOATPARMS(parms) (((parms) & 0x7f) << 1)
#define TB_PARMSONSTK 0x0000000000000001
#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
#define TB_DEFAULT TB_ASM | TB_HAS_TBOFF | TB_NAME_PRESENT
#define TRACEBACK(name) \
LT_LABEL(name): ; \
.long 0 ; \
.quad TB_DEFAULT ; \
.long LT_LABEL(name)-BODY_LABEL(name) ; \
.short LT_LABELSUFFIX(name,_name_end)-LT_LABELSUFFIX(name,_name_start) ; \
LT_LABELSUFFIX(name,_name_start): ; \
.ascii stringify(name) ; \
LT_LABELSUFFIX(name,_name_end): ; \
.p2align 2
#define TRACEBACK_MASK(name,mask) \
LT_LABEL(name): ; \
.long 0 ; \
.quad TB_DEFAULT | mask ; \
.long LT_LABEL(name)-BODY_LABEL(name) ; \
.short LT_LABELSUFFIX(name,_name_end)-LT_LABELSUFFIX(name,_name_start) ; \
LT_LABELSUFFIX(name,_name_start): ; \
.ascii stringify(name) ; \
LT_LABELSUFFIX(name,_name_end): ; \
.p2align 2
/* END generates Traceback tables */
#undef END
#define END(name) \
cfi_endproc; \
TRACEBACK(name); \
END_2(name)
/* This form supports more informative traceback tables */
#define END_GEN_TB(name,mask) \
cfi_endproc; \
TRACEBACK_MASK(name,mask); \
END_2(name)
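/* E.g. a routine declared with ENTRY_TOCLESS typically ends with (sketch,
   following the pairing the powerpc64 memcpy uses)

	END_GEN_TB (memcpy,TB_TOCLESS)

   which ORs TB_TOCLESS into the default traceback flags.  */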
/* We will allocate a new frame to save LR and the non-volatile register used to
read the TCB when checking for scv support on syscall code. We actually just
need the minimum frame size plus room for 1 reg (8 bytes). But the ABI
mandates stack frames should be aligned at 16 bytes, so we end up allocating
a bit more space than what will actually be used. */
#define SCV_FRAME_SIZE (FRAME_MIN_SIZE+16)
#define SCV_FRAME_NVOLREG_SAVE FRAME_MIN_SIZE
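/* For illustration, with the ELFv2 values FRAME_MIN_SIZE == 32 and
   FRAME_LR_SAVE == 16 (an assumption for this sketch; the real values are
   defined earlier in this header), the frame built by NVOLREG_SAVE is:

       r1+40 .. r1+47   padding to keep 16-byte alignment
       r1+32            r31 save slot (SCV_FRAME_NVOLREG_SAVE)
       r1+16            LR save doubleword, filled in by DO_CALL_SCV
       r1+0             back chain to the caller's frame

   The other header doublewords (CR at r1+8, TOC at r1+24) are unused here,
   and SCV_FRAME_SIZE comes out to 48 bytes.  */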
/* Allocate the frame and save the non-volatile register.  */
#define NVOLREG_SAVE \
stdu r1,-SCV_FRAME_SIZE(r1); \
std r31,SCV_FRAME_NVOLREG_SAVE(r1); \
cfi_adjust_cfa_offset(SCV_FRAME_SIZE);
/* Restore the non-volatile register and deallocate the frame.  */
#define NVOLREG_RESTORE \
ld r31,SCV_FRAME_NVOLREG_SAVE(r1); \
addi r1,r1,SCV_FRAME_SIZE; \
cfi_adjust_cfa_offset(-SCV_FRAME_SIZE);
/* Check the PPC_FEATURE2_SCV bit in hwcap2, read from the TCB.  If it is not
   set, scv is not available, so branch to JUMPFALSE (a label supplied by the
   macro's caller).  We save the value read from the TCB in a non-volatile
   register so it can be reused later when exiting from the syscall in
   PSEUDO_RET.  Note that for the static case we need an extra check to
   guarantee the thread pointer has already been initialized; otherwise we
   may access an invalid address if a syscall is made before TLS has been
   set up.  */
.macro CHECK_SCV_SUPPORT REG JUMPFALSE
#ifndef SHARED
/* Check if the thread pointer has already been set up.  */
cmpdi r13,0
beq \JUMPFALSE
#endif
/* Read PPC_FEATURE2_SCV from the TCB and store it in REG.  */
ld \REG,TCB_HWCAP(PT_THREAD_POINTER)
andis. \REG,\REG,PPC_FEATURE2_SCV>>16
beq \JUMPFALSE
.endm
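/* A worked example of the test above, assuming PPC_FEATURE2_SCV is the
   usual 0x00100000 from <bits/hwcap.h>: the assembled immediate is
   0x00100000 >> 16 == 0x0010, and andis. shifts it back up by 16 bits
   before ANDing, so the AND isolates exactly the 0x00100000 bit of hwcap2
   and sets cr0.eq (taken by beq) when scv support is absent.  */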
#if !defined(USE_PPC_SCV) || IS_IN(rtld)
# define DO_CALL(syscall) \
li r0,syscall; \
DO_CALL_SC
#else
/* Before making the syscall, check whether we can use scv.  scv is supported
   on POWER9 and later, running Linux v5.9 or later.  If so, use it;
   otherwise, fall back to sc.  We cache hwcap2 from the TCB in a
   non-volatile register, so its previous content must be saved first.  */
# define DO_CALL(syscall) \
li r0,syscall; \
NVOLREG_SAVE; \
CHECK_SCV_SUPPORT r31 0f; \
DO_CALL_SCV; \
b 1f; \
0: DO_CALL_SC; \
1:
#endif /* !defined(USE_PPC_SCV) || IS_IN(rtld) */
/* DO_CALL_SC and DO_CALL_SCV expect the syscall number to be in r0. */
#define DO_CALL_SC \
sc
#define DO_CALL_SCV \
mflr r9; \
std r9,FRAME_LR_SAVE(r1); \
cfi_offset(lr,FRAME_LR_SAVE); \
.machine "push"; \
.machine "power9"; \
scv 0; \
.machine "pop"; \
ld r9,FRAME_LR_SAVE(r1); \
mtlr r9; \
cfi_restore(lr);
/* ppc64 is always PIC.  */
#undef JUMPTARGET
#define JUMPTARGET(name) FUNC_LABEL(name)
#define PSEUDO(name, syscall_name, args) \
.section ".text"; \
ENTRY (name); \
DO_CALL (SYS_ify (syscall_name))
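/* Sketch of how these macros compose in a generated stub (illustrative
   only; "name" and "syscall_name" stand in for a real syscall, roughly as
   instantiated by sysdeps/unix/syscall-template.S):

       PSEUDO (name, syscall_name, args)   entry point + DO_CALL
       ret                                 PSEUDO_RET: error check + return
       PSEUDO_END (name)                   traceback table + symbol size

   On the scv path, DO_CALL leaves hwcap2 in r31 and PSEUDO_RET consumes it,
   so the two must be kept paired.  */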
#ifdef SHARED
#define TAIL_CALL_SYSCALL_ERROR \
b JUMPTARGET (NOTOC (__syscall_error))
#else
/* The static version might be linked into a large application whose TOC
   exceeds 64 KiB.  We cannot put a TOC-adjusting stub on a plain branch, so
   we cannot tail-call __syscall_error.  */
#define TAIL_CALL_SYSCALL_ERROR \
.ifdef .Local_syscall_error; \
b .Local_syscall_error; \
.else; \
.Local_syscall_error: \
mflr 0; \
std 0,FRAME_LR_SAVE(1); \
stdu 1,-FRAME_MIN_SIZE(1); \
cfi_adjust_cfa_offset(FRAME_MIN_SIZE); \
cfi_offset(lr,FRAME_LR_SAVE); \
bl JUMPTARGET(__syscall_error); \
nop; \
ld 0,FRAME_MIN_SIZE+FRAME_LR_SAVE(1); \
addi 1,1,FRAME_MIN_SIZE; \
cfi_adjust_cfa_offset(-FRAME_MIN_SIZE); \
mtlr 0; \
cfi_restore(lr); \
blr; \
.endif
#endif
#if !defined(USE_PPC_SCV) || IS_IN(rtld)
# define PSEUDO_RET \
RET_SC; \
2010-08-12 16:19:19 +00:00
TAIL_CALL_SYSCALL_ERROR
#else
/* This must only be used after DO_CALL.  In that case, r31 contains the
   value of PPC_FEATURE2_SCV read from hwcap2 by CHECK_SCV_SUPPORT.  If it
   is set, we know we entered the kernel using scv, so handle the return
   code accordingly.  The comparison uses cr5 so that cr0, which RET_SC
   tests on the fallback path, is left untouched.  */
# define PSEUDO_RET \
cmpdi cr5,r31,0; \
NVOLREG_RESTORE; \
beq cr5,0f; \
RET_SCV; \
b 1f; \
0: RET_SC; \
1: TAIL_CALL_SYSCALL_ERROR
#endif /* !defined(USE_PPC_SCV) || IS_IN(rtld) */
#define RET_SCV \
li r9,-4095; \
cmpld r3,r9; \
bltlr+; \
neg r3,r3;
#define RET_SC \
bnslr+;
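/* The two kernel conventions differ, which is why RET_SCV and RET_SC cannot
   be shared: sc flags failure in cr0.SO and leaves a positive errno in r3,
   so RET_SC returns directly while SO is clear (bnslr+).  scv instead
   returns -errno in r3 like most Linux ABIs, so RET_SCV returns while r3 is
   below (unsigned) -4095 and otherwise negates r3 so that the common error
   path sees a positive errno.  */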
#define ret PSEUDO_RET
#undef PSEUDO_END
#define PSEUDO_END(name) \
END (name)
#define PSEUDO_NOERRNO(name, syscall_name, args) \
.section ".text"; \
ENTRY (name); \
DO_CALL (SYS_ify (syscall_name))
#if !defined(USE_PPC_SCV) || IS_IN(rtld)
# define PSEUDO_RET_NOERRNO \
blr
#else
/* This must only be used after DO_CALL.  */
# define PSEUDO_RET_NOERRNO \
NVOLREG_RESTORE; \
blr
#endif /* !defined(USE_PPC_SCV) || IS_IN(rtld) */
#define ret_NOERRNO PSEUDO_RET_NOERRNO
#undef PSEUDO_END_NOERRNO
#define PSEUDO_END_NOERRNO(name) \
END (name)
#define PSEUDO_ERRVAL(name, syscall_name, args) \
.section ".text"; \
ENTRY (name); \
DO_CALL (SYS_ify (syscall_name))
#if !defined(USE_PPC_SCV) || IS_IN(rtld)
# define PSEUDO_RET_ERRVAL \
blr
#else
/* This must only be used after DO_CALL.  */
# define PSEUDO_RET_ERRVAL \
NVOLREG_RESTORE; \
blr
#endif /* !defined(USE_PPC_SCV) || IS_IN(rtld) */
#define ret_ERRVAL PSEUDO_RET_ERRVAL
#undef PSEUDO_END_ERRVAL
#define PSEUDO_END_ERRVAL(name) \
END (name)
#ifdef SHARED
# if IS_IN (rtld)
/* Inside ld.so we use the local alias to avoid runtime GOT
relocations. */
# define __GLRO_DEF(var) \
.LC__ ## var: \
.tc _rtld_local_ro[TC],_rtld_local_ro
# else
# define __GLRO_DEF(var) \
.LC__ ## var: \
.tc _rtld_global_ro[TC],_rtld_global_ro
# endif
# define __GLRO(rOUT, var, offset) \
ld rOUT,.LC__ ## var@toc(r2); \
lwz rOUT,offset(rOUT)
#else
# define __GLRO_DEF(var) \
.LC__ ## var: \
.tc _ ## var[TC],_ ## var
# define __GLRO(rOUT, var, offset) \
ld rOUT,.LC__ ## var@toc(r2); \
lwz rOUT,0(rOUT)
#endif
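/* Illustrative use only (the offset name below is hypothetical; real
   callers take it from rtld-global-offsets.sym):

       __GLRO_DEF (dl_cache_line_size)
       ...
       __GLRO (r4, dl_cache_line_size, DL_CACHE_LINE_SIZE_OFFSET)

   This loads the address of the read-only globals (or of the variable
   itself in the static case) from the TOC entry declared by __GLRO_DEF,
   then fetches the 32-bit member with lwz.  */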
#ifdef USE_PPC64_NOTOC
# define NOTOC(l) l@notoc
#else
# define NOTOC(l) l
#endif
#else /* !__ASSEMBLER__ */
#if _CALL_ELF != 2
#define PPC64_LOAD_FUNCPTR(ptr) \
"ld 12,0(" #ptr ")\n" \
"ld 2,8(" #ptr ")\n" \
"mtctr 12\n" \
"ld 11,16(" #ptr ")"
#ifdef USE_PPC64_OVERLAPPING_OPD
# define OPD_ENT(name) ".quad " BODY_PREFIX #name ", .TOC.@tocbase"
#else
# define OPD_ENT(name) ".quad " BODY_PREFIX #name ", .TOC.@tocbase, 0"
#endif
#define ENTRY_1(name) \
".type " BODY_PREFIX #name ",@function\n" \
".globl " #name "\n" \
".pushsection \".opd\",\"aw\"\n" \
".p2align 3\n" \
#name ":\n" \
OPD_ENT (name) "\n" \
".popsection"
#define DOT_PREFIX ""
#define BODY_PREFIX ".LY"
#define ENTRY_2(name) \
".type " #name ",@function\n" \
ENTRY_1(name)
#define END_2(name) \
".size " #name ",.-" BODY_PREFIX #name "\n" \
".size " BODY_PREFIX #name ",.-" BODY_PREFIX #name
#define LOCALENTRY(name)
#else /* _CALL_ELF */
#define PPC64_LOAD_FUNCPTR(ptr) \
"mr 12," #ptr "\n" \
"mtctr 12"
#define DOT_PREFIX ""
#define BODY_PREFIX ""
#define ENTRY_2(name) \
".type " #name ",@function\n" \
".globl " #name
#define END_2(name) \
".size " #name ",.-" #name
#define LOCALENTRY(name) \
"1: addis 2,12,.TOC.-1b@ha\n" \
"addi 2,2,.TOC.-1b@l\n" \
".localentry " #name ",.-" #name
#endif /* _CALL_ELF */
#endif /* __ASSEMBLER__ */