/* Assembler macros for x86.
   Copyright (C) 2017-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _X86_SYSDEP_H
#define _X86_SYSDEP_H 1

#include <sysdeps/generic/sysdep.h>

/* The extended state feature IDs in the state component bitmap.  */
#define X86_XSTATE_X87_ID	0
#define X86_XSTATE_SSE_ID	1
#define X86_XSTATE_AVX_ID	2
#define X86_XSTATE_BNDREGS_ID	3
#define X86_XSTATE_BNDCFG_ID	4
#define X86_XSTATE_K_ID		5
#define X86_XSTATE_ZMM_H_ID	6
#define X86_XSTATE_ZMM_ID	7
#define X86_XSTATE_PKRU_ID	9
#define X86_XSTATE_TILECFG_ID	17
#define X86_XSTATE_TILEDATA_ID	18
#define X86_XSTATE_APX_F_ID	19

#ifdef __x86_64__
/* Offset for fxsave/xsave area used by _dl_runtime_resolve.  Also need
   space to preserve RCX, RDX, RSI, RDI, R8, R9 and RAX.  It must be
   aligned to 16 bytes for fxsave and 64 bytes for xsave.  It is non-zero
   because MOV, instead of PUSH, is used to save registers onto stack.

   +==================+<- stack frame start aligned at 8 or 16 bytes
   |                  |<- paddings for stack realignment of 64 bytes
   |------------------|<- xsave buffer end aligned at 64 bytes
   |                  |<-
   |                  |<-
   |                  |<-
   |------------------|<- xsave buffer start at STATE_SAVE_OFFSET(%rsp)
   |                  |<- 8-byte padding for 64-byte alignment
   |                  |<- R9
   |                  |<- R8
   |                  |<- RDI
   |                  |<- RSI
   |                  |<- RDX
   |                  |<- RCX
   |                  |<- RAX
   +==================+<- RSP aligned at 64 bytes

 */
# define STATE_SAVE_OFFSET (8 * 7 + 8)

/* _dl_tlsdesc_dynamic preserves RDI, RSI and RBX before realigning
   stack.  After realigning stack, it saves RCX, RDX, R8, R9, R10 and
   R11.  Allocate space for RDI, RSI and RBX to avoid clobbering saved
   RDI, RSI and RBX values on stack by xsave.

   +==================+<- stack frame start aligned at 8 or 16 bytes
   |                  |<- RDI saved in the red zone
   |                  |<- RSI saved in the red zone
   |                  |<- RBX saved in the red zone
   |                  |<- paddings for stack realignment of 64 bytes
   |------------------|<- xsave buffer end aligned at 64 bytes
   |                  |<-
   |                  |<-
   |                  |<-
   |------------------|<- xsave buffer start at STATE_SAVE_OFFSET(%rsp)
   |                  |<- 8-byte padding for 64-byte alignment
   |                  |<- 8-byte padding for 64-byte alignment
   |                  |<- R11
   |                  |<- R10
   |                  |<- R9
   |                  |<- R8
   |                  |<- RDX
   |                  |<- RCX
   +==================+<- RSP aligned at 64 bytes

   Define the total register save area size for all integer registers by
   adding 24 to STATE_SAVE_OFFSET since RDI, RSI and RBX are saved onto
   stack without adjusting stack pointer first, using the red-zone.  */
# define TLSDESC_CALL_REGISTER_SAVE_AREA (STATE_SAVE_OFFSET + 24)

/* Save SSE, AVX, AVX512, mask, bound and APX registers.  Bound and APX
   registers are mutually exclusive.  */
# define STATE_SAVE_MASK		\
  ((1 << X86_XSTATE_SSE_ID)		\
   | (1 << X86_XSTATE_AVX_ID)		\
   | (1 << X86_XSTATE_BNDREGS_ID)	\
   | (1 << X86_XSTATE_K_ID)		\
   | (1 << X86_XSTATE_ZMM_H_ID)		\
   | (1 << X86_XSTATE_ZMM_ID)		\
   | (1 << X86_XSTATE_APX_F_ID))

/* AMX state mask.  */
# define AMX_STATE_SAVE_MASK		\
  ((1 << X86_XSTATE_TILECFG_ID) | (1 << X86_XSTATE_TILEDATA_ID))

/* States to be included in xsave_state_full_size.  */
# define FULL_STATE_SAVE_MASK		\
  (STATE_SAVE_MASK | AMX_STATE_SAVE_MASK)
#else
/* Offset for fxsave/xsave area used by _dl_tlsdesc_dynamic.  Since i386
   uses PUSH to save registers onto stack, use 0 here.  */
# define STATE_SAVE_OFFSET 0
# define TLSDESC_CALL_REGISTER_SAVE_AREA 0

/* Save SSE, AVX, AVX512, mask and bound registers.  */
# define STATE_SAVE_MASK		\
  ((1 << X86_XSTATE_SSE_ID)		\
   | (1 << X86_XSTATE_AVX_ID)		\
   | (1 << X86_XSTATE_BNDREGS_ID)	\
   | (1 << X86_XSTATE_K_ID)		\
   | (1 << X86_XSTATE_ZMM_H_ID))

/* States to be included in xsave_state_size.  */
# define FULL_STATE_SAVE_MASK STATE_SAVE_MASK
#endif

/* States which should be saved for TLSDESC_CALL and TLS_DESC_CALL.
   Compiler assumes that all registers, including AMX and x87 FPU
   stack registers, are unchanged after CALL, except for EFLAGS and
   RAX/EAX.  */
#define TLSDESC_CALL_STATE_SAVE_MASK	\
  (FULL_STATE_SAVE_MASK | (1 << X86_XSTATE_X87_ID))

/* Constants for bits in __x86_string_control:  */

/* Avoid short distance REP MOVSB.  */
#define X86_STRING_CONTROL_AVOID_SHORT_DISTANCE_REP_MOVSB	(1 << 0)

#ifdef	__ASSEMBLER__

/* Syntactic details of assembler.  */

/* ELF uses byte-counts for .align, most others use log2 of count of bytes.  */
#define ALIGNARG(log2) 1<<log2
#define ASM_SIZE_DIRECTIVE(name) .size name,.-name;

/* Common entry 16 byte aligns.  */
#define ENTRY(name) ENTRY_P2ALIGN (name, 4)

#undef END
#define END(name)							      \
  cfi_endproc;								      \
  ASM_SIZE_DIRECTIVE(name)

#define ENTRY_CHK(name) ENTRY (name)
#define END_CHK(name) END (name)

/* Since C identifiers are not normally prefixed with an underscore
   on this system, the asm identifier `syscall_error' intrudes on the
   C name space.  Make sure we use an innocuous name.  */
#define	syscall_error	__syscall_error
#define mcount		_mcount

#undef	PSEUDO_END
#define	PSEUDO_END(name)						      \
  END (name)

/* Local label name for asm code.  */
#ifndef L
/* ELF-like local names start with `.L'.  */
# define LOCAL_LABEL(name) .L##name
# define L(name)	LOCAL_LABEL(name)
#endif

#define atom_text_section .section ".text.atom", "ax"

#endif	/* __ASSEMBLER__ */

#endif	/* _X86_SYSDEP_H */