Mirror of https://sourceware.org/git/glibc.git
Synced 2024-11-09 23:00:07 +00:00
25f1e16ef0
CET is only supported for x86_64, so this patch reverts:

- faaee1f07e x86: Support shadow stack pointer in setjmp/longjmp.
- be9ccd27c0 i386: Add _CET_ENDBR to indirect jump targets in add_n.S/sub_n.S
- c02695d776 x86/CET: Update vfork to prevent child return
- 5d844e1b72 i386: Enable CET support in ucontext functions
- 124bcde683 x86: Add _CET_ENDBR to functions in crti.S
- 562837c002 x86: Add _CET_ENDBR to functions in dl-tlsdesc.S
- f753fa7dea x86: Support IBT and SHSTK in Intel CET [BZ #21598]
- 825b58f3fb i386-mcount.S: Add _CET_ENDBR to _mcount and __fentry__
- 7e119cd582 i386: Use _CET_NOTRACK in i686/memcmp.S
- 177824e232 i386: Use _CET_NOTRACK in memcmp-sse4.S
- 0a899af097 i386: Use _CET_NOTRACK in memcpy-ssse3-rep.S
- 7fb613361c i386: Use _CET_NOTRACK in memcpy-ssse3.S
- 77a8ae0948 i386: Use _CET_NOTRACK in memset-sse2-rep.S
- 00e7b76a8f i386: Use _CET_NOTRACK in memset-sse2.S
- 90d15dc577 i386: Use _CET_NOTRACK in strcat-sse2.S
- f1574581c7 i386: Use _CET_NOTRACK in strcpy-sse2.S
- 4031d7484a i386/sub_n.S: Add a missing _CET_ENDBR to indirect jump target

Checked on i686-linux-gnu.
207 lines
5.7 KiB
C
/* Assembler macros for x86-64.
   Copyright (C) 2001-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _X86_64_SYSDEP_H
#define _X86_64_SYSDEP_H 1

#include <sysdeps/x86/sysdep.h>
#include <x86-lp_size.h>

/* __CET__ is defined by GCC with Control-Flow Protection values:

   enum cf_protection_level
   {
     CF_NONE = 0,
     CF_BRANCH = 1 << 0,
     CF_RETURN = 1 << 1,
     CF_FULL = CF_BRANCH | CF_RETURN,
     CF_SET = 1 << 2
   };
 */

/* Set if CF_BRANCH (IBT) is enabled.  */
#define X86_FEATURE_1_IBT (1U << 0)
/* Set if CF_RETURN (SHSTK) is enabled.  */
#define X86_FEATURE_1_SHSTK (1U << 1)

#ifdef __CET__
# define CET_ENABLED 1
# define SHSTK_ENABLED (__CET__ & X86_FEATURE_1_SHSTK)
#else
# define CET_ENABLED 0
# define SHSTK_ENABLED 0
#endif
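
/* For example, compiling with GCC's -fcf-protection=full defines __CET__
   as CF_BRANCH | CF_RETURN, so CET_ENABLED is 1 and SHSTK_ENABLED expands
   to a non-zero value, while -fcf-protection=branch sets only the IBT bit
   and leaves SHSTK_ENABLED equal to 0.  */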

#ifdef __ASSEMBLER__

/* Syntactic details of assembler.  */

#ifdef _CET_ENDBR
# define _CET_NOTRACK notrack
#else
# define _CET_ENDBR
# define _CET_NOTRACK
#endif

/* Define an entry point visible from C.  */
#define ENTRY_P2ALIGN(name, alignment)                                        \
  .globl C_SYMBOL_NAME(name);                                                 \
  .type C_SYMBOL_NAME(name),@function;                                        \
  .align ALIGNARG(alignment);                                                 \
  C_LABEL(name)                                                               \
  cfi_startproc;                                                              \
  _CET_ENDBR;                                                                 \
  CALL_MCOUNT
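
/* For illustration (hypothetical function name), a definition such as

     ENTRY_P2ALIGN (__example_func, 4)

   expands to roughly

     .globl C_SYMBOL_NAME(__example_func);
     .type C_SYMBOL_NAME(__example_func),@function;
     .align ALIGNARG(4);
     C_LABEL(__example_func)
     cfi_startproc;
     _CET_ENDBR;
     CALL_MCOUNT

   i.e. a 16-byte-aligned global function label with CFI opened, an ENDBR
   landing pad when IBT is enabled, and an mcount call in profiling builds.  */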

/* This macro is for setting proper CFI with DW_CFA_expression describing
   the register as saved relative to %rsp instead of relative to the CFA.
   Expression is DW_OP_drop, DW_OP_breg7 (%rsp is register 7), sleb128 offset
   from %rsp.  */
#define cfi_offset_rel_rsp(regn, off)  .cfi_escape 0x10, regn, 0x4, 0x13,     \
                                       0x77, off & 0x7F | 0x80, off >> 7
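
/* For instance (illustrative operands), cfi_offset_rel_rsp (3, 128)
   describes DWARF register 3 (%rbx) as saved at %rsp + 128: it emits
   0x10 (DW_CFA_expression), 3, 0x4 (expression length), 0x13 (DW_OP_drop),
   0x77 (DW_OP_breg7) and the SLEB128 offset bytes 0x80, 0x01, because
   128 & 0x7F | 0x80 is 0x80 and 128 >> 7 is 1.  */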

/* If compiled for profiling, call `mcount' at the start of each function.  */
#ifdef PROF
/* The mcount code relies on a normal frame pointer being on the stack
   to locate our caller, so push one just for its benefit.  */
#define CALL_MCOUNT                                                           \
  pushq %rbp;                                                                 \
  cfi_adjust_cfa_offset(8);                                                   \
  movq %rsp, %rbp;                                                            \
  cfi_def_cfa_register(%rbp);                                                 \
  call JUMPTARGET(mcount);                                                    \
  popq %rbp;                                                                  \
  cfi_def_cfa(rsp,8);
#else
#define CALL_MCOUNT /* Do nothing.  */
#endif

#define PSEUDO(name, syscall_name, args)                                      \
lose:                                                                         \
  jmp JUMPTARGET(syscall_error)                                               \
  .globl syscall_error;                                                       \
  ENTRY (name)                                                                \
  DO_CALL (syscall_name, args);                                               \
  jb lose

#undef JUMPTARGET
#ifdef SHARED
# ifdef BIND_NOW
#  define JUMPTARGET(name) *name##@GOTPCREL(%rip)
# else
#  define JUMPTARGET(name) name##@PLT
# endif
#else
/* For static archives, branch to target directly.  */
# define JUMPTARGET(name) name
#endif
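
/* For example, "call JUMPTARGET(mcount)" in CALL_MCOUNT above assembles
   to "call mcount@PLT" in a shared build, to the indirect
   "call *mcount@GOTPCREL(%rip)" when BIND_NOW is defined (so no lazy PLT
   entry is needed), and to a direct "call mcount" in static objects.  */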

/* Instruction to operate on long and pointer.  */
#define LP_OP(insn) insn##q

/* Assembler address directive.  */
#define ASM_ADDR .quad

/* Registers to hold long and pointer.  */
#define RAX_LP rax
#define RBP_LP rbp
#define RBX_LP rbx
#define RCX_LP rcx
#define RDI_LP rdi
#define RDX_LP rdx
#define RSI_LP rsi
#define RSP_LP rsp
#define R8_LP r8
#define R9_LP r9
#define R10_LP r10
#define R11_LP r11
#define R12_LP r12
#define R13_LP r13
#define R14_LP r14
#define R15_LP r15
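
/* For example, "LP_OP(mov) %RAX_LP, %RDI_LP" in an assembler source
   expands here to "movq %rax, %rdi", and ASM_ADDR emits an 8-byte value;
   an ILP32 (x32) configuration can map the same names onto 32-bit
   operations and registers so shared sources work for both ABIs.  */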

/* Zero upper vector registers and return with xtest.  NB: use VZEROALL
   to avoid the RTM abort that VZEROUPPER triggers inside a transaction.  */
#define ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST                                 \
  xtest;                                                                      \
  jnz 1f;                                                                     \
  vzeroupper;                                                                 \
  ret;                                                                        \
1:                                                                            \
  vzeroall;                                                                   \
  ret
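
/* xtest clears ZF only while an RTM transaction is in progress, so this
   sequence returns via vzeroupper outside a transaction and via vzeroall
   inside one, where vzeroupper would abort the transaction.  */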

/* Can be used to replace vzeroupper that is not directly before a
   return.  This is useful when hoisting a vzeroupper from multiple
   return paths to decrease the total number of vzerouppers and code
   size.  */
#define COND_VZEROUPPER_XTEST                                                 \
  xtest;                                                                      \
  jz 1f;                                                                      \
  vzeroall;                                                                   \
  jmp 2f;                                                                     \
1:                                                                            \
  vzeroupper;                                                                 \
2:

/* In RTM define this as COND_VZEROUPPER_XTEST.  */
#ifndef COND_VZEROUPPER
# define COND_VZEROUPPER vzeroupper
#endif
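
/* For example, an RTM-safe AVX2 implementation can define

     #define COND_VZEROUPPER  COND_VZEROUPPER_XTEST
     #define ZERO_UPPER_VEC_REGISTERS_RETURN  ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST

   before the shared string/memory code is included, so the xtest-based
   variants are used instead of a bare vzeroupper.  */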

/* Zero upper vector registers and return.  */
#ifndef ZERO_UPPER_VEC_REGISTERS_RETURN
# define ZERO_UPPER_VEC_REGISTERS_RETURN                                      \
  VZEROUPPER;                                                                 \
  ret
#endif

#ifndef VZEROUPPER_RETURN
# define VZEROUPPER_RETURN VZEROUPPER; ret
#endif

#else /* __ASSEMBLER__ */

/* Instruction to operate on long and pointer.  */
#define LP_OP(insn) #insn "q"

/* Assembler address directive.  */
#define ASM_ADDR ".quad"

/* Registers to hold long and pointer.  */
#define RAX_LP "rax"
#define RBP_LP "rbp"
#define RBX_LP "rbx"
#define RCX_LP "rcx"
#define RDI_LP "rdi"
#define RDX_LP "rdx"
#define RSI_LP "rsi"
#define RSP_LP "rsp"
#define R8_LP "r8"
#define R9_LP "r9"
#define R10_LP "r10"
#define R11_LP "r11"
#define R12_LP "r12"
#define R13_LP "r13"
#define R14_LP "r14"
#define R15_LP "r15"
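
/* These string forms are meant for C-level inline assembly, e.g.
   (illustrative snippet)

     unsigned long int sp;
     asm (LP_OP(mov) " %%" RSP_LP ", %0" : "=r" (sp));

   where the string literals concatenate to "movq %%rsp, %0" on x86-64.  */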

#endif /* __ASSEMBLER__ */

#endif /* _X86_64_SYSDEP_H */