mirror of https://sourceware.org/git/glibc.git (synced 2024-12-13 23:00:22 +00:00)
b8386c2859
This patch removes the PREPARE_VERSION and PREPARE_VERSION_KNOWN macros
and uses a static inline function instead, get_vdso_symbol.  Each
architecture that supports the vDSO must define the Linux version and its
hash for symbol resolution (the VDSO_NAME and VDSO_HASH macros,
respectively).  It also organizes the HAVE_*_VSYSCALL defines for mips,
powerpc, and s390 so they are set in a common header.  The idea is to
require less code to configure and enable vDSO support for newer ports.

No semantic changes are expected.

Checked with a build against all affected architectures.

	* sysdeps/unix/make-syscalls.sh: Make vDSO call use get_vdso_symbol.
	* sysdeps/unix/sysv/linux/aarch64/gettimeofday.c (__gettimeofday):
	Use get_vdso_symbol instead of _dl_vdso_vsym.
	* sysdeps/unix/sysv/linux/powerpc/time.c (time): Likewise.
	* sysdeps/unix/sysv/linux/riscv/flush-icache.c
	(__lookup_riscv_flush_icache): Likewise.
	* sysdeps/unix/sysv/linux/x86/gettimeofday.c (__gettimeofday): Likewise.
	* sysdeps/unix/sysv/linux/x86/time.c (time): Likewise.
	* sysdeps/unix/sysv/linux/powerpc/gettimeofday.c: Likewise.
	* sysdeps/unix/sysv/linux/aarch64/init-first.c: Likewise.
	* sysdeps/unix/sysv/linux/arm/init-first.c: Likewise.
	* sysdeps/unix/sysv/linux/i386/init-first.c: Likewise.
	* sysdeps/unix/sysv/linux/mips/init-first.c: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/init-first.c: Likewise.
	* sysdeps/unix/sysv/linux/riscv/init-first.c: Likewise.
	* sysdeps/unix/sysv/linux/sparc/init-first.c: Likewise.
	* sysdeps/unix/sysv/linux/s390/init-first.c: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/init-first.c: Likewise.
	* sysdeps/unix/sysv/linux/aarch64/sysdep.h (VDSO_NAME, VDSO_HASH):
	Define.
	* sysdeps/unix/sysv/linux/arm/sysdep.h (VDSO_NAME, VDSO_HASH): Likewise.
	* sysdeps/unix/sysv/linux/i386/sysdep.h (VDSO_NAME, VDSO_HASH):
	Likewise.
	* sysdeps/unix/sysv/linux/riscv/sysdep.h (VDSO_NAME, VDSO_HASH):
	Likewise.
	* sysdeps/unix/sysv/linux/sparc/sysdep.h (VDSO_NAME, VDSO_HASH):
	Likewise.
	* sysdeps/unix/sysv/linux/x86_64/sysdep.h (VDSO_NAME, VDSO_HASH):
	Likewise.
	* sysdeps/unix/sysv/linux/mips/mips32/sysdep.h
	(HAVE_CLOCK_GETTIME_VSYSCALL, HAVE_GETTIMEOFDAY_VSYSCALL): Remove
	definition.
	* sysdeps/unix/sysv/linux/mips/mips64/n32/sysdep.h
	(HAVE_CLOCK_GETTIME_VSYSCALL, HAVE_GETTIMEOFDAY_VSYSCALL): Likewise.
	* sysdeps/unix/sysv/linux/mips/mips64/n64/sysdep.h
	(HAVE_CLOCK_GETTIME_VSYSCALL, HAVE_GETTIMEOFDAY_VSYSCALL): Likewise.
	* sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep.h
	(HAVE_CLOCK_GETTIME_VSYSCALL, HAVE_GETTIMEOFDAY_VSYSCALL): Likewise.
	* sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h
	(HAVE_CLOCK_GETTIME_VSYSCALL, HAVE_GETTIMEOFDAY_VSYSCALL): Likewise.
	* sysdeps/unix/sysv/linux/s390/s390-32/sysdep.h
	(HAVE_CLOCK_GETRES_VSYSCALL, HAVE_CLOCK_GETTIME_VSYSCALL,
	HAVE_GETTIMEOFDAY_VSYSCALL, HAVE_GETCPU_VSYSCALL): Likewise.
	* sysdeps/unix/sysv/linux/s390/s390-64/sysdep.h
	(HAVE_CLOCK_GETRES_VSYSCALL, HAVE_CLOCK_GETTIME_VSYSCALL,
	HAVE_GETTIMEOFDAY_VSYSCALL, HAVE_GETCPU_VSYSCALL): Likewise.
	* sysdeps/unix/sysv/linux/mips/sysdep.h: New file.
	* sysdeps/unix/sysv/linux/powerpc/sysdep.h: Likewise.
	* sysdeps/unix/sysv/linux/s390/sysdep.h: Likewise.
	* sysdeps/unix/sysv/linux/dl-vdso.h (PREPARE_VERSION,
	PREPARE_VERSION_KNOWN, VDSO_NAME_LINUX_2_6, VDSO_HASH_LINUX_2_6,
	VDSO_NAME_LINUX_2_6_15, VDSO_HASH_LINUX_2_6_15,
	VDSO_NAME_LINUX_2_6_29, VDSO_HASH_LINUX_2_6_29,
	VDSO_NAME_LINUX_4_15, VDSO_HASH_LINUX_4_15): Remove defines.
	(get_vdso_symbol): New function.
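For context, the refactor reduces each vDSO call site to a single helper call.
The following is only a minimal sketch of the shape described above, not a
verbatim copy of the new dl-vdso.h; the struct initialization details, the
aarch64 version string and the symbol name are illustrative assumptions:

    /* Sketch only: each port's sysdep.h supplies VDSO_NAME (the Linux
       version node the vDSO symbols are versioned with) and VDSO_HASH
       (the precomputed ELF hash of that name).  */
    static inline void *
    get_vdso_symbol (const char *symbol)
    {
      struct r_found_version version =
        {
          .name = VDSO_NAME,      /* e.g. "LINUX_2.6.39" on aarch64.  */
          .hidden = 1,
          .hash = VDSO_HASH,
          .filename = NULL,
        };
      return _dl_vdso_vsym (symbol, &version);
    }

    /* A call site then reduces to, roughly:  */
    void *p = get_vdso_symbol ("__kernel_gettimeofday");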
319 lines, 10 KiB, C
/* Copyright (C) 2000-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _LINUX_MIPS_SYSDEP_H
#define _LINUX_MIPS_SYSDEP_H 1

/* There is some commonality.  */
#include <sysdeps/unix/sysv/linux/mips/sysdep.h>
#include <sysdeps/unix/sysv/linux/sysdep.h>
#include <sysdeps/unix/mips/mips64/n32/sysdep.h>

#include <tls.h>

/* In order to get __set_errno() definition in INLINE_SYSCALL.  */
#ifndef __ASSEMBLER__
#include <errno.h>
#endif

/* For Linux we can use the system call table in the header file
     /usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name)  __NR_##syscall_name

#ifdef __ASSEMBLER__

/* We don't want the label for the error handler to be visible in the symbol
   table when we define it here.  */
# define SYSCALL_ERROR_LABEL 99b

#else   /* ! __ASSEMBLER__ */

/* Convert X to a long long, without losing any bits if it is one
   already or warning if it is a 32-bit pointer.  */
#define ARGIFY(X) ((long long) (__typeof__ ((X) - (X))) (X))

/* Define a macro which expands into the inline wrapper code for a system
   call.  */
#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...) \
  ({ INTERNAL_SYSCALL_DECL (_sc_err); \
     long result_var = INTERNAL_SYSCALL (name, _sc_err, nr, args); \
     if ( INTERNAL_SYSCALL_ERROR_P (result_var, _sc_err) ) \
       { \
         __set_errno (INTERNAL_SYSCALL_ERRNO (result_var, _sc_err)); \
         result_var = -1L; \
       } \
     result_var; })
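
/* Illustrative use only (not from this file): a C syscall wrapper built on
   the macro above would look roughly like
     return INLINE_SYSCALL (fstatfs, 2, fd, buf);
   which performs the syscall and, on failure, sets errno from the kernel
   error value and yields -1.  */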

#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) long err __attribute__ ((unused))

#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err)   ((void) (val), (long) (err))

#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err)     ((void) (err), val)

/* Note that the original Linux syscall restart convention required the
   instruction immediately preceding SYSCALL to initialize $v0 with the
   syscall number.  Then if a restart triggered, $v0 would have been
   clobbered by the syscall interrupted, and needed to be reinitialized.
   The kernel would decrement the PC by 4 before switching back to the
   user mode so that $v0 had been reloaded before SYSCALL was executed
   again.  This implied the place $v0 was loaded from must have been
   preserved across a syscall, e.g. an immediate, static register, stack
   slot, etc.

   The convention was relaxed in Linux with a change applied to the kernel
   GIT repository as commit 96187fb0bc30cd7919759d371d810e928048249d, that
   first appeared in the 2.6.36 release.  Since then the kernel has had
   code that reloads $v0 upon syscall restart and resumes right at the
   SYSCALL instruction, so no special arrangement is needed anymore.

   For backwards compatibility with existing kernel binaries we support
   the old convention by choosing the instruction preceding SYSCALL
   carefully.  This also means we have to force a 32-bit encoding of the
   microMIPS MOVE instruction if one is used.  */

#ifdef __mips_micromips
# define MOVE32 "move32"
#else
# define MOVE32 "move"
#endif

#undef INTERNAL_SYSCALL
#define INTERNAL_SYSCALL(name, err, nr, args...) \
        internal_syscall##nr ("li\t%0, %2\t\t\t# " #name "\n\t", \
                              "IK" (SYS_ify (name)), \
                              0, err, args)
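
/* Illustrative expansion (for exposition only):
     INTERNAL_SYSCALL (getpid, err, 0)
   expands, roughly, to
     internal_syscall0 ("li\t%0, %2\t\t\t# getpid\n\t",
                        "IK" (__NR_getpid), 0, err)
   i.e. $v0 is loaded with an immediate syscall number directly before the
   SYSCALL instruction, matching the restart convention described above.  */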

#undef INTERNAL_SYSCALL_NCS
#define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \
        internal_syscall##nr (MOVE32 "\t%0, %2\n\t", \
                              "r" (__s0), \
                              number, err, args)

#define internal_syscall0(v0_init, input, number, err, dummy...) \
({ \
        long _sys_result; \
\
        { \
        register long long __s0 asm ("$16") __attribute__ ((unused)) \
          = (number); \
        register long long __v0 asm ("$2"); \
        register long long __a3 asm ("$7"); \
        __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        v0_init \
        "syscall\n\t" \
        ".set reorder" \
        : "=r" (__v0), "=r" (__a3) \
        : input \
        : __SYSCALL_CLOBBERS); \
        err = __a3; \
        _sys_result = __v0; \
        } \
        _sys_result; \
})

#define internal_syscall1(v0_init, input, number, err, arg1) \
({ \
        long _sys_result; \
\
        { \
        register long long __s0 asm ("$16") __attribute__ ((unused)) \
          = (number); \
        register long long __v0 asm ("$2"); \
        register long long __a0 asm ("$4") = ARGIFY (arg1); \
        register long long __a3 asm ("$7"); \
        __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        v0_init \
        "syscall\n\t" \
        ".set reorder" \
        : "=r" (__v0), "=r" (__a3) \
        : input, "r" (__a0) \
        : __SYSCALL_CLOBBERS); \
        err = __a3; \
        _sys_result = __v0; \
        } \
        _sys_result; \
})

#define internal_syscall2(v0_init, input, number, err, arg1, arg2) \
({ \
        long _sys_result; \
\
        { \
        register long long __s0 asm ("$16") __attribute__ ((unused)) \
          = (number); \
        register long long __v0 asm ("$2"); \
        register long long __a0 asm ("$4") = ARGIFY (arg1); \
        register long long __a1 asm ("$5") = ARGIFY (arg2); \
        register long long __a3 asm ("$7"); \
        __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        v0_init \
        "syscall\n\t" \
        ".set\treorder" \
        : "=r" (__v0), "=r" (__a3) \
        : input, "r" (__a0), "r" (__a1) \
        : __SYSCALL_CLOBBERS); \
        err = __a3; \
        _sys_result = __v0; \
        } \
        _sys_result; \
})

#define internal_syscall3(v0_init, input, number, err, \
                          arg1, arg2, arg3) \
({ \
        long _sys_result; \
\
        { \
        register long long __s0 asm ("$16") __attribute__ ((unused)) \
          = (number); \
        register long long __v0 asm ("$2"); \
        register long long __a0 asm ("$4") = ARGIFY (arg1); \
        register long long __a1 asm ("$5") = ARGIFY (arg2); \
        register long long __a2 asm ("$6") = ARGIFY (arg3); \
        register long long __a3 asm ("$7"); \
        __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        v0_init \
        "syscall\n\t" \
        ".set\treorder" \
        : "=r" (__v0), "=r" (__a3) \
        : input, "r" (__a0), "r" (__a1), "r" (__a2) \
        : __SYSCALL_CLOBBERS); \
        err = __a3; \
        _sys_result = __v0; \
        } \
        _sys_result; \
})

#define internal_syscall4(v0_init, input, number, err, \
                          arg1, arg2, arg3, arg4) \
({ \
        long _sys_result; \
\
        { \
        register long long __s0 asm ("$16") __attribute__ ((unused)) \
          = (number); \
        register long long __v0 asm ("$2"); \
        register long long __a0 asm ("$4") = ARGIFY (arg1); \
        register long long __a1 asm ("$5") = ARGIFY (arg2); \
        register long long __a2 asm ("$6") = ARGIFY (arg3); \
        register long long __a3 asm ("$7") = ARGIFY (arg4); \
        __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        v0_init \
        "syscall\n\t" \
        ".set\treorder" \
        : "=r" (__v0), "+r" (__a3) \
        : input, "r" (__a0), "r" (__a1), "r" (__a2) \
        : __SYSCALL_CLOBBERS); \
        err = __a3; \
        _sys_result = __v0; \
        } \
        _sys_result; \
})

#define internal_syscall5(v0_init, input, number, err, \
                          arg1, arg2, arg3, arg4, arg5) \
({ \
        long _sys_result; \
\
        { \
        register long long __s0 asm ("$16") __attribute__ ((unused)) \
          = (number); \
        register long long __v0 asm ("$2"); \
        register long long __a0 asm ("$4") = ARGIFY (arg1); \
        register long long __a1 asm ("$5") = ARGIFY (arg2); \
        register long long __a2 asm ("$6") = ARGIFY (arg3); \
        register long long __a3 asm ("$7") = ARGIFY (arg4); \
        register long long __a4 asm ("$8") = ARGIFY (arg5); \
        __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        v0_init \
        "syscall\n\t" \
        ".set\treorder" \
        : "=r" (__v0), "+r" (__a3) \
        : input, "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4) \
        : __SYSCALL_CLOBBERS); \
        err = __a3; \
        _sys_result = __v0; \
        } \
        _sys_result; \
})

#define internal_syscall6(v0_init, input, number, err, \
                          arg1, arg2, arg3, arg4, arg5, arg6) \
({ \
        long _sys_result; \
\
        { \
        register long long __s0 asm ("$16") __attribute__ ((unused)) \
          = (number); \
        register long long __v0 asm ("$2"); \
        register long long __a0 asm ("$4") = ARGIFY (arg1); \
        register long long __a1 asm ("$5") = ARGIFY (arg2); \
        register long long __a2 asm ("$6") = ARGIFY (arg3); \
        register long long __a3 asm ("$7") = ARGIFY (arg4); \
        register long long __a4 asm ("$8") = ARGIFY (arg5); \
        register long long __a5 asm ("$9") = ARGIFY (arg6); \
        __asm__ volatile ( \
        ".set\tnoreorder\n\t" \
        v0_init \
        "syscall\n\t" \
        ".set\treorder" \
        : "=r" (__v0), "+r" (__a3) \
        : input, "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), \
          "r" (__a5) \
        : __SYSCALL_CLOBBERS); \
        err = __a3; \
        _sys_result = __v0; \
        } \
        _sys_result; \
})

/* Registers the kernel may clobber across a syscall, in addition to the
   result ($v0) and error-flag ($a3) registers handled above.  */
#define __SYSCALL_CLOBBERS "$1", "$3", "$10", "$11", "$12", "$13", \
        "$14", "$15", "$24", "$25", "hi", "lo", "memory"

/* Standard MIPS syscalls have an error flag, and return a positive errno
   when the error flag is set.  Emulate this behaviour for vsyscalls so that
   the INTERNAL_SYSCALL_{ERROR_P,ERRNO} macros work correctly.  */
#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...) \
  ({ \
    long _ret = funcptr (args); \
    err = ((unsigned long) (_ret) >= (unsigned long) -4095L); \
    if (err) \
      _ret = -_ret; \
    _ret; \
  })
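
/* Illustrative (hypothetical) pairing with the vDSO lookup from the commit
   message: resolve the entry point once with get_vdso_symbol, then call it
   through this wrapper so failures follow the usual syscall convention:
     long (*f) (struct timeval *, void *)
       = get_vdso_symbol ("__vdso_gettimeofday");
     long ret = INTERNAL_VSYSCALL_CALL (f, err, 2, tv, NULL);
   The symbol name is given only as an example of a vDSO entry point.  */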

#endif /* __ASSEMBLER__ */

/* Pointer mangling is not yet supported for MIPS.  */
#define PTR_MANGLE(var) (void) (var)
#define PTR_DEMANGLE(var) (void) (var)

#endif /* linux/mips/sysdep.h */