2003-01-14  Guido Guenther  <agx@sigxcpu.org>

	* sysdeps/unix/sysv/linux/mips/sysdep.h (INTERNAL_SYSCALL,
	INTERNAL_SYSCALL_DECL, INTERNAL_SYSCALL_ERRNO,
	INTERNAL_SYSCALL_ERROR_P, INLINE_SYSCALL): Define.

2003-01-14  Steven Munroe  <sjmunroe@us.ibm.com>

	* sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h
	(INTERNAL_SYSCALL): Make use of ERR parameter.
	(INTERNAL_SYSCALL_DECL, INTERNAL_SYSCALL_ERRNO,
	INTERNAL_SYSCALL_ERROR_P): Adjust accordingly.
	(INLINE_SYSCALL): Make use of INTERNAL_SYSCALL.
	* sysdeps/unix/sysv/linux/powerpc/powerpc64/vfork.S: New file.

2003-01-14  Ulrich Drepper  <drepper@redhat.com>

	Patch by Denis Zaitsev <zzz@cd-club.ru>.
	* sysdeps/i386/i486/bits/string.h (__strspn_cg): Tell the compiler
	that %eax is modified.  Reported by Denis Zaitsev <zzz@cd-club.ru>.
Ulrich Drepper 2003-01-15 01:23:02 +00:00
parent 086311a933
commit 574b892ef1
15 changed files with 494 additions and 43 deletions

ChangeLog

@@ -1,3 +1,18 @@
2003-01-14 Guido Guenther <agx@sigxcpu.org>
* sysdeps/unix/sysv/linux/mips/sysdep.h (INTERNAL_SYSCALL,
INTERNAL_SYSCALL_DECL, INTERNAL_SYSCALL_ERRNO,
INTERNAL_SYSCALL_ERROR_P, INLINE_SYSCALL): Define.
2003-01-14 Steven Munroe <sjmunroe@us.ibm.com>
* sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h
(INTERNAL_SYSCALL): Make use of ERR parameter.
(INTERNAL_SYSCALL_DECL, INTERNAL_SYSCALL_ERRNO,
INTERNAL_SYSCALL_ERROR_P): Adjust accordingly.
(INLINE_SYSCALL): Make use of INTERNAL_SYSCALL.
* sysdeps/unix/sysv/linux/powerpc/powerpc64/vfork.S: New file.
2003-01-14 Ulrich Drepper <drepper@redhat.com>
* sunrpc/pm_getport.c (pmap_getport): Open TCP socket if protocol
@@ -21,10 +36,10 @@
source pointer before dereferencing.
(__STRING_SMALLL_GET32): Likewise.
(__memset_gc): Add missing parenthesis around macro arguments.
Patch by Denis Zaitsev<zzz@cd-club.ru>.
Patch by Denis Zaitsev <zzz@cd-club.ru>.
* sysdeps/i386/i486/bits/string.h (__strspn_cg): Tell the compiler
that %eax is modified. Reported by Denis Zaitsev<zzz@cd-club.ru>.
that %eax is modified. Reported by Denis Zaitsev <zzz@cd-club.ru>.
2003-01-14 Jakub Jelinek <jakub@redhat.com>

linuxthreads/ChangeLog

@@ -1,3 +1,29 @@
2003-01-15 Jakub Jelinek <jakub@redhat.com>
* sysdeps/pthread/bits/libc-lock.h (__libc_lock_init,
__libc_lock_init_recursive): Initialize fields directly.
2003-01-15 Jakub Jelinek <jakub@redhat.com>
* sysdeps/unix/sysv/linux/alpha/vfork.S (__vfork): Allow
__fork to be far away from __vfork ifndef SHARED.
* sysdeps/unix/sysv/linux/powerpc/powerpc32/vfork.S (__vfork):
Likewise.
* sysdeps/unix/sysv/linux/sparc/sparc32/vfork.S (__vfork): Likewise.
* sysdeps/unix/sysv/linux/sparc/sparc64/vfork.S (__vfork): Likewise.
Add a missing instruction.
* sysdeps/unix/sysv/linux/arm/vfork.S (__vfork): Conditionally
branch to __fork even if __NR_vfork is not defined.
2003-01-14 Ulrich Drepper <drepper@redhat.com>
* tst-cancel-wrappers.sh: Allow .__*_asynccancel functions names
as well.
2003-01-14 Steven Munroe <sjmunroe@us.ibm.com>
* sysdeps/unix/sysv/linux/powerpc/powerpc64/vfork.S: New file.
2003-01-14 Jakub Jelinek <jakub@redhat.com>
* sysdeps/unix/sysv/linux/ia64/vfork.S (JUMPTARGET): Remove.

linuxthreads/sysdeps/pthread/bits/libc-lock.h

@@ -116,12 +116,33 @@ typedef pthread_key_t __libc_key_t;
/* Initialize the named lock variable, leaving it in a consistent, unlocked
state. */
#if defined _LIBC && !defined NOT_IN_libc && defined SHARED
#define __libc_lock_init(NAME) \
({ \
(NAME).__m_count = 0; \
(NAME).__m_owner = NULL; \
(NAME).__m_kind = PTHREAD_MUTEX_TIMED_NP; \
(NAME).__m_lock.__status = 0; \
(NAME).__m_lock.__spinlock = __LT_SPINLOCK_INIT; \
0; })
#else
#define __libc_lock_init(NAME) \
(__libc_maybe_call2 (pthread_mutex_init, (&(NAME), NULL), 0))
#endif
#define __libc_rwlock_init(NAME) \
(__libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0));
/* Same as last but this time we initialize a recursive mutex. */
#if defined _LIBC && !defined NOT_IN_libc && defined SHARED
#define __libc_lock_init_recursive(NAME) \
({ \
(NAME).mutex.__m_count = 0; \
(NAME).mutex.__m_owner = NULL; \
(NAME).mutex.__m_kind = PTHREAD_MUTEX_RECURSIVE_NP; \
(NAME).mutex.__m_lock.__status = 0; \
(NAME).mutex.__m_lock.__spinlock = __LT_SPINLOCK_INIT; \
0; })
#else
#define __libc_lock_init_recursive(NAME) \
do { \
if (__pthread_mutex_init != NULL) \
@@ -133,6 +154,7 @@ typedef pthread_key_t __libc_key_t;
__pthread_mutexattr_destroy (&__attr); \
} \
} while (0);
#endif
#define __rtld_lock_init_recursive(NAME) \
__libc_lock_init_recursive (NAME)
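With this change, libc itself (a SHARED build outside NOT_IN_libc) initializes the mutex fields directly instead of going through the __libc_maybe_call2 indirection to pthread_mutex_init. A minimal usage sketch of the macro, not part of the commit; the lock name and function are hypothetical, and the __libc_lock_t typedef is assumed from this header:

  #include <bits/libc-lock.h>

  /* Hypothetical lock guarding some libc-internal state.  */
  static __libc_lock_t foo_lock;

  static void
  foo_init (void)
  {
    /* Inside a SHARED libc this now expands to plain stores into
       foo_lock's members; elsewhere it still goes through the weak
       pthread_mutex_init pointer.  */
    __libc_lock_init (foo_lock);
  }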

linuxthreads/sysdeps/unix/sysv/linux/alpha/vfork.S

@@ -27,11 +27,25 @@ __LABEL(__vfork)
.prologue 1
PSEUDO_PROF
SINGLE_THREAD_P(t0)
#ifdef SHARED
bne t0, HIDDEN_JUMPTARGET (__fork) !samegp
#else
bne t0, $hidden_fork
#endif
lda v0, SYS_ify(vfork)
call_pal PAL_callsys
#ifdef SHARED
bne a3, __syscall_error !samegp
#else
bne a3, $syscall_error
#endif
ret
#ifndef SHARED
$hidden_fork:
jmp zero, HIDDEN_JUMPTARGET (__fork)
$syscall_error:
jmp zero, __syscall_error
#endif
PSEUDO_END(__vfork)
libc_hidden_def (__vfork)

linuxthreads/sysdeps/unix/sysv/linux/arm/vfork.S

@@ -32,9 +32,9 @@ rocess,
ENTRY (__vfork)
#ifdef __NR_vfork
SINGLE_THREAD_P
bne HIDDEN_JUMPTARGET (__fork)
#ifdef __NR_vfork
swi __NR_vfork
cmn a1, #4096
RETINSTR(movcc, pc, lr)

linuxthreads/sysdeps/unix/sysv/linux/powerpc/powerpc32/vfork.S

@@ -31,7 +31,11 @@ ENTRY (__vfork)
#ifdef __NR_vfork
SINGLE_THREAD_P
# ifdef SHARED
bne- HIDDEN_JUMPTARGET(__fork)
# else
bne- .Lhidden_fork
# endif
DO_CALL (SYS_ify (vfork));
@@ -41,7 +45,11 @@ ENTRY (__vfork)
bnslr+
/* Check if vfork syscall is known at all. */
cmpwi r3,ENOSYS
# ifdef SHARED
bne JUMPTARGET(__syscall_error)
# else
bne .Lsyscall_error
# endif
# endif
#endif
@@ -53,6 +61,13 @@ ENTRY (__vfork)
PSEUDO_RET
#endif
# ifndef SHARED
.Lhidden_fork:
b HIDDEN_JUMPTARGET(__fork)
.Lsyscall_error:
b JUMPTARGET(__syscall_error)
# endif
PSEUDO_END (__vfork)
libc_hidden_def (__vfork)

linuxthreads/sysdeps/unix/sysv/linux/powerpc/powerpc64/vfork.S

@@ -0,0 +1,59 @@
/* Copyright (C) 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#include <sysdep-cancel.h>
#define _ERRNO_H 1
#include <bits/errno.h>
#include <kernel-features.h>
/* Clone the calling process, but without copying the whole address space.
The calling process is suspended until the new process exits or is
replaced by a call to `execve'. Return -1 for errors, 0 to the new process,
and the process ID of the new process to the old process. */
ENTRY (__vfork)
#ifdef __NR_vfork
SINGLE_THREAD_P
bne- HIDDEN_JUMPTARGET(__fork)
DO_CALL (SYS_ify (vfork));
# ifdef __ASSUME_VFORK_SYSCALL
PSEUDO_RET
# else
bnslr+
/* Check if vfork syscall is known at all. */
cmpdi r3,ENOSYS
bne JUMPTARGET(__syscall_error)
# endif
#endif
#ifndef __ASSUME_VFORK_SYSCALL
/* If we don't have vfork, fork is close enough. */
DO_CALL (SYS_ify (fork));
PSEUDO_RET
#endif
PSEUDO_END (__vfork)
libc_hidden_def (__vfork)
weak_alias (__vfork, vfork)
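For readers not fluent in PowerPC assembly, the control flow of this new wrapper is roughly the following C sketch. It is illustrative only: single_thread_p() is a hypothetical stand-in for SINGLE_THREAD_P, and a real vfork wrapper cannot be written in C this way because the child borrows the parent's stack frame.

  #include <errno.h>
  #include <sys/syscall.h>
  #include <sys/types.h>
  #include <unistd.h>

  extern pid_t __fork (void);
  extern int single_thread_p (void);  /* hypothetical stand-in for SINGLE_THREAD_P */

  pid_t
  vfork_logic (void)
  {
    if (!single_thread_p ())
      return __fork ();                /* multiple threads: branch to __fork */

    pid_t pid = syscall (__NR_vfork);  /* DO_CALL (SYS_ify (vfork)) */
  #ifndef __ASSUME_VFORK_SYSCALL
    if (pid == -1 && errno == ENOSYS)  /* kernel predates the vfork syscall */
      pid = syscall (__NR_fork);       /* "fork is close enough" */
  #endif
    return pid;                        /* error return handled by PSEUDO_RET */
  }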

linuxthreads/sysdeps/unix/sysv/linux/sparc/sparc32/vfork.S

@@ -23,7 +23,11 @@
ENTRY(__vfork)
ld [%g6 + MULTIPLE_THREADS_OFFSET], %o0
cmp %o0, 0
#ifdef SHARED
bne HIDDEN_JUMPTARGET(__fork)
#else
bne 1f
#endif
mov __NR_vfork, %g1
ta 0x10;
bcs __syscall_error_handler
@@ -31,6 +35,11 @@ ENTRY(__vfork)
sub %o1, 1, %o1
retl
and %o0, %o1, %o0
#ifndef SHARED
1: mov %o7, %g1
call HIDDEN_JUMPTARGET(__fork)
mov %g1, %o7
#endif
SYSCALL_ERROR_HANDLER
PSEUDO_END (__vfork)
libc_hidden_def (__vfork)

linuxthreads/sysdeps/unix/sysv/linux/sparc/sparc64/vfork.S

@@ -22,7 +22,12 @@
.text
ENTRY(__vfork)
ld [%g6 + MULTIPLE_THREADS_OFFSET], %o0
#ifdef SHARED
cmp %o0, 0
bne HIDDEN_JUMPTARGET (__fork)
#else
brnz,pn %o0, 1f
#endif
mov __NR_vfork, %g1
ta 0x6d
bcs,pn %xcc, __syscall_error_handler
@@ -30,6 +35,11 @@ ENTRY(__vfork)
sub %o1, 1, %o1
retl
and %o0, %o1, %o0
#ifndef SHARED
1: mov %o7, %g1
call HIDDEN_JUMPTARGET(__fork)
mov %g1, %o7
#endif
SYSCALL_ERROR_HANDLER
PSEUDO_END (__vfork)
libc_hidden_def (__vfork)

linuxthreads/tst-cancel-wrappers.sh

@@ -82,9 +82,9 @@ C["__xpg_sigpause"]=1
{
if (C[$1] && $2 ~ /^[TW]$/)
seen=$1
else if ($1 ~ /^__(libc|pthread)_enable_asynccancel$/ && $2 == "U")
else if ($1 ~ /^([.]|)__(libc|pthread)_enable_asynccancel$/ && $2 == "U")
seen_enable=1
else if ($1 ~ /^__(libc|pthread)_disable_asynccancel$/ && $2 == "U")
else if ($1 ~ /^([.]|)__(libc|pthread)_disable_asynccancel$/ && $2 == "U")
seen_disable=1
}
END {

nptl/ChangeLog

@@ -1,3 +1,8 @@
2003-01-14 Ulrich Drepper <drepper@redhat.com>
* Makefile (CFLAGS-pthread_self.os): Define this, not
CFLAGS-pthread_self.c.
2003-01-13 Ulrich Drepper <drepper@redhat.com>
* sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S: Don't export

nptl/Makefile

@@ -206,7 +206,7 @@ $(inst_libdir)/libpthread_nonshared.a: $(objpfx)libpthread_nonshared.a
# stack frame is more work than the actual operation. Disable the
# frame creation entirely. This will help applications which call the
# function frequently to get a thread-specific handle.
CFLAGS-pthread_self.c += -fomit-frame-pointer
CFLAGS-pthread_self.os += -fomit-frame-pointer
CFLAGS-tst-unload.c += -DPREFIX=\"$(objpfx)\"

sysdeps/unix/sysv/linux/mips/sysdep.h

@@ -33,4 +33,242 @@
# define SYS_ify(syscall_name) __NR_/**/syscall_name
#endif
#ifndef __ASSEMBLER__
/* Define a macro which expands into the inline wrapper code for a system
call. */
#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...) \
({ INTERNAL_SYSCALL_DECL(err); \
long result_var = INTERNAL_SYSCALL (name, err, nr, args); \
if ( INTERNAL_SYSCALL_ERROR_P (result_var, err) ) \
{ \
__set_errno (INTERNAL_SYSCALL_ERRNO (result_var, err)); \
result_var = -1L; \
} \
result_var; })
#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) long err
#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) ((long) (err))
#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err) (val)
#undef INTERNAL_SYSCALL
#define INTERNAL_SYSCALL(name, err, nr, args...) internal_syscall##nr(name, err, args)
#define internal_syscall0(name, err, dummy...) \
({ \
long _sys_result; \
\
{ \
register long __v0 asm("$2"); \
register long __a3 asm("$7"); \
__asm__ volatile ( \
".set\tnoreorder\n\t" \
"li\t$2, %2\t\t\t# " #name "\n\t" \
"syscall\n\t" \
".set reorder" \
: "=r" (__v0), "=r" (__a3) \
: "i" (SYS_ify(name)) \
: __SYSCALL_CLOBBERS); \
err = __a3; \
_sys_result = __v0; \
} \
_sys_result; \
})
#define internal_syscall1(name, err, arg1) \
({ \
long _sys_result; \
\
{ \
register long __v0 asm("$2"); \
register long __a0 asm("$4") = (long) arg1; \
register long __a3 asm("$7"); \
__asm__ volatile ( \
".set\tnoreorder\n\t" \
"li\t$2, %3\t\t\t# " #name "\n\t" \
"syscall\n\t" \
".set reorder" \
: "=r" (__v0), "=r" (__a3) \
: "r" (__a0), "i" (SYS_ify(name)) \
: __SYSCALL_CLOBBERS); \
err = __a3; \
_sys_result = __v0; \
} \
_sys_result; \
})
#define internal_syscall2(name, err, arg1, arg2) \
({ \
long _sys_result; \
\
{ \
register long __v0 asm("$2"); \
register long __a0 asm("$4") = (long) arg1; \
register long __a1 asm("$5") = (long) arg2; \
register long __a3 asm("$7"); \
__asm__ volatile ( \
".set\tnoreorder\n\t" \
"li\t$2, %4\t\t\t# " #name "\n\t" \
"syscall\n\t" \
".set\treorder" \
: "=r" (__v0), "=r" (__a3) \
: "r" (__a0), "r" (__a1), "i" (SYS_ify(name)) \
: __SYSCALL_CLOBBERS); \
err = __a3; \
_sys_result = __v0; \
} \
_sys_result; \
})
#define internal_syscall3(name, err, arg1, arg2, arg3) \
({ \
long _sys_result; \
\
{ \
register long __v0 asm("$2"); \
register long __a0 asm("$4") = (long) arg1; \
register long __a1 asm("$5") = (long) arg2; \
register long __a2 asm("$6") = (long) arg3; \
register long __a3 asm("$7"); \
__asm__ volatile ( \
".set\tnoreorder\n\t" \
"li\t$2, %5\t\t\t# " #name "\n\t" \
"syscall\n\t" \
".set\treorder" \
: "=r" (__v0), "=r" (__a3) \
: "r" (__a0), "r" (__a1), "r" (__a2), "i" (SYS_ify(name)) \
: __SYSCALL_CLOBBERS); \
err = __a3; \
_sys_result = __v0; \
} \
_sys_result; \
})
#define internal_syscall4(name, err, arg1, arg2, arg3, arg4) \
({ \
long _sys_result; \
\
{ \
register long __v0 asm("$2"); \
register long __a0 asm("$4") = (long) arg1; \
register long __a1 asm("$5") = (long) arg2; \
register long __a2 asm("$6") = (long) arg3; \
register long __a3 asm("$7") = (long) arg4; \
__asm__ volatile ( \
".set\tnoreorder\n\t" \
"li\t$2, %5\t\t\t# " #name "\n\t" \
"syscall\n\t" \
".set\treorder" \
: "=r" (__v0), "+r" (__a3) \
: "r" (__a0), "r" (__a1), "r" (__a2), "i" (SYS_ify(name)) \
: __SYSCALL_CLOBBERS); \
err = __a3; \
_sys_result = __v0; \
} \
_sys_result; \
})
#define internal_syscall5(name, err, arg1, arg2, arg3, arg4, arg5) \
({ \
long _sys_result; \
\
{ \
register long __v0 asm("$2"); \
register long __a0 asm("$4") = (long) arg1; \
register long __a1 asm("$5") = (long) arg2; \
register long __a2 asm("$6") = (long) arg3; \
register long __a3 asm("$7") = (long) arg4; \
__asm__ volatile ( \
".set\tnoreorder\n\t" \
"lw\t$2, %6\n\t" \
"subu\t$29, 32\n\t" \
"sw\t$2, 16($29)\n\t" \
"li\t$2, %5\t\t\t# " #name "\n\t" \
"syscall\n\t" \
"addiu\t$29, 32\n\t" \
".set\treorder" \
: "=r" (__v0), "+r" (__a3) \
: "r" (__a0), "r" (__a1), "r" (__a2), "i" (SYS_ify(name)), \
"m" ((long)arg5) \
: __SYSCALL_CLOBBERS); \
err = __a3; \
_sys_result = __v0; \
} \
_sys_result; \
})
#define internal_syscall6(name, err, arg1, arg2, arg3, arg4, arg5, arg6)\
({ \
long _sys_result; \
\
{ \
register long __v0 asm("$2"); \
register long __a0 asm("$4") = (long) arg1; \
register long __a1 asm("$5") = (long) arg2; \
register long __a2 asm("$6") = (long) arg3; \
register long __a3 asm("$7") = (long) arg4; \
__asm__ volatile ( \
".set\tnoreorder\n\t" \
"lw\t$2, %6\n\t" \
"lw\t$8, %7\n\t" \
"subu\t$29, 32\n\t" \
"sw\t$2, 16($29)\n\t" \
"sw\t$8, 20($29)\n\t" \
"li\t$2, %5\t\t\t# " #name "\n\t" \
"syscall\n\t" \
"addiu\t$29, 32\n\t" \
".set\treorder" \
: "=r" (__v0), "+r" (__a3) \
: "r" (__a0), "r" (__a1), "r" (__a2), "i" (SYS_ify(name)), \
"m" ((long)arg5), "m" ((long)arg6) \
: __SYSCALL_CLOBBERS); \
err = __a3; \
_sys_result = __v0; \
} \
_sys_result; \
})
#define internal_syscall7(name, err, arg1, arg2, arg3, arg4, arg5, arg6, arg7)\
({ \
long _sys_result; \
\
{ \
register long __v0 asm("$2"); \
register long __a0 asm("$4") = (long) arg1; \
register long __a1 asm("$5") = (long) arg2; \
register long __a2 asm("$6") = (long) arg3; \
register long __a3 asm("$7") = (long) arg4; \
__asm__ volatile ( \
".set\tnoreorder\n\t" \
"lw\t$2, %6\n\t" \
"lw\t$8, %7\n\t" \
"lw\t$9, %8\n\t" \
"subu\t$29, 32\n\t" \
"sw\t$2, 16($29)\n\t" \
"sw\t$8, 20($29)\n\t" \
"sw\t$9, 24($29)\n\t" \
"li\t$2, %5\t\t\t# " #name "\n\t" \
"syscall\n\t" \
"addiu\t$29, 32\n\t" \
".set\treorder" \
: "=r" (__v0), "+r" (__a3) \
: "r" (__a0), "r" (__a1), "r" (__a2), "i" (SYS_ify(name)), \
"m" ((long)arg5), "m" ((long)arg6), "m" ((long)arg7) \
: __SYSCALL_CLOBBERS); \
err = __a3; \
_sys_result = __v0; \
} \
_sys_result; \
})
#define __SYSCALL_CLOBBERS "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25"
#endif /* __ASSEMBLER__ */
#endif /* linux/mips/sysdep.h */
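A usage sketch for the macros defined above (not part of the commit; the wrapper names are hypothetical). On MIPS the kernel flags failure in $a3 and leaves the errno code in $v0, so INTERNAL_SYSCALL reports errors through the separate ERR value while the return value carries the errno code:

  #include <sysdep.h>
  #include <unistd.h>

  /* Hypothetical wrapper using the plain inline form: sets errno and
     returns -1 on failure, like an ordinary libc syscall stub.  */
  ssize_t
  my_write (int fd, const void *buf, size_t n)
  {
    return INLINE_SYSCALL (write, 3, fd, buf, n);
  }

  /* Hypothetical wrapper that must not touch errno: inspect the raw
     result with the INTERNAL_* macros instead.  */
  int
  my_close_nocancel (int fd)
  {
    INTERNAL_SYSCALL_DECL (err);                 /* long err; receives $a3 */
    long ret = INTERNAL_SYSCALL (close, err, 1, fd);
    if (INTERNAL_SYSCALL_ERROR_P (ret, err))     /* $a3 != 0 means failure */
      return -INTERNAL_SYSCALL_ERRNO (ret, err); /* return negative errno */
    return ret;
  }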

sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep.h

@@ -74,36 +74,14 @@
behave like function calls as far as register saving. */
#define INLINE_SYSCALL(name, nr, args...) \
({ \
register long r0 __asm__ ("r0"); \
register long r3 __asm__ ("r3"); \
register long r4 __asm__ ("r4"); \
register long r5 __asm__ ("r5"); \
register long r6 __asm__ ("r6"); \
register long r7 __asm__ ("r7"); \
register long r8 __asm__ ("r8"); \
long ret, err; \
LOADARGS_##nr(name, args); \
__asm __volatile ("sc\n\t" \
"mfcr %7\n\t" \
: "=r" (r0), "=r" (r3), "=r" (r4), \
"=r" (r5), "=r" (r6), "=r" (r7), \
"=r" (r8), "=r" (err) \
: ASM_INPUT_##nr \
: "r9", "r10", "r11", "r12", \
"fr0", "fr1", "fr2", "fr3", \
"fr4", "fr5", "fr6", "fr7", \
"fr8", "fr9", "fr10", "fr11", \
"fr12", "fr13", \
"ctr", "lr", \
"cr0", "cr1", "cr5", "cr6", "cr7", \
"memory"); \
ret = r3; \
if (__builtin_expect ((err & (1 << 28)), 0)) \
{ \
__set_errno (ret); \
ret = -1L; \
} \
ret; \
INTERNAL_SYSCALL_DECL (sc_err); \
long sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args); \
if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
{ \
__set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
sc_ret = -1L; \
} \
sc_ret; \
})
/* Define a macro which expands inline into the wrapper code for a system
@@ -113,7 +91,7 @@
the negation of the return value in the kernel gets reverted. */
# undef INTERNAL_SYSCALL
# define INTERNAL_SYSCALL(name, nr, args...) \
# define INTERNAL_SYSCALL(name, err, nr, args...) \
({ \
register long r0 __asm__ ("r0"); \
register long r3 __asm__ ("r3"); \
@@ -125,8 +103,7 @@
LOADARGS_##nr(name, args); \
__asm__ __volatile__ \
("sc\n\t" \
"bns+ 0f\n\t" \
"neg %1,%1\n" \
"mfcr %0\n\t" \
"0:" \
: "=&r" (r0), \
"=&r" (r3), "=&r" (r4), "=&r" (r5), \
@@ -134,14 +111,19 @@
: ASM_INPUT_##nr \
: "r9", "r10", "r11", "r12", \
"cr0", "ctr", "memory"); \
(int) r3; \
err = r0; \
(int) r3; \
})
# undef INTERNAL_SYSCALL_DECL
# define INTERNAL_SYSCALL_DECL(err) long err
# undef INTERNAL_SYSCALL_ERROR_P
# define INTERNAL_SYSCALL_ERROR_P(val) ((unsigned long) (val) >= 0xfffffffffffff001u)
# define INTERNAL_SYSCALL_ERROR_P(val, err) \
(__builtin_expect (err & (1 << 28), 0))
# undef INTERNAL_SYSCALL_ERRNO
# define INTERNAL_SYSCALL_ERRNO(val) (-(val))
# define INTERNAL_SYSCALL_ERRNO(val, err) (val)
#define LOADARGS_0(name, dummy) \
r0 = __NR_##name
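The same convention on powerpc64, as a usage sketch (not from the commit; the wrapper name is hypothetical): ERR receives the condition-register image captured by mfcr, INTERNAL_SYSCALL_ERROR_P tests the cr0 summary-overflow bit (1 << 28), and on failure the return value in r3 is already the positive errno code:

  #include <sysdep.h>
  #include <errno.h>
  #include <sys/time.h>

  int
  my_gettimeofday (struct timeval *tv)
  {
    INTERNAL_SYSCALL_DECL (err);               /* long err; gets the CR image */
    long ret = INTERNAL_SYSCALL (gettimeofday, err, 2, tv, NULL);
    if (INTERNAL_SYSCALL_ERROR_P (ret, err))   /* kernel set cr0.SO */
      {
        __set_errno (INTERNAL_SYSCALL_ERRNO (ret, err));  /* ret holds errno */
        return -1;
      }
    return (int) ret;
  }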

sysdeps/unix/sysv/linux/powerpc/powerpc64/vfork.S

@@ -0,0 +1,56 @@
/* Copyright (C) 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#include <sysdep.h>
#define _ERRNO_H 1
#include <bits/errno.h>
#include <kernel-features.h>
/* Clone the calling process, but without copying the whole address space.
The calling process is suspended until the new process exits or is
replaced by a call to `execve'. Return -1 for errors, 0 to the new process,
and the process ID of the new process to the old process. */
ENTRY (__vfork)
#ifdef __NR_vfork
DO_CALL (SYS_ify (vfork))
# ifdef __ASSUME_VFORK_SYSCALL
PSEUDO_RET
# else
bnslr+
/* Check if vfork syscall is known at all. */
cmpdi r3,ENOSYS
bne JUMPTARGET(__syscall_error)
# endif
#endif
#ifndef __ASSUME_VFORK_SYSCALL
/* If we don't have vfork, fork is close enough. */
DO_CALL (SYS_ify (fork))
PSEUDO_RET
#endif
PSEUDO_END (__vfork)
libc_hidden_def (__vfork)
weak_alias (__vfork, vfork)