mirror of
https://sourceware.org/git/glibc.git
synced 2024-11-13 00:30:07 +00:00
Update.
2004-02-20 Steven Munroe <sjmunroe@us.ibm.com> * sysdeps/powerpc/pspinlock.c: Move from here. * sysdeps/powerpc/powerpc32/pspinlock.c: To here. * sysdeps/powerpc/powerpc64/pspinlock.c: New file. * sysdeps/powerpc/powerpc64/pt-machine.h: Define __compare_and_swap32 and __compare_and_swap32_with_release_semantics.
This commit is contained in:
parent
1be3130ed2
commit
f1c616fc8e
@ -1,3 +1,11 @@
|
||||
2004-02-20 Steven Munroe <sjmunroe@us.ibm.com>
|
||||
|
||||
* sysdeps/powerpc/pspinlock.c: Move from here.
|
||||
* sysdeps/powerpc/powerpc32/pspinlock.c: To here.
|
||||
* sysdeps/powerpc/powerpc64/pspinlock.c: New file.
|
||||
* sysdeps/powerpc/powerpc64/pt-machine.h: Define __compare_and_swap32
|
||||
and __compare_and_swap32_with_release_semantics.
|
||||
|
||||
2004-02-20 Jakub Jelinek <jakub@redhat.com>
|
||||
|
||||
* Makefile (generated): Remove tst-stack1.mtrace and tst-stack1-mem.
|
||||
|
70
linuxthreads/sysdeps/powerpc/powerpc64/pspinlock.c
Normal file
70
linuxthreads/sysdeps/powerpc/powerpc64/pspinlock.c
Normal file
@ -0,0 +1,70 @@
|
||||
/* POSIX spinlock implementation. PowerPC version.
|
||||
Copyright (C) 2000, 2003 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
|
||||
The GNU C Library is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU Lesser General Public License as
|
||||
published by the Free Software Foundation; either version 2.1 of the
|
||||
License, or (at your option) any later version.
|
||||
|
||||
The GNU C Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public
|
||||
License along with the GNU C Library; see the file COPYING.LIB. If not,
|
||||
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
||||
Boston, MA 02111-1307, USA. */
|
||||
|
||||
#include <errno.h>
|
||||
#include <pthread.h>
|
||||
#include "internals.h"
|
||||
|
||||
int
|
||||
__pthread_spin_lock (pthread_spinlock_t *lock)
|
||||
{
|
||||
while (! __compare_and_swap32 ((int *)lock, 0, 1))
|
||||
;
|
||||
return 0;
|
||||
}
|
||||
weak_alias (__pthread_spin_lock, pthread_spin_lock)
|
||||
|
||||
|
||||
int
|
||||
__pthread_spin_trylock (pthread_spinlock_t *lock)
|
||||
{
|
||||
return __compare_and_swap32 ((int *)lock, 0, 1) ? 0 : EBUSY;
|
||||
}
|
||||
weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
|
||||
|
||||
|
||||
int
|
||||
__pthread_spin_unlock (pthread_spinlock_t *lock)
|
||||
{
|
||||
MEMORY_BARRIER ();
|
||||
*lock = 0;
|
||||
return 0;
|
||||
}
|
||||
weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
|
||||
|
||||
|
||||
int
|
||||
__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
|
||||
{
|
||||
/* We can ignore the `pshared' parameter. Since we are busy-waiting
|
||||
all processes which can access the memory location `lock' points
|
||||
to can use the spinlock. */
|
||||
*lock = 0;
|
||||
return 0;
|
||||
}
|
||||
weak_alias (__pthread_spin_init, pthread_spin_init)
|
||||
|
||||
|
||||
int
|
||||
__pthread_spin_destroy (pthread_spinlock_t *lock)
|
||||
{
|
||||
/* Nothing to do. */
|
||||
return 0;
|
||||
}
|
||||
weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
|
@ -30,6 +30,7 @@
|
||||
|
||||
extern long int testandset (int *spinlock);
|
||||
extern int __compare_and_swap (long int *p, long int oldval, long int newval);
|
||||
extern int __compare_and_swap32 (int *p, int oldval, int newval);
|
||||
|
||||
/* For multiprocessor systems, we want to ensure all memory accesses
|
||||
are completed before we reset a lock. On other systems, we still
|
||||
@ -118,6 +119,49 @@ __compare_and_swap_with_release_semantics (long int *p,
|
||||
return (int)(ret == 0);
|
||||
}
|
||||
|
||||
/* 32-bit compare-and-swap with acquire semantics: atomically store
   NEWVAL into *P iff *P == OLDVAL.  Returns nonzero on success,
   zero if the value at *P did not match OLDVAL.  */
PT_EI int
__compare_and_swap32 (int *p, int oldval, int newval)
{
  int ret;

  /* lwarx/stwcx. reservation loop: load *p with reservation, test it
     against oldval via xor. (ret == 0 iff equal), and if it matched,
     conditionally store newval.  The store fails (and we retry from 0:)
     if the reservation was lost to another processor in between.  */
  __asm__ __volatile__ (
	   "0:    lwarx %0,0,%1 ;"
	   "      xor. %0,%3,%0;"
	   "      bne 1f;"
	   "      stwcx. %2,0,%1;"
	   "      bne- 0b;"
	   "1:    "
	: "=&r"(ret)
	: "r"(p), "r"(newval), "r"(oldval)
	: "cr0", "memory");
  /* This version of __compare_and_swap is to be used when acquiring
     a lock, so we don't need to worry about whether other memory
     operations have completed, but we do need to be sure that any loads
     after this point really occur after we have acquired the lock.  */
  __asm__ __volatile__ ("isync" : : : "memory");
  return (int)(ret == 0);
}
|
||||
|
||||
/* 32-bit compare-and-swap with release semantics: atomically store
   NEWVAL into *P iff *P == OLDVAL.  Returns nonzero on success,
   zero otherwise.  The MEMORY_BARRIER before the swap ensures all
   prior memory operations are visible before the store that
   releases the lock.

   Fix: the parameters and the temporary were declared `long int',
   which is 64 bits on powerpc64, while lwarx/stwcx. operate on
   32-bit words — the xor. would compare a 32-bit load against a
   full 64-bit OLDVAL.  Use `int' throughout, consistent with the
   __compare_and_swap32 prototype and its acquire-side twin.  */
PT_EI int
__compare_and_swap32_with_release_semantics (int *p,
					     int oldval, int newval)
{
  int ret;

  MEMORY_BARRIER ();
  /* Same lwarx/stwcx. reservation loop as __compare_and_swap32;
     ret == 0 on exit iff the swap happened.  */
  __asm__ __volatile__ (
	   "0:    lwarx %0,0,%1 ;"
	   "      xor. %0,%3,%0;"
	   "      bne 1f;"
	   "      stwcx. %2,0,%1;"
	   "      bne- 0b;"
	   "1:    "
	: "=&r"(ret)
	: "r"(p), "r"(newval), "r"(oldval)
	: "cr0", "memory");
  return (int)(ret == 0);
}
|
||||
|
||||
PT_EI long int
|
||||
testandset (int *p)
|
||||
{
|
||||
|
Loading…
Reference in New Issue
Block a user