Mirror of https://sourceware.org/git/glibc.git
Use uint64_t and (uint64_t) 1 for 64-bit int
This patch replaces unsigned long int and 1UL with uint64_t and
(uint64_t) 1 to support ILP32 targets like x32.

	[BZ #17870]
	* nptl/sem_post.c (__new_sem_post): Replace unsigned long int
	with uint64_t.
	* nptl/sem_waitcommon.c (__sem_wait_cleanup): Replace 1UL with
	(uint64_t) 1.
	(__new_sem_wait_slow): Replace unsigned long int with uint64_t.
	Replace 1UL with (uint64_t) 1.
	* sysdeps/nptl/internaltypes.h (new_sem): Replace unsigned long
	int with uint64_t.
parent 2ec2d7032f
commit 22971c35e2
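Background on why the cast matters: on ILP32 targets such as x32, unsigned long int is only 32 bits wide, so 1UL << SEM_NWAITERS_SHIFT (a shift by 32) is undefined behavior and can never reach the waiter-count bits stored in the upper half of the 64-bit semaphore word. A minimal standalone sketch (not glibc code; SEM_NWAITERS_SHIFT copied from the patch below):

#include <stdint.h>
#include <stdio.h>

#define SEM_NWAITERS_SHIFT 32

int
main (void)
{
  /* On x32, sizeof (unsigned long int) == 4, so 1UL << 32 would be
     undefined behavior; using a 64-bit type makes the shift well
     defined on every target.  */
  uint64_t one_waiter = (uint64_t) 1 << SEM_NWAITERS_SHIFT;
  printf ("one waiter bit = %llu\n", (unsigned long long) one_waiter);
  return 0;
}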
ChangeLog | 12 ++++++++++++

--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+2015-01-23  H.J. Lu  <hongjiu.lu@intel.com>
+
+	[BZ #17870]
+	* nptl/sem_post.c (__new_sem_post): Replace unsigned long int
+	with uint64_t.
+	* nptl/sem_waitcommon.c (__sem_wait_cleanup): Replace 1UL with
+	(uint64_t) 1.
+	(__new_sem_wait_slow): Replace unsigned long int with uint64_t.
+	Replace 1UL with (uint64_t) 1.
+	* sysdeps/nptl/internaltypes.h (new_sem): Replace unsigned long
+	int with uint64_t.
+
 2015-01-23  Roland McGrath  <roland@hack.frob.com>
 
 	* inet/if_index.c (if_nameindex): Add missing libc_hidden_weak.
NEWS | 2 +-

--- a/NEWS
+++ b/NEWS
@@ -18,7 +18,7 @@ Version 2.21
   17664, 17665, 17668, 17682, 17702, 17717, 17719, 17722, 17723, 17724,
   17725, 17732, 17733, 17744, 17745, 17746, 17747, 17748, 17775, 17777,
   17780, 17781, 17782, 17791, 17793, 17796, 17797, 17803, 17806, 17834,
-  17844, 17848
+  17844, 17848, 17870
 
 * A new semaphore algorithm has been implemented in generic C code for all
   machines.  Previous custom assembly implementations of semaphore were
--- a/nptl/sem_post.c
+++ b/nptl/sem_post.c
@@ -65,7 +65,7 @@ __new_sem_post (sem_t *sem)
      added tokens before (the release sequence includes atomic RMW operations
      by other threads).  */
   /* TODO Use atomic_fetch_add to make it scale better than a CAS loop?  */
-  unsigned long int d = atomic_load_relaxed (&isem->data);
+  uint64_t d = atomic_load_relaxed (&isem->data);
   do
     {
       if ((d & SEM_VALUE_MASK) == SEM_VALUE_MAX)
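For readers following the hunk above, here is a simplified, self-contained sketch of the same post-side CAS loop using C11 stdatomic in place of glibc's internal atomic_* wrappers; struct new_sem, SEM_VALUE_MASK, and SEM_VALUE_MAX are stand-ins for the real internaltypes.h definitions, and the futex wake-up that the real __new_sem_post performs is omitted:

#include <errno.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdint.h>

#define SEM_VALUE_MASK (~(unsigned int) 0)
#define SEM_VALUE_MAX INT_MAX  /* stand-in for the glibc definition */

struct new_sem { _Atomic uint64_t data; };

static int
sem_post_sketch (struct new_sem *isem)
{
  uint64_t d = atomic_load_explicit (&isem->data, memory_order_relaxed);
  do
    {
      /* Fail instead of overflowing the 32-bit value field.  */
      if ((d & SEM_VALUE_MASK) == SEM_VALUE_MAX)
        {
          errno = EOVERFLOW;
          return -1;
        }
      /* Release MO: a thread that acquires this token synchronizes with
         us; on CAS failure d is reloaded and the loop retries.  */
    }
  while (!atomic_compare_exchange_weak_explicit (&isem->data, &d, d + 1,
                                                 memory_order_release,
                                                 memory_order_relaxed));
  return 0;
}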
--- a/nptl/sem_waitcommon.c
+++ b/nptl/sem_waitcommon.c
@@ -187,7 +187,7 @@ __sem_wait_cleanup (void *arg)
 
 #if __HAVE_64B_ATOMICS
   /* Stop being registered as a waiter.  See below for MO.  */
-  atomic_fetch_add_relaxed (&sem->data, -(1UL << SEM_NWAITERS_SHIFT));
+  atomic_fetch_add_relaxed (&sem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
 #else
   __sem_wait_32_finish (sem);
 #endif
@@ -263,8 +263,8 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
 #if __HAVE_64B_ATOMICS
   /* Add a waiter.  Relaxed MO is sufficient because we can rely on the
      ordering provided by the RMW operations we use.  */
-  unsigned long d = atomic_fetch_add_relaxed (&sem->data,
-					      1UL << SEM_NWAITERS_SHIFT);
+  uint64_t d = atomic_fetch_add_relaxed (&sem->data,
+					 (uint64_t) 1 << SEM_NWAITERS_SHIFT);
 
   pthread_cleanup_push (__sem_wait_cleanup, sem);
 
@@ -304,7 +304,7 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
 	  err = -1;
 	  /* Stop being registered as a waiter.  */
 	  atomic_fetch_add_relaxed (&sem->data,
-				    -(1UL << SEM_NWAITERS_SHIFT));
+				    -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
 	  break;
 	}
       /* Relaxed MO is sufficient; see below.  */
@@ -320,7 +320,7 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
 	     up-to-date value; the futex_wait or the CAS perform the real
 	     work.  */
 	  if (atomic_compare_exchange_weak_acquire (&sem->data,
-	      &d, d - 1 - (1UL << SEM_NWAITERS_SHIFT)))
+	      &d, d - 1 - ((uint64_t) 1 << SEM_NWAITERS_SHIFT)))
 	    {
 	      err = 0;
 	      break;
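The single CAS in the hunk above does two things at once: subtracting 1 takes a token, and subtracting (uint64_t) 1 << SEM_NWAITERS_SHIFT deregisters the calling thread as a waiter. A tiny worked example of that arithmetic, assuming the field packing defined in internaltypes.h below:

#include <assert.h>
#include <stdint.h>

#define SEM_NWAITERS_SHIFT 32

int
main (void)
{
  /* value = 3, nwaiters = 2 packed into one 64-bit word.  */
  uint64_t d = ((uint64_t) 2 << SEM_NWAITERS_SHIFT) | 3;
  /* One subtraction takes a token and drops the waiter registration.  */
  uint64_t after = d - 1 - ((uint64_t) 1 << SEM_NWAITERS_SHIFT);
  assert ((after & 0xffffffffu) == 2);         /* value: 3 -> 2 */
  assert ((after >> SEM_NWAITERS_SHIFT) == 1); /* nwaiters: 2 -> 1 */
  return 0;
}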
--- a/sysdeps/nptl/internaltypes.h
+++ b/sysdeps/nptl/internaltypes.h
@@ -155,7 +155,7 @@ struct new_sem
 # endif
 # define SEM_NWAITERS_SHIFT 32
 # define SEM_VALUE_MASK (~(unsigned int)0)
-  unsigned long int data;
+  uint64_t data;
   int private;
   int pad;
 #else
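With data now a uint64_t, the packing described by the macros above is: semaphore value in the low 32 bits (SEM_VALUE_MASK), waiter count in the high 32 bits (starting at SEM_NWAITERS_SHIFT). A small standalone demonstration (not glibc code) of reading both halves back out:

#include <stdint.h>
#include <stdio.h>

#define SEM_NWAITERS_SHIFT 32
#define SEM_VALUE_MASK (~(unsigned int) 0)

int
main (void)
{
  uint64_t data = 0;
  data += 5;                                   /* post five tokens */
  data += (uint64_t) 1 << SEM_NWAITERS_SHIFT;  /* register one waiter */
  printf ("value = %u, nwaiters = %u\n",
          (unsigned int) (data & SEM_VALUE_MASK),
          (unsigned int) (data >> SEM_NWAITERS_SHIFT));
  return 0;
}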