cheri: Implement 128-bit atomics

Arm Morello requires 128-bit atomics.
This commit is contained in:
Szabolcs Nagy 2021-06-08 12:48:43 +01:00
parent 7880bbd374
commit c8f1fc9d94
2 changed files with 36 additions and 2 deletions

View File

@ -62,6 +62,8 @@
__atg1_result = pre##_32_##post (mem, __VA_ARGS__); \
else if (sizeof (*mem) == 8) \
__atg1_result = pre##_64_##post (mem, __VA_ARGS__); \
else if (sizeof (*mem) == 16) \
__atg1_result = pre##_128_##post (mem, __VA_ARGS__); \
else \
abort (); \
__atg1_result; \
@ -77,6 +79,8 @@
__atg2_result = pre##_32_##post (mem, __VA_ARGS__); \
else if (sizeof (*mem) == 8) \
__atg2_result = pre##_64_##post (mem, __VA_ARGS__); \
else if (sizeof (*mem) == 16) \
__atg2_result = pre##_128_##post (mem, __VA_ARGS__); \
else \
abort (); \
__atg2_result; \
@ -540,7 +544,11 @@
/* We require 32b atomic operations; some archs also support 64b atomic
operations. */
void __atomic_link_error (void);
# if __HAVE_64B_ATOMICS == 1
/* CHERI pure-capability targets additionally need 16-byte (capability
   width) atomics, so sizes 4, 8 and 16 are all accepted there; any other
   size is turned into a link-time error via __atomic_link_error.  */
# if defined __CHERI_PURE_CAPABILITY__
# define __atomic_check_size(mem) \
if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8) && (sizeof (*mem) != 16)) \
__atomic_link_error ();
/* Non-CHERI: only 4- and 8-byte objects are valid when 64b atomics exist.  */
# elif __HAVE_64B_ATOMICS == 1
# define __atomic_check_size(mem) \
if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8)) \
__atomic_link_error ();
@ -553,7 +561,12 @@ void __atomic_link_error (void);
need other atomic operations of such sizes, and restricting the support to
loads and stores makes this easier for archs that do not have native
support for atomic operations to less-than-word-sized data. */
# if __HAVE_64B_ATOMICS == 1
# if defined __CHERI_PURE_CAPABILITY__
/* CHERI: atomic loads/stores of 16-byte (capability-width) objects are
   also permitted, in addition to the usual 1/2/4/8-byte sizes; any other
   size becomes a link-time error.  */
# define __atomic_check_size_ls(mem) \
if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4) \
&& (sizeof (*mem) != 8) && (sizeof (*mem) != 16)) \
__atomic_link_error ();
# elif __HAVE_64B_ATOMICS == 1
# define __atomic_check_size_ls(mem) \
if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4) \
&& (sizeof (*mem) != 8)) \

View File

@ -54,6 +54,13 @@
model, __ATOMIC_RELAXED); \
})
/* 128-bit CAS, boolean form: evaluates to 0 on success and nonzero on
   failure (the __atomic builtin returns true on success, hence the `!`).
   The (void *) cast on &__oldval matches the narrower-width variants;
   relaxed ordering is used for the failure path.  */
# define __arch_compare_and_exchange_bool_128_int(mem, newval, oldval, model) \
({ \
typeof (*mem) __oldval = (oldval); \
!__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
model, __ATOMIC_RELAXED); \
})
# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
({ \
typeof (*mem) __oldval = (oldval); \
@ -86,6 +93,14 @@
__oldval; \
})
/* 128-bit CAS, value form: performs the compare-exchange and evaluates to
   the value previously stored at MEM (the builtin writes the witnessed
   old value back into __oldval on failure; on success __oldval still
   holds the expected value, which equals what was stored).  */
# define __arch_compare_and_exchange_val_128_int(mem, newval, oldval, model) \
({ \
typeof (*mem) __oldval = (oldval); \
__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
model, __ATOMIC_RELAXED); \
__oldval; \
})
/* Compare and exchange with "acquire" semantics, ie barrier after. */
@ -118,6 +133,9 @@
# define __arch_exchange_64_int(mem, newval, model) \
__atomic_exchange_n (mem, newval, model)
/* 128-bit exchange — needed for capability-sized objects on CHERI
   (Morello) targets; same shape as the narrower variants.  */
# define __arch_exchange_128_int(mem, newval, model) \
__atomic_exchange_n (mem, newval, model)
/* Dispatches to __arch_exchange_<size>_int by sizeof (*mem).  */
# define atomic_exchange_acq(mem, value) \
__atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
@ -139,6 +157,9 @@
# define __arch_exchange_and_add_64_int(mem, value, model) \
__atomic_fetch_add (mem, value, model)
/* 128-bit fetch-and-add.  NOTE(review): presumably only reached for
   16-byte integer (non-capability) objects — confirm the compiler/libatomic
   lowering for this width on the target.  */
# define __arch_exchange_and_add_128_int(mem, value, model) \
__atomic_fetch_add (mem, value, model)
/* Dispatches to __arch_exchange_and_add_<size>_int by sizeof (*mem).  */
# define atomic_exchange_and_add_acq(mem, value) \
__atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
__ATOMIC_ACQUIRE)