MIPS: Implement 64-bit atomics in software

The MIPS architecture has no support for 64-bit atomic operations.
It is possible to implement them on top of 32-bit atomics, but
doing so is involved and will take time. For the time being,
implement 64-bit atomics via calls into the runtime.

Bug: v8:8100
Change-Id: I8c732ea9975c46be70643a1e722d78938c8a70de
Reviewed-on: https://chromium-review.googlesource.com/1251521
Commit-Queue: Ivica Bogosavljevic <ibogosavljevic@wavecomp.com>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56331}
Author: Ivica Bogosavljevic <ibogosavljevic@wavecomp.com> (committed by Commit Bot)
Date:   2018-10-02 10:55:17 +02:00
Commit: 408896a8b4 (parent 393b17a554)

4 changed files with 115 additions and 6 deletions
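
The message above calls implementing 64-bit atomics on top of 32-bit ones
"involved"; a minimal standalone sketch of that route (plain C++ with
hypothetical names, not V8 code) is a spinlock built from a natively
supported 32-bit atomic:

#include <atomic>
#include <cstdint>

// 32-bit atomics are natively supported on MIPS32.
static std::atomic<uint32_t> spinlock{0};

uint64_t EmulatedLoad64(const uint64_t* p) {
  while (spinlock.exchange(1, std::memory_order_acquire) != 0) {
    // Spin until the lock is released.
  }
  uint64_t value = *p;  // plain access, made atomic by the lock
  spinlock.store(0, std::memory_order_release);
  return value;
}

void EmulatedStore64(uint64_t* p, uint64_t value) {
  while (spinlock.exchange(1, std::memory_order_acquire) != 0) {
  }
  *p = value;
  spinlock.store(0, std::memory_order_release);
}

This is only correct if every 64-bit access to the same memory takes the
same lock, including accesses from generated code, which is what makes the
proper fix invasive. The runtime fallback below sidesteps that by routing
all 64-bit accesses through two runtime functions.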

BUILD.gn

@@ -2975,6 +2975,15 @@ v8_source_set("v8_base") {
     sources += [ "$target_gen_dir/debug-support.cc" ]
     deps += [ ":postmortem-metadata" ]
   }
+
+  # Platforms that don't have CAS support need to link atomic library
+  # to implement atomic memory access
+  if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" ||
+      v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" ||
+      v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
+      v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
+    libs = [ "atomic" ]
+  }
 }
 
 v8_source_set("torque_base") {
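
Background on the new libs entry: on targets where the compiler cannot
inline every atomic width, GCC and Clang lower the affected __atomic
builtins to out-of-line calls into libatomic. A small illustration,
assuming a 32-bit MIPS toolchain:

#include <cstdint>

uint64_t Load64(uint64_t* p) {
  // On 32-bit MIPS this lowers to a call to __atomic_load_8, which is
  // provided by libatomic, so the link fails without libs = [ "atomic" ].
  return __atomic_load_n(p, __ATOMIC_SEQ_CST);
}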

src/builtins/builtins-sharedarraybuffer-gen.cc

@@ -215,7 +215,13 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
   BIND(&u32);
   Return(ChangeUint32ToTagged(AtomicLoad(MachineType::Uint32(), backing_store,
                                          WordShl(index_word, 2))));
 
+#if V8_TARGET_ARCH_MIPS
+  BIND(&i64);
+  Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_integer));
+  BIND(&u64);
+  Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_integer));
+#else
   BIND(&i64);
   // This uses Uint64() intentionally: AtomicLoad is not implemented for
   // Int64(), which is fine because the machine instruction only cares
@@ -226,7 +232,7 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
   BIND(&u64);
   Return(BigIntFromUnsigned64(AtomicLoad(MachineType::Uint64(), backing_store,
                                          WordShl(index_word, 3))));
-
+#endif
   // This shouldn't happen, we've already validated the type.
   BIND(&other);
   Unreachable();
@@ -266,9 +272,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
       FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
       FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
   };
-  Label* case_labels[] = {
-      &u8, &u8, &u16, &u16, &u32, &u32,
-  };
+  Label* case_labels[] = {&u8, &u8, &u16, &u16, &u32, &u32};
   Switch(instance_type, &other, case_values, case_labels,
          arraysize(case_labels));
 
@@ -288,6 +292,10 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
   Return(value_integer);
 
   BIND(&u64);
+#if V8_TARGET_ARCH_MIPS
+  Return(CallRuntime(Runtime::kAtomicsStore64, context, array, index_integer,
+                     value));
+#else
   TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value));
 #if DEBUG
   DebugSanityCheckAtomicIndex(array, index_word32, context);
@@ -299,6 +307,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
   AtomicStore(MachineRepresentation::kWord64, backing_store,
               WordShl(index_word, 3), var_low.value(), high);
   Return(value_bigint);
+#endif
 
   // This shouldn't happen, we've already validated the type.
   BIND(&other);
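
The comment in the AtomicsLoad hunk above notes that loading the i64 case
through Uint64() is fine because the machine instruction only cares about
the access width. A standalone illustration in plain C++ (not CSA code):
the loaded bits are identical, and a bit-preserving cast recovers the
signed view.

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t raw = UINT64_C(0xffffffffffffffff);  // bit pattern as loaded
  // On two's-complement targets this conversion keeps the bit pattern.
  int64_t as_signed = static_cast<int64_t>(raw);
  std::printf("%lld\n", static_cast<long long>(as_signed));  // prints -1
}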

src/runtime/runtime-atomics.cc

@@ -21,8 +21,6 @@ namespace internal {
 #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
     V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
 
-// Note: 32-bit platforms may need ldflags += [ "-latomic" ] in BUILD.gn.
-
 namespace {
 
 #if V8_CC_GNU
@@ -31,9 +29,20 @@
 // can be slow. Good to know, but we don't have a choice.
 #ifdef V8_TARGET_ARCH_32_BIT
 #pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpragmas"
 #pragma GCC diagnostic ignored "-Watomic-alignment"
 #endif  // V8_TARGET_ARCH_32_BIT
 
+template <typename T>
+inline T LoadSeqCst(T* p) {
+  return __atomic_load_n(p, __ATOMIC_SEQ_CST);
+}
+
+template <typename T>
+inline void StoreSeqCst(T* p, T value) {
+  __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
+}
+
 template <typename T>
 inline T ExchangeSeqCst(T* p, T value) {
   return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
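
A note on the "-Wpragmas" line added above: "-Watomic-alignment" is a
Clang diagnostic, and GCC would itself warn about the unknown warning
name, so "-Wpragmas" is silenced first. A minimal sketch of the pattern,
assuming a file compiled by either GCC or Clang:

#include <cstdint>

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"           // GCC: tolerate the Clang-only name below
#pragma GCC diagnostic ignored "-Watomic-alignment"  // Clang: these atomics may use slow libcalls

uint64_t Exchange64(uint64_t* p, uint64_t value) {
  return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}

#pragma GCC diagnostic pop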
@@ -128,6 +137,16 @@ ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
 ATOMIC_OPS(int64_t, 64, __int64)
 ATOMIC_OPS(uint64_t, 64, __int64)
 
+template <typename T>
+inline T LoadSeqCst(T* p) {
+  UNREACHABLE();
+}
+
+template <typename T>
+inline void StoreSeqCst(T* p, T value) {
+  UNREACHABLE();
+}
+
 #undef ATOMIC_OPS
 #undef InterlockedExchange32
@@ -216,6 +235,23 @@ inline Object* ToObject(Isolate* isolate, uint64_t t) {
   return *BigInt::FromUint64(isolate, t);
 }
 
+template <typename T>
+struct Load {
+  static inline Object* Do(Isolate* isolate, void* buffer, size_t index) {
+    T result = LoadSeqCst(static_cast<T*>(buffer) + index);
+    return ToObject(isolate, result);
+  }
+};
+
+template <typename T>
+struct Store {
+  static inline void Do(Isolate* isolate, void* buffer, size_t index,
+                        Handle<Object> obj) {
+    T value = FromObject<T>(obj);
+    StoreSeqCst(static_cast<T*>(buffer) + index, value);
+  }
+};
+
 template <typename T>
 struct Exchange {
   static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
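
Load and Store above follow the same functor shape as the existing
Exchange: a per-type struct with a static Do() so one runtime entry point
can select the element type with a single branch. A self-contained sketch
of the dispatch pattern, with hypothetical names and no V8 types:

#include <cstddef>
#include <cstdint>
#include <cstdio>

template <typename T>
struct SketchLoad {
  static T Do(void* buffer, size_t index) {
    return __atomic_load_n(static_cast<T*>(buffer) + index, __ATOMIC_SEQ_CST);
  }
};

int main() {
  alignas(8) int64_t cells[2] = {-1, 42};
  bool is_signed = true;  // stands in for the kExternalBigInt64Array check
  long long v = is_signed
                    ? static_cast<long long>(SketchLoad<int64_t>::Do(cells, 1))
                    : static_cast<long long>(SketchLoad<uint64_t>::Do(cells, 1));
  std::printf("%lld\n", v);  // prints 42
}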
@@ -347,6 +383,55 @@ Object* GetModifySetValueInBuffer(Arguments args, Isolate* isolate) {
   UNREACHABLE();
 }
 
+RUNTIME_FUNCTION(Runtime_AtomicsLoad64) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+  CONVERT_SIZE_ARG_CHECKED(index, 1);
+  CHECK(sta->GetBuffer()->is_shared());
+
+  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+                    sta->byte_offset();
+
+  DCHECK(sta->type() == kExternalBigInt64Array ||
+         sta->type() == kExternalBigUint64Array);
+  // SharedArrayBuffers are not neuterable.
+  CHECK_LT(index, NumberToSize(sta->length()));
+  if (sta->type() == kExternalBigInt64Array) {
+    return Load<int64_t>::Do(isolate, source, index);
+  }
+  DCHECK(sta->type() == kExternalBigUint64Array);
+  return Load<uint64_t>::Do(isolate, source, index);
+}
+
+RUNTIME_FUNCTION(Runtime_AtomicsStore64) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+  CONVERT_SIZE_ARG_CHECKED(index, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2);
+  CHECK(sta->GetBuffer()->is_shared());
+
+  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+                    sta->byte_offset();
+
+  Handle<BigInt> bigint;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
+                                     BigInt::FromObject(isolate, value_obj));
+
+  DCHECK(sta->type() == kExternalBigInt64Array ||
+         sta->type() == kExternalBigUint64Array);
+  // SharedArrayBuffers are not neuterable.
+  CHECK_LT(index, NumberToSize(sta->length()));
+  if (sta->type() == kExternalBigInt64Array) {
+    Store<int64_t>::Do(isolate, source, index, bigint);
+    return *bigint;
+  }
+  DCHECK(sta->type() == kExternalBigUint64Array);
+  Store<uint64_t>::Do(isolate, source, index, bigint);
+  return *bigint;
+}
+
 RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
   return GetModifySetValueInBuffer<Exchange>(args, isolate);
 }
@@ -441,6 +526,10 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) {
 
 #else
 
+RUNTIME_FUNCTION(Runtime_AtomicsLoad64) { UNREACHABLE(); }
+
+RUNTIME_FUNCTION(Runtime_AtomicsStore64) { UNREACHABLE(); }
+
 RUNTIME_FUNCTION(Runtime_AtomicsExchange) { UNREACHABLE(); }
 
 RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) { UNREACHABLE(); }

src/runtime/runtime.h

@@ -55,6 +55,8 @@ namespace internal {
   F(TrySliceSimpleNonFastElements, 3, 1)
 
 #define FOR_EACH_INTRINSIC_ATOMICS(F) \
+  F(AtomicsLoad64, 2, 1)              \
+  F(AtomicsStore64, 3, 1)             \
   F(AtomicsAdd, 3, 1)                 \
   F(AtomicsAnd, 3, 1)                 \
   F(AtomicsCompareExchange, 4, 1)     \
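
Each F entry reads F(name, number_of_arguments, result_size), matching the
two-argument load and three-argument store registered above. A simplified,
hypothetical sketch of how such an X-macro list gets consumed (V8's real
expansion also declares the Runtime_* entry points and enum values):

#include <cstdio>

// Hypothetical copy of the two new entries; not V8's actual macro.
#define FOR_EACH_INTRINSIC_ATOMICS_SKETCH(F) \
  F(AtomicsLoad64, 2, 1)                     \
  F(AtomicsStore64, 3, 1)

int main() {
  // One consumer: stamp out a line per intrinsic.
#define PRINT_ENTRY(Name, nargs, ressize) \
  std::printf(#Name ": %d args, %d result\n", (nargs), (ressize));
  FOR_EACH_INTRINSIC_ATOMICS_SKETCH(PRINT_ENTRY)
#undef PRINT_ENTRY
}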