From 408896a8b41751fea92e482c5eb4b858e7ffe68d Mon Sep 17 00:00:00 2001
From: Ivica Bogosavljevic
Date: Tue, 2 Oct 2018 10:55:17 +0200
Subject: [PATCH] MIPS: Implement 64-bit atomics in software

The MIPS architecture doesn't have support for 64-bit atomics. It is
possible to implement them using 32-bit atomics, but the process is
involved and takes time. For the time being, support 64-bit atomics
through calls into the runtime.

Bug: v8:8100
Change-Id: I8c732ea9975c46be70643a1e722d78938c8a70de
Reviewed-on: https://chromium-review.googlesource.com/1251521
Commit-Queue: Ivica Bogosavljevic
Reviewed-by: Jakob Kummerow
Cr-Commit-Position: refs/heads/master@{#56331}
---
 BUILD.gn                                       |  9 ++
 .../builtins-sharedarraybuffer-gen.cc          | 17 +++-
 src/runtime/runtime-atomics.cc                 | 93 ++++++++++++++++++-
 src/runtime/runtime.h                          |  2 +
 4 files changed, 115 insertions(+), 6 deletions(-)

diff --git a/BUILD.gn b/BUILD.gn
index 6e08eb8c4d..a88020fdae 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -2975,6 +2975,15 @@ v8_source_set("v8_base") {
     sources += [ "$target_gen_dir/debug-support.cc" ]
     deps += [ ":postmortem-metadata" ]
   }
+
+  # Platforms that don't have CAS support need to link against the atomic
+  # library to implement atomic memory access.
+  if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" ||
+      v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" ||
+      v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
+      v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
+    libs = [ "atomic" ]
+  }
 }
 
 v8_source_set("torque_base") {
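For reference, a minimal standalone sketch (not part of this patch; assumes a
GCC or Clang toolchain, and the file name and build line are illustrative) of
the primitive this link dependency serves. On 32-bit targets without native
64-bit compare-and-swap, the compiler lowers seq-cst 64-bit __atomic builtins
to libatomic calls such as __atomic_load_8, which is why the block above adds
libs = [ "atomic" ]:

    // atomic64_sketch.cc -- illustrative only.
    #include <cstdint>

    int64_t LoadSeqCst64(int64_t* p) {
      // Becomes a native instruction where available, otherwise a
      // libatomic call (e.g. __atomic_load_8) on targets such as MIPS32.
      return __atomic_load_n(p, __ATOMIC_SEQ_CST);
    }

    void StoreSeqCst64(int64_t* p, int64_t value) {
      __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
    }

    // Build sketch: g++ -O2 -c atomic64_sketch.cc; link with -latomic.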
diff --git a/src/builtins/builtins-sharedarraybuffer-gen.cc b/src/builtins/builtins-sharedarraybuffer-gen.cc
index 271b1f4981..37bc01e35f 100644
--- a/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -215,7 +215,13 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
   BIND(&u32);
   Return(ChangeUint32ToTagged(AtomicLoad(MachineType::Uint32(), backing_store,
                                          WordShl(index_word, 2))));
-
+#if V8_TARGET_ARCH_MIPS
+  BIND(&i64);
+  Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_integer));
+
+  BIND(&u64);
+  Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_integer));
+#else
   BIND(&i64);
   // This uses Uint64() intentionally: AtomicLoad is not implemented for
   // Int64(), which is fine because the machine instruction only cares
@@ -226,7 +232,7 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
   BIND(&u64);
   Return(BigIntFromUnsigned64(AtomicLoad(MachineType::Uint64(), backing_store,
                                          WordShl(index_word, 3))));
-
+#endif
   // This shouldn't happen, we've already validated the type.
   BIND(&other);
   Unreachable();
@@ -266,9 +272,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
       FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
       FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
   };
-  Label* case_labels[] = {
-      &u8, &u8, &u16, &u16, &u32, &u32,
-  };
+  Label* case_labels[] = {&u8, &u8, &u16, &u16, &u32, &u32};
   Switch(instance_type, &other, case_values, case_labels,
          arraysize(case_labels));
 
@@ -288,6 +292,10 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
   Return(value_integer);
 
   BIND(&u64);
+#if V8_TARGET_ARCH_MIPS
+  Return(CallRuntime(Runtime::kAtomicsStore64, context, array, index_integer,
+                     value));
+#else
   TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value));
 #if DEBUG
   DebugSanityCheckAtomicIndex(array, index_word32, context);
@@ -299,6 +307,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
   AtomicStore(MachineRepresentation::kWord64, backing_store,
               WordShl(index_word, 3), var_low.value(), high);
   Return(value_bigint);
+#endif
 
   // This shouldn't happen, we've already validated the type.
   BIND(&other);
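The #if V8_TARGET_ARCH_MIPS branches above route BigInt64Array and
BigUint64Array accesses into the runtime because 32-bit MIPS cannot perform a
64-bit load or store atomically in a single instruction. A quick illustrative
check of that property (standalone, not part of this patch; assumes a C++11
toolchain):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    int main() {
      std::atomic<std::int64_t> v{0};
      // Typically prints 1 on x64/arm64/mips64 and 0 on 32-bit MIPS, where
      // the operation falls back to a libatomic lock.
      std::printf("64-bit atomics lock-free: %d\n", v.is_lock_free() ? 1 : 0);
    }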
diff --git a/src/runtime/runtime-atomics.cc b/src/runtime/runtime-atomics.cc
index 19838e2201..3fd07af255 100644
--- a/src/runtime/runtime-atomics.cc
+++ b/src/runtime/runtime-atomics.cc
@@ -21,8 +21,6 @@ namespace internal {
 #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
     V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
 
-// Note: 32-bit platforms may need ldflags += [ "-latomic" ] in BUILD.gn.
-
 namespace {
 
 #if V8_CC_GNU
@@ -31,9 +29,20 @@ namespace {
 // can be slow. Good to know, but we don't have a choice.
 #ifdef V8_TARGET_ARCH_32_BIT
 #pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpragmas"
 #pragma GCC diagnostic ignored "-Watomic-alignment"
 #endif  // V8_TARGET_ARCH_32_BIT
 
+template <typename T>
+inline T LoadSeqCst(T* p) {
+  return __atomic_load_n(p, __ATOMIC_SEQ_CST);
+}
+
+template <typename T>
+inline void StoreSeqCst(T* p, T value) {
+  __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
+}
+
 template <typename T>
 inline T ExchangeSeqCst(T* p, T value) {
   return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
@@ -128,6 +137,16 @@ ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
 ATOMIC_OPS(int64_t, 64, __int64)
 ATOMIC_OPS(uint64_t, 64, __int64)
 
+template <typename T>
+inline T LoadSeqCst(T* p) {
+  UNREACHABLE();
+}
+
+template <typename T>
+inline void StoreSeqCst(T* p, T value) {
+  UNREACHABLE();
+}
+
 #undef ATOMIC_OPS
 
 #undef InterlockedExchange32
@@ -216,6 +235,23 @@ inline Object* ToObject(Isolate* isolate, uint64_t t) {
   return *BigInt::FromUint64(isolate, t);
 }
 
+template <typename T>
+struct Load {
+  static inline Object* Do(Isolate* isolate, void* buffer, size_t index) {
+    T result = LoadSeqCst(static_cast<T*>(buffer) + index);
+    return ToObject(isolate, result);
+  }
+};
+
+template <typename T>
+struct Store {
+  static inline void Do(Isolate* isolate, void* buffer, size_t index,
+                        Handle<Object> obj) {
+    T value = FromObject<T>(obj);
+    StoreSeqCst(static_cast<T*>(buffer) + index, value);
+  }
+};
+
 template <typename T>
 struct Exchange {
   static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
@@ -347,6 +383,55 @@ Object* GetModifySetValueInBuffer(Arguments args, Isolate* isolate) {
   UNREACHABLE();
 }
 
+RUNTIME_FUNCTION(Runtime_AtomicsLoad64) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+  CONVERT_SIZE_ARG_CHECKED(index, 1);
+  CHECK(sta->GetBuffer()->is_shared());
+
+  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+                    sta->byte_offset();
+
+  DCHECK(sta->type() == kExternalBigInt64Array ||
+         sta->type() == kExternalBigUint64Array);
+  // SharedArrayBuffers are not neuterable.
+  CHECK_LT(index, NumberToSize(sta->length()));
+  if (sta->type() == kExternalBigInt64Array) {
+    return Load<int64_t>::Do(isolate, source, index);
+  }
+  DCHECK(sta->type() == kExternalBigUint64Array);
+  return Load<uint64_t>::Do(isolate, source, index);
+}
+
+RUNTIME_FUNCTION(Runtime_AtomicsStore64) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+  CONVERT_SIZE_ARG_CHECKED(index, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2);
+  CHECK(sta->GetBuffer()->is_shared());
+
+  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+                    sta->byte_offset();
+
+  Handle<BigInt> bigint;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
+                                     BigInt::FromObject(isolate, value_obj));
+
+  DCHECK(sta->type() == kExternalBigInt64Array ||
+         sta->type() == kExternalBigUint64Array);
+  // SharedArrayBuffers are not neuterable.
+  CHECK_LT(index, NumberToSize(sta->length()));
+  if (sta->type() == kExternalBigInt64Array) {
+    Store<int64_t>::Do(isolate, source, index, bigint);
+    return *bigint;
+  }
+  DCHECK(sta->type() == kExternalBigUint64Array);
+  Store<uint64_t>::Do(isolate, source, index, bigint);
+  return *bigint;
+}
+
 RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
   return GetModifySetValueInBuffer(args, isolate);
 }
@@ -441,6 +526,10 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) {
 
 #else
 
+RUNTIME_FUNCTION(Runtime_AtomicsLoad64) { UNREACHABLE(); }
+
+RUNTIME_FUNCTION(Runtime_AtomicsStore64) { UNREACHABLE(); }
+
 RUNTIME_FUNCTION(Runtime_AtomicsExchange) { UNREACHABLE(); }
 
 RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) { UNREACHABLE(); }
diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h
index 842a925d37..6ed37916cd 100644
--- a/src/runtime/runtime.h
+++ b/src/runtime/runtime.h
@@ -55,6 +55,8 @@ namespace internal {
   F(TrySliceSimpleNonFastElements, 3, 1)
 
 #define FOR_EACH_INTRINSIC_ATOMICS(F) \
+  F(AtomicsLoad64, 2, 1)              \
+  F(AtomicsStore64, 3, 1)             \
   F(AtomicsAdd, 3, 1)                 \
   F(AtomicsAnd, 3, 1)                 \
   F(AtomicsCompareExchange, 4, 1)     \
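The new Load/Store helpers follow the same type-tagged dispatch pattern as
the existing Exchange and CompareExchange structs: the runtime function
inspects the array's element type and picks the matching instantiation. A
simplified standalone sketch of that shape (names are stand-ins; the real
code converts values through FromObject/ToObject and BigInt):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    template <typename T>
    inline T LoadSeqCst(T* p) {
      return __atomic_load_n(p, __ATOMIC_SEQ_CST);
    }

    // Type-tagged dispatcher, mirroring struct Load in the patch.
    template <typename T>
    struct Load {
      static T Do(void* buffer, std::size_t index) {
        return LoadSeqCst(static_cast<T*>(buffer) + index);
      }
    };

    int main() {
      int64_t cells[2] = {0, -42};
      // Runtime_AtomicsLoad64 would pick Load<int64_t> for a
      // kExternalBigInt64Array and Load<uint64_t> for BigUint64.
      std::printf("%lld\n",
                  static_cast<long long>(Load<int64_t>::Do(cells, 1)));
    }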