[Atomics] use TFJ builtins for atomic add, sub, and, or, and xor
BUG=v8:4614
R=binji@chromium.org,jarin@chromium.org

Review-Url: https://codereview.chromium.org/2799863002
Cr-Commit-Position: refs/heads/master@{#44542}
commit 14be6ae5e1
parent dbfc030057
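For context, the five new TFJ builtins back the corresponding JavaScript SharedArrayBuffer operations. A minimal usage sketch of the standard Atomics API (illustration only, not part of the diff below):

    const sab = new SharedArrayBuffer(16);
    const ta = new Int32Array(sab);
    Atomics.add(ta, 0, 5);  // ta[0] += 5 atomically, returns the old value
    Atomics.sub(ta, 0, 2);  // ta[0] -= 2
    Atomics.and(ta, 0, 6);  // ta[0] &= 6
    Atomics.or(ta, 0, 1);   // ta[0] |= 1
    Atomics.xor(ta, 0, 3);  // ta[0] ^= 3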
@@ -790,11 +790,11 @@ namespace internal {
   TFJ(AtomicsStore, 3, kArray, kIndex, kValue) \
   TFJ(AtomicsExchange, 3, kArray, kIndex, kValue) \
   TFJ(AtomicsCompareExchange, 4, kArray, kIndex, kOldValue, kNewValue) \
-  CPP(AtomicsAdd) \
-  CPP(AtomicsSub) \
-  CPP(AtomicsAnd) \
-  CPP(AtomicsOr) \
-  CPP(AtomicsXor) \
+  TFJ(AtomicsAdd, 3, kArray, kIndex, kValue) \
+  TFJ(AtomicsSub, 3, kArray, kIndex, kValue) \
+  TFJ(AtomicsAnd, 3, kArray, kIndex, kValue) \
+  TFJ(AtomicsOr, 3, kArray, kIndex, kValue) \
+  TFJ(AtomicsXor, 3, kArray, kIndex, kValue) \
   CPP(AtomicsIsLockFree) \
   CPP(AtomicsWait) \
   CPP(AtomicsWake) \
@@ -19,6 +19,9 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
       : CodeStubAssembler(state) {}
 
  protected:
+  typedef Node* (CodeAssembler::*AssemblerFunction)(MachineType type,
+                                                    Node* base, Node* offset,
+                                                    Node* value);
   void ValidateSharedTypedArray(Node* tagged, Node* context,
                                 Node** out_instance_type,
                                 Node** out_backing_store);
@@ -26,6 +29,9 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
                                           Node** number_index);
   void ValidateAtomicIndex(Node* index_word, Node* array_length_word,
                            Node* context);
+  void AtomicBinopBuiltinCommon(Node* array, Node* index, Node* value,
+                                Node* context, AssemblerFunction function,
+                                Runtime::FunctionId runtime_function);
 };
 
 void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
@@ -144,13 +150,14 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
   Node* index = Parameter(Descriptor::kIndex);
   Node* context = Parameter(Descriptor::kContext);
 
+  Node* index_integer;
+  Node* index_word32 =
+      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
+
   Node* instance_type;
   Node* backing_store;
   ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
 
-  Node* index_integer;
-  Node* index_word32 =
-      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
   Node* array_length_word32 = TruncateTaggedToWord32(
       context, LoadObjectField(array, JSTypedArray::kLengthOffset));
   ValidateAtomicIndex(index_word32, array_length_word32, context);
@@ -203,21 +210,26 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
   Node* value = Parameter(Descriptor::kValue);
   Node* context = Parameter(Descriptor::kContext);
 
-  Node* instance_type;
-  Node* backing_store;
-  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+  // The value_integer needs to be computed before the validations as the
+  // ToInteger function can be potentially modified in JS to invalidate the
+  // conditions. This is just a no-cost safety measure as SABs can't be neutered
+  // or shrunk.
+  Node* value_integer = ToInteger(context, value);
+  Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
 
   Node* index_integer;
   Node* index_word32 =
       ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
+
+  Node* instance_type;
+  Node* backing_store;
+  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+
   Node* array_length_word32 = TruncateTaggedToWord32(
       context, LoadObjectField(array, JSTypedArray::kLengthOffset));
   ValidateAtomicIndex(index_word32, array_length_word32, context);
   Node* index_word = ChangeUint32ToWord(index_word32);
 
-  Node* value_integer = ToInteger(context, value);
-  Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
-
   Label u8(this), u16(this), u32(this), other(this);
   int32_t case_values[] = {
       FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
@@ -255,19 +267,24 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
   Node* value = Parameter(Descriptor::kValue);
   Node* context = Parameter(Descriptor::kContext);
 
-  Node* instance_type;
-  Node* backing_store;
-  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+  // The value_integer needs to be computed before the validations as the
+  // ToInteger function can be potentially modified in JS to invalidate the
+  // conditions. This is just a no-cost safety measure as SABs can't be neutered
+  // or shrunk.
+  Node* value_integer = ToInteger(context, value);
 
   Node* index_integer;
   Node* index_word32 =
       ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
+
+  Node* instance_type;
+  Node* backing_store;
+  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+
   Node* array_length_word32 = TruncateTaggedToWord32(
       context, LoadObjectField(array, JSTypedArray::kLengthOffset));
   ValidateAtomicIndex(index_word32, array_length_word32, context);
 
-  Node* value_integer = ToInteger(context, value);
-
 #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
   Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_integer,
                      value_integer));
@@ -327,21 +344,25 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
   Node* new_value = Parameter(Descriptor::kNewValue);
   Node* context = Parameter(Descriptor::kContext);
 
-  Node* instance_type;
-  Node* backing_store;
-  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+  // The value_integers needs to be computed before the validations as the
+  // ToInteger function can be potentially modified in JS to invalidate the
+  // conditions. This is just a no-cost safety measure as SABs can't be neutered
+  // or shrunk.
+  Node* old_value_integer = ToInteger(context, old_value);
+  Node* new_value_integer = ToInteger(context, new_value);
 
   Node* index_integer;
   Node* index_word32 =
       ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
+
+  Node* instance_type;
+  Node* backing_store;
+  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+
   Node* array_length_word32 = TruncateTaggedToWord32(
       context, LoadObjectField(array, JSTypedArray::kLengthOffset));
   ValidateAtomicIndex(index_word32, array_length_word32, context);
 
-  Node* old_value_integer = ToInteger(context, old_value);
-
-  Node* new_value_integer = ToInteger(context, new_value);
-
 #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
     V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
   Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array,
@@ -402,5 +423,99 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
 // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
 }
 
+#define BINOP_BUILTIN(op) \
+  TF_BUILTIN(Atomics##op, SharedArrayBufferBuiltinsAssembler) { \
+    Node* array = Parameter(Descriptor::kArray); \
+    Node* index = Parameter(Descriptor::kIndex); \
+    Node* value = Parameter(Descriptor::kValue); \
+    Node* context = Parameter(Descriptor::kContext); \
+    AtomicBinopBuiltinCommon(array, index, value, context, \
+                             &CodeAssembler::Atomic##op, \
+                             Runtime::kAtomics##op); \
+  }
+BINOP_BUILTIN(Add)
+BINOP_BUILTIN(Sub)
+BINOP_BUILTIN(And)
+BINOP_BUILTIN(Or)
+BINOP_BUILTIN(Xor)
+#undef BINOP_BUILTIN
+
+void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
+    Node* array, Node* index, Node* value, Node* context,
+    AssemblerFunction function, Runtime::FunctionId runtime_function) {
+  // The value_integer needs to be computed before the validations as the
+  // ToInteger function can be potentially modified in JS to invalidate the
+  // conditions. This is just a no-cost safety measure as SABs can't be neutered
+  // or shrunk.
+  Node* value_integer = ToInteger(context, value);
+
+  Node* index_integer;
+  Node* index_word32 =
+      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
+
+  Node* instance_type;
+  Node* backing_store;
+  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+
+  Node* array_length_word32 = TruncateTaggedToWord32(
+      context, LoadObjectField(array, JSTypedArray::kLengthOffset));
+  ValidateAtomicIndex(index_word32, array_length_word32, context);
+
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
+    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
+  Return(CallRuntime(runtime_function, context, array, index_integer,
+                     value_integer));
+#else
+  Node* index_word = ChangeUint32ToWord(index_word32);
+
+  Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
+
+  Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
+      other(this);
+  int32_t case_values[] = {
+      FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
+      FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
+  };
+  Label* case_labels[] = {
+      &i8, &u8, &i16, &u16, &i32, &u32,
+  };
+  Switch(instance_type, &other, case_values, case_labels,
+         arraysize(case_labels));
+
+  Bind(&i8);
+  Return(SmiFromWord32((this->*function)(MachineType::Int8(), backing_store,
+                                         index_word, value_word32)));
+
+  Bind(&u8);
+  Return(SmiFromWord32((this->*function)(MachineType::Uint8(), backing_store,
+                                         index_word, value_word32)));
+
+  Bind(&i16);
+  Return(
+      SmiFromWord32((this->*function)(MachineType::Int16(), backing_store,
+                                      WordShl(index_word, 1), value_word32)));
+
+  Bind(&u16);
+  Return(
+      SmiFromWord32((this->*function)(MachineType::Uint16(), backing_store,
+                                      WordShl(index_word, 1), value_word32)));
+
+  Bind(&i32);
+  Return(ChangeInt32ToTagged(
+      (this->*function)(MachineType::Int32(), backing_store,
+                        WordShl(index_word, 2), value_word32)));
+
+  Bind(&u32);
+  Return(ChangeUint32ToTagged(
+      (this->*function)(MachineType::Uint32(), backing_store,
+                        WordShl(index_word, 2), value_word32)));
+
+  // This shouldn't happen, we've already validated the type.
+  Bind(&other);
+  Unreachable();
+#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
+        // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
+}
 
 }  // namespace internal
 }  // namespace v8
@@ -169,381 +169,5 @@ BUILTIN(AtomicsWait) {
                      timeout_number);
 }
 
-namespace {
-
-#if V8_CC_GNU
-
-template <typename T>
-inline T AddSeqCst(T* p, T value) {
-  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T SubSeqCst(T* p, T value) {
-  return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T AndSeqCst(T* p, T value) {
-  return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T OrSeqCst(T* p, T value) {
-  return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T XorSeqCst(T* p, T value) {
-  return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
-}
-
-#elif V8_CC_MSVC
-
-#define InterlockedExchangeAdd32 _InterlockedExchangeAdd
-#define InterlockedAnd32 _InterlockedAnd
-#define InterlockedOr32 _InterlockedOr
-#define InterlockedXor32 _InterlockedXor
-#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
-#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
-
-#define ATOMIC_OPS(type, suffix, vctype) \
-  inline type AddSeqCst(type* p, type value) { \
-    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
-                                          bit_cast<vctype>(value)); \
-  } \
-  inline type SubSeqCst(type* p, type value) { \
-    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
-                                          -bit_cast<vctype>(value)); \
-  } \
-  inline type AndSeqCst(type* p, type value) { \
-    return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \
-                                  bit_cast<vctype>(value)); \
-  } \
-  inline type OrSeqCst(type* p, type value) { \
-    return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \
-                                 bit_cast<vctype>(value)); \
-  } \
-  inline type XorSeqCst(type* p, type value) { \
-    return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \
-                                  bit_cast<vctype>(value)); \
-  }
-
-ATOMIC_OPS(int8_t, 8, char)
-ATOMIC_OPS(uint8_t, 8, char)
-ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */
-ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
-ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */
-ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
-
-#undef ATOMIC_OPS_INTEGER
-#undef ATOMIC_OPS
-
-#undef InterlockedExchangeAdd32
-#undef InterlockedAnd32
-#undef InterlockedOr32
-#undef InterlockedXor32
-#undef InterlockedExchangeAdd16
-#undef InterlockedExchangeAdd8
-
-#else
-
-#error Unsupported platform!
-
-#endif
-
-template <typename T>
-T FromObject(Handle<Object> number);
-
-template <>
-inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
-  return NumberToUint32(*number);
-}
-
-template <>
-inline int8_t FromObject<int8_t>(Handle<Object> number) {
-  return NumberToInt32(*number);
-}
-
-template <>
-inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
-  return NumberToUint32(*number);
-}
-
-template <>
-inline int16_t FromObject<int16_t>(Handle<Object> number) {
-  return NumberToInt32(*number);
-}
-
-template <>
-inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
-  return NumberToUint32(*number);
-}
-
-template <>
-inline int32_t FromObject<int32_t>(Handle<Object> number) {
-  return NumberToInt32(*number);
-}
-
-inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }
-
-inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }
-
-inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }
-
-inline Object* ToObject(Isolate* isolate, uint16_t t) {
-  return Smi::FromInt(t);
-}
-
-inline Object* ToObject(Isolate* isolate, int32_t t) {
-  return *isolate->factory()->NewNumber(t);
-}
-
-inline Object* ToObject(Isolate* isolate, uint32_t t) {
-  return *isolate->factory()->NewNumber(t);
-}
-
-template <typename T>
-inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
-                     Handle<Object> obj) {
-  T value = FromObject<T>(obj);
-  T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
-  return ToObject(isolate, result);
-}
-
-template <typename T>
-inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
-                     Handle<Object> obj) {
-  T value = FromObject<T>(obj);
-  T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
-  return ToObject(isolate, result);
-}
-
-template <typename T>
-inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
-                     Handle<Object> obj) {
-  T value = FromObject<T>(obj);
-  T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
-  return ToObject(isolate, result);
-}
-
-template <typename T>
-inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
-                    Handle<Object> obj) {
-  T value = FromObject<T>(obj);
-  T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
-  return ToObject(isolate, result);
-}
-
-template <typename T>
-inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
-                     Handle<Object> obj) {
-  T value = FromObject<T>(obj);
-  T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
-  return ToObject(isolate, result);
-}
-
-}  // anonymous namespace
-
-// Duplicated from objects.h
-// V has parameters (Type, type, TYPE, C type, element_size)
-#define INTEGER_TYPED_ARRAYS(V) \
-  V(Uint8, uint8, UINT8, uint8_t, 1) \
-  V(Int8, int8, INT8, int8_t, 1) \
-  V(Uint16, uint16, UINT16, uint16_t, 2) \
-  V(Int16, int16, INT16, int16_t, 2) \
-  V(Uint32, uint32, UINT32, uint32_t, 4) \
-  V(Int32, int32, INT32, int32_t, 4)
-
-// ES #sec-atomics.add
-// Atomics.add( typedArray, index, value )
-BUILTIN(AtomicsAdd) {
-  HandleScope scope(isolate);
-  Handle<Object> array = args.atOrUndefined(isolate, 1);
-  Handle<Object> index = args.atOrUndefined(isolate, 2);
-  Handle<Object> value = args.atOrUndefined(isolate, 3);
-
-  Handle<JSTypedArray> sta;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));
-
-  Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
-  if (maybe_index.IsNothing()) return isolate->heap()->exception();
-  size_t i = maybe_index.FromJust();
-
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
-                                     Object::ToInteger(isolate, value));
-
-  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
-                    NumberToSize(sta->byte_offset());
-
-  switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
-  case kExternal##Type##Array: \
-    return DoAdd<ctype>(isolate, source, i, value);
-
-    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
-    default:
-      break;
-  }
-
-  UNREACHABLE();
-  return isolate->heap()->undefined_value();
-}
-
-// ES #sec-atomics.sub
-// Atomics.sub( typedArray, index, value )
-BUILTIN(AtomicsSub) {
-  HandleScope scope(isolate);
-  Handle<Object> array = args.atOrUndefined(isolate, 1);
-  Handle<Object> index = args.atOrUndefined(isolate, 2);
-  Handle<Object> value = args.atOrUndefined(isolate, 3);
-
-  Handle<JSTypedArray> sta;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));
-
-  Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
-  if (maybe_index.IsNothing()) return isolate->heap()->exception();
-  size_t i = maybe_index.FromJust();
-
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
-                                     Object::ToInteger(isolate, value));
-
-  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
-                    NumberToSize(sta->byte_offset());
-
-  switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
-  case kExternal##Type##Array: \
-    return DoSub<ctype>(isolate, source, i, value);
-
-    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
-    default:
-      break;
-  }
-
-  UNREACHABLE();
-  return isolate->heap()->undefined_value();
-}
-
-// ES #sec-atomics.and
-// Atomics.and( typedArray, index, value )
-BUILTIN(AtomicsAnd) {
-  HandleScope scope(isolate);
-  Handle<Object> array = args.atOrUndefined(isolate, 1);
-  Handle<Object> index = args.atOrUndefined(isolate, 2);
-  Handle<Object> value = args.atOrUndefined(isolate, 3);
-
-  Handle<JSTypedArray> sta;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));
-
-  Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
-  if (maybe_index.IsNothing()) return isolate->heap()->exception();
-  size_t i = maybe_index.FromJust();
-
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
-                                     Object::ToInteger(isolate, value));
-
-  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
-                    NumberToSize(sta->byte_offset());
-
-  switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
-  case kExternal##Type##Array: \
-    return DoAnd<ctype>(isolate, source, i, value);
-
-    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
-    default:
-      break;
-  }
-
-  UNREACHABLE();
-  return isolate->heap()->undefined_value();
-}
-
-// ES #sec-atomics.or
-// Atomics.or( typedArray, index, value )
-BUILTIN(AtomicsOr) {
-  HandleScope scope(isolate);
-  Handle<Object> array = args.atOrUndefined(isolate, 1);
-  Handle<Object> index = args.atOrUndefined(isolate, 2);
-  Handle<Object> value = args.atOrUndefined(isolate, 3);
-
-  Handle<JSTypedArray> sta;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));
-
-  Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
-  if (maybe_index.IsNothing()) return isolate->heap()->exception();
-  size_t i = maybe_index.FromJust();
-
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
-                                     Object::ToInteger(isolate, value));
-
-  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
-                    NumberToSize(sta->byte_offset());
-
-  switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
-  case kExternal##Type##Array: \
-    return DoOr<ctype>(isolate, source, i, value);
-
-    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
-    default:
-      break;
-  }
-
-  UNREACHABLE();
-  return isolate->heap()->undefined_value();
-}
-
-// ES #sec-atomics.xor
-// Atomics.xor( typedArray, index, value )
-BUILTIN(AtomicsXor) {
-  HandleScope scope(isolate);
-  Handle<Object> array = args.atOrUndefined(isolate, 1);
-  Handle<Object> index = args.atOrUndefined(isolate, 2);
-  Handle<Object> value = args.atOrUndefined(isolate, 3);
-
-  Handle<JSTypedArray> sta;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));
-
-  Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
-  if (maybe_index.IsNothing()) return isolate->heap()->exception();
-  size_t i = maybe_index.FromJust();
-
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
-                                     Object::ToInteger(isolate, value));
-
-  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
-                    NumberToSize(sta->byte_offset());
-
-  switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
-  case kExternal##Type##Array: \
-    return DoXor<ctype>(isolate, source, i, value);
-
-    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
-    default:
-      break;
-  }
-
-  UNREACHABLE();
-  return isolate->heap()->undefined_value();
-}
-
 }  // namespace internal
 }  // namespace v8
@@ -452,6 +452,21 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
     __ dmb(ISH); \
   } while (0)
 
+#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \
+  do { \
+    Label binop; \
+    __ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+    __ dmb(ISH); \
+    __ bind(&binop); \
+    __ load_instr(i.OutputRegister(0), i.TempRegister(0)); \
+    __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
+                 Operand(i.InputRegister(2))); \
+    __ store_instr(i.TempRegister(1), i.TempRegister(1), i.TempRegister(0)); \
+    __ teq(i.TempRegister(1), Operand(0)); \
+    __ b(ne, &binop); \
+    __ dmb(ISH); \
+  } while (0)
+
 #define ASSEMBLE_IEEE754_BINOP(name) \
   do { \
     /* TODO(bmeurer): We should really get rid of this special instruction, */ \
@@ -2305,6 +2320,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ mov(i.TempRegister(1), i.InputRegister(2));
       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex);
       break;
+#define ATOMIC_BINOP_CASE(op, inst) \
+    case kAtomic##op##Int8: \
+      ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
+      __ sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
+      break; \
+    case kAtomic##op##Uint8: \
+      ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
+      break; \
+    case kAtomic##op##Int16: \
+      ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
+      __ sxth(i.OutputRegister(0), i.OutputRegister(0)); \
+      break; \
+    case kAtomic##op##Uint16: \
+      ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
+      break; \
+    case kAtomic##op##Word32: \
+      ASSEMBLE_ATOMIC_BINOP(ldrex, strex, inst); \
+      break;
+      ATOMIC_BINOP_CASE(Add, add)
+      ATOMIC_BINOP_CASE(Sub, sub)
+      ATOMIC_BINOP_CASE(And, and_)
+      ATOMIC_BINOP_CASE(Or, orr)
+      ATOMIC_BINOP_CASE(Xor, eor)
+#undef ATOMIC_BINOP_CASE
   }
   return kSuccess;
 }  // NOLINT(readability/fn_size)
@@ -2234,7 +2234,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
   Node* index = node->InputAt(1);
   Node* value = node->InputAt(2);
   ArchOpcode opcode = kArchNop;
-  MachineType type = AtomicExchangeRepresentationOf(node->op());
+  MachineType type = AtomicOpRepresentationOf(node->op());
   if (type == MachineType::Int8()) {
     opcode = kAtomicExchangeInt8;
   } else if (type == MachineType::Uint8()) {
@@ -2271,7 +2271,7 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
   Node* old_value = node->InputAt(2);
   Node* new_value = node->InputAt(3);
   ArchOpcode opcode = kArchNop;
-  MachineType type = AtomicCompareExchangeRepresentationOf(node->op());
+  MachineType type = AtomicOpRepresentationOf(node->op());
   if (type == MachineType::Int8()) {
     opcode = kAtomicCompareExchangeInt8;
   } else if (type == MachineType::Uint8()) {
@@ -2303,6 +2303,58 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
   Emit(code, 1, outputs, input_count, inputs, 2, temp);
 }
 
+void InstructionSelector::VisitAtomicBinaryOperation(
+    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
+    ArchOpcode uint16_op, ArchOpcode word32_op) {
+  ArmOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+  ArchOpcode opcode = kArchNop;
+  MachineType type = AtomicOpRepresentationOf(node->op());
+  if (type == MachineType::Int8()) {
+    opcode = int8_op;
+  } else if (type == MachineType::Uint8()) {
+    opcode = uint8_op;
+  } else if (type == MachineType::Int16()) {
+    opcode = int16_op;
+  } else if (type == MachineType::Uint16()) {
+    opcode = uint16_op;
+  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+    opcode = word32_op;
+  } else {
+    UNREACHABLE();
+    return;
+  }
+
+  AddressingMode addressing_mode = kMode_Offset_RR;
+  InstructionOperand inputs[3];
+  size_t input_count = 0;
+  inputs[input_count++] = g.UseUniqueRegister(base);
+  inputs[input_count++] = g.UseUniqueRegister(index);
+  inputs[input_count++] = g.UseUniqueRegister(value);
+  InstructionOperand outputs[1];
+  outputs[0] = g.UseUniqueRegister(node);
+  InstructionOperand temps[2];
+  temps[0] = g.TempRegister();
+  temps[1] = g.TempRegister();
+  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+  Emit(code, 1, outputs, input_count, inputs, 2, temps);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+  void InstructionSelector::VisitAtomic##op(Node* node) { \
+    VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
+                               kAtomic##op##Int16, kAtomic##op##Uint16, \
+                               kAtomic##op##Word32); \
+  }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
 #define SIMD_TYPE_LIST(V) \
   V(F32x4) \
   V(I32x4) \
@@ -556,6 +556,19 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
     __ bind(&exit); \
   } while (0)
 
+#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \
+  do { \
+    Label binop; \
+    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+    __ bind(&binop); \
+    __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \
+    __ bin_instr(i.TempRegister32(1), i.OutputRegister32(), \
+                 Operand(i.InputRegister32(2))); \
+    __ store_instr(i.TempRegister32(1), i.TempRegister32(1), \
+                   i.TempRegister(0)); \
+    __ cbnz(i.TempRegister32(1), &binop); \
+  } while (0)
+
 #define ASSEMBLE_IEEE754_BINOP(name) \
   do { \
     FrameScope scope(masm(), StackFrame::MANUAL); \
@@ -1696,6 +1709,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ mov(i.TempRegister(1), i.InputRegister(2));
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr);
       break;
+#define ATOMIC_BINOP_CASE(op, inst) \
+    case kAtomic##op##Int8: \
+      ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \
+      __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
+      break; \
+    case kAtomic##op##Uint8: \
+      ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \
+      break; \
+    case kAtomic##op##Int16: \
+      ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \
+      __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
+      break; \
+    case kAtomic##op##Uint16: \
+      ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \
+      break; \
+    case kAtomic##op##Word32: \
+      ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst); \
+      break;
+      ATOMIC_BINOP_CASE(Add, Add)
+      ATOMIC_BINOP_CASE(Sub, Sub)
+      ATOMIC_BINOP_CASE(And, And)
+      ATOMIC_BINOP_CASE(Or, Orr)
+      ATOMIC_BINOP_CASE(Xor, Eor)
+#undef ATOMIC_BINOP_CASE
   }
   return kSuccess;
 }  // NOLINT(readability/fn_size)
@@ -2714,7 +2714,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
   Node* index = node->InputAt(1);
   Node* value = node->InputAt(2);
   ArchOpcode opcode = kArchNop;
-  MachineType type = AtomicExchangeRepresentationOf(node->op());
+  MachineType type = AtomicOpRepresentationOf(node->op());
   if (type == MachineType::Int8()) {
     opcode = kAtomicExchangeInt8;
   } else if (type == MachineType::Uint8()) {
@@ -2750,7 +2750,7 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
   Node* old_value = node->InputAt(2);
   Node* new_value = node->InputAt(3);
   ArchOpcode opcode = kArchNop;
-  MachineType type = AtomicCompareExchangeRepresentationOf(node->op());
+  MachineType type = AtomicOpRepresentationOf(node->op());
   if (type == MachineType::Int8()) {
     opcode = kAtomicCompareExchangeInt8;
   } else if (type == MachineType::Uint8()) {
@@ -2782,6 +2782,58 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
   Emit(code, 1, outputs, input_count, inputs, 2, temp);
 }
 
+void InstructionSelector::VisitAtomicBinaryOperation(
+    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
+    ArchOpcode uint16_op, ArchOpcode word32_op) {
+  Arm64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+  ArchOpcode opcode = kArchNop;
+  MachineType type = AtomicOpRepresentationOf(node->op());
+  if (type == MachineType::Int8()) {
+    opcode = int8_op;
+  } else if (type == MachineType::Uint8()) {
+    opcode = uint8_op;
+  } else if (type == MachineType::Int16()) {
+    opcode = int16_op;
+  } else if (type == MachineType::Uint16()) {
+    opcode = uint16_op;
+  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+    opcode = word32_op;
+  } else {
+    UNREACHABLE();
+    return;
+  }
+
+  AddressingMode addressing_mode = kMode_MRR;
+  InstructionOperand inputs[3];
+  size_t input_count = 0;
+  inputs[input_count++] = g.UseUniqueRegister(base);
+  inputs[input_count++] = g.UseUniqueRegister(index);
+  inputs[input_count++] = g.UseUniqueRegister(value);
+  InstructionOperand outputs[1];
+  outputs[0] = g.UseUniqueRegister(node);
+  InstructionOperand temps[2];
+  temps[0] = g.TempRegister();
+  temps[1] = g.TempRegister();
+  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+  Emit(code, 1, outputs, input_count, inputs, 2, temps);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+  void InstructionSelector::VisitAtomic##op(Node* node) { \
+    VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
+                               kAtomic##op##Int16, kAtomic##op##Uint16, \
+                               kAtomic##op##Word32); \
+  }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
 void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
   UNREACHABLE();
 }
@@ -516,10 +516,18 @@ Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
   return raw_assembler()->AtomicStore(rep, base, offset, value);
 }
 
-Node* CodeAssembler::AtomicExchange(MachineType type, Node* base, Node* offset,
-                                    Node* value) {
-  return raw_assembler()->AtomicExchange(type, base, offset, value);
-}
+#define ATOMIC_FUNCTION(name) \
+  Node* CodeAssembler::Atomic##name(MachineType type, Node* base, \
+                                    Node* offset, Node* value) { \
+    return raw_assembler()->Atomic##name(type, base, offset, value); \
+  }
+ATOMIC_FUNCTION(Exchange);
+ATOMIC_FUNCTION(Add);
+ATOMIC_FUNCTION(Sub);
+ATOMIC_FUNCTION(And);
+ATOMIC_FUNCTION(Or);
+ATOMIC_FUNCTION(Xor);
+#undef ATOMIC_FUNCTION
 
 Node* CodeAssembler::AtomicCompareExchange(MachineType type, Node* base,
                                            Node* offset, Node* old_value,
@@ -293,6 +293,16 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   Node* AtomicCompareExchange(MachineType type, Node* base, Node* offset,
                               Node* old_value, Node* new_value);
 
+  Node* AtomicAdd(MachineType type, Node* base, Node* offset, Node* value);
+
+  Node* AtomicSub(MachineType type, Node* base, Node* offset, Node* value);
+
+  Node* AtomicAnd(MachineType type, Node* base, Node* offset, Node* value);
+
+  Node* AtomicOr(MachineType type, Node* base, Node* offset, Node* value);
+
+  Node* AtomicXor(MachineType type, Node* base, Node* offset, Node* value);
+
   // Store a value to the root array.
   Node* StoreRoot(Heap::RootListIndex root_index, Node* value);
 
@@ -775,6 +775,18 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
     } \
   } while (0)
 
+#define ASSEMBLE_ATOMIC_BINOP(bin_inst, mov_inst, cmpxchg_inst) \
+  do { \
+    Label binop; \
+    __ bind(&binop); \
+    __ mov_inst(eax, i.MemoryOperand(1)); \
+    __ mov_inst(i.TempRegister(0), Operand(eax)); \
+    __ bin_inst(i.TempRegister(0), i.InputRegister(0)); \
+    __ lock(); \
+    __ cmpxchg_inst(i.MemoryOperand(1), i.TempRegister(0)); \
+    __ j(not_equal, &binop); \
+  } while (false)
+
 void CodeGenerator::AssembleDeconstructFrame() {
   __ mov(esp, ebp);
   __ pop(ebp);
@@ -2016,6 +2028,37 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
       break;
     }
+#define ATOMIC_BINOP_CASE(op, inst) \
+    case kAtomic##op##Int8: { \
+      ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
+      __ movsx_b(eax, eax); \
+      break; \
+    } \
+    case kAtomic##op##Uint8: { \
+      ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
+      __ movzx_b(eax, eax); \
+      break; \
+    } \
+    case kAtomic##op##Int16: { \
+      ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
+      __ movsx_w(eax, eax); \
+      break; \
+    } \
+    case kAtomic##op##Uint16: { \
+      ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
+      __ movzx_w(eax, eax); \
+      break; \
+    } \
+    case kAtomic##op##Word32: { \
+      ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
+      break; \
+    }
+      ATOMIC_BINOP_CASE(Add, add)
+      ATOMIC_BINOP_CASE(Sub, sub)
+      ATOMIC_BINOP_CASE(And, and_)
+      ATOMIC_BINOP_CASE(Or, or_)
+      ATOMIC_BINOP_CASE(Xor, xor_)
+#undef ATOMIC_BINOP_CASE
     case kAtomicLoadInt8:
     case kAtomicLoadUint8:
     case kAtomicLoadInt16:
@@ -1724,7 +1724,11 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
   AddressingMode addressing_mode;
   InstructionOperand inputs[4];
   size_t input_count = 0;
-  inputs[input_count++] = g.UseUniqueRegister(value);
+  if (rep == MachineRepresentation::kWord8) {
+    inputs[input_count++] = g.UseByteRegister(value);
+  } else {
+    inputs[input_count++] = g.UseUniqueRegister(value);
+  }
   inputs[input_count++] = g.UseUniqueRegister(base);
   if (g.CanBeImmediate(index)) {
     inputs[input_count++] = g.UseImmediate(index);
@@ -1743,7 +1747,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
   Node* index = node->InputAt(1);
   Node* value = node->InputAt(2);
 
-  MachineType type = AtomicExchangeRepresentationOf(node->op());
+  MachineType type = AtomicOpRepresentationOf(node->op());
   ArchOpcode opcode = kArchNop;
   if (type == MachineType::Int8()) {
     opcode = kAtomicExchangeInt8;
@@ -1761,9 +1765,13 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
   }
   InstructionOperand outputs[1];
   AddressingMode addressing_mode;
-  InstructionOperand inputs[4];
+  InstructionOperand inputs[3];
   size_t input_count = 0;
-  inputs[input_count++] = g.UseUniqueRegister(value);
+  if (type == MachineType::Int8() || type == MachineType::Uint8()) {
+    inputs[input_count++] = g.UseFixed(value, edx);
+  } else {
+    inputs[input_count++] = g.UseUniqueRegister(value);
+  }
   inputs[input_count++] = g.UseUniqueRegister(base);
   if (g.CanBeImmediate(index)) {
     inputs[input_count++] = g.UseImmediate(index);
@@ -1772,7 +1780,12 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
     inputs[input_count++] = g.UseUniqueRegister(index);
     addressing_mode = kMode_MR1;
   }
-  outputs[0] = g.DefineSameAsFirst(node);
+  if (type == MachineType::Int8() || type == MachineType::Uint8()) {
+    // Using DefineSameAsFirst requires the register to be unallocated.
+    outputs[0] = g.DefineAsFixed(node, edx);
+  } else {
+    outputs[0] = g.DefineSameAsFirst(node);
+  }
   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
   Emit(code, 1, outputs, input_count, inputs);
 }
@@ -1784,7 +1797,7 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
   Node* old_value = node->InputAt(2);
   Node* new_value = node->InputAt(3);
 
-  MachineType type = AtomicCompareExchangeRepresentationOf(node->op());
+  MachineType type = AtomicOpRepresentationOf(node->op());
   ArchOpcode opcode = kArchNop;
   if (type == MachineType::Int8()) {
     opcode = kAtomicCompareExchangeInt8;
@@ -1805,7 +1818,11 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
   InstructionOperand inputs[4];
   size_t input_count = 0;
   inputs[input_count++] = g.UseFixed(old_value, eax);
-  inputs[input_count++] = g.UseUniqueRegister(new_value);
+  if (type == MachineType::Int8() || type == MachineType::Uint8()) {
+    inputs[input_count++] = g.UseByteRegister(new_value);
+  } else {
+    inputs[input_count++] = g.UseUniqueRegister(new_value);
+  }
   inputs[input_count++] = g.UseUniqueRegister(base);
   if (g.CanBeImmediate(index)) {
     inputs[input_count++] = g.UseImmediate(index);
@@ -1819,6 +1836,67 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
   Emit(code, 1, outputs, input_count, inputs);
 }
 
+void InstructionSelector::VisitAtomicBinaryOperation(
+    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
+    ArchOpcode uint16_op, ArchOpcode word32_op) {
+  IA32OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  MachineType type = AtomicOpRepresentationOf(node->op());
+  ArchOpcode opcode = kArchNop;
+  if (type == MachineType::Int8()) {
+    opcode = int8_op;
+  } else if (type == MachineType::Uint8()) {
+    opcode = uint8_op;
+  } else if (type == MachineType::Int16()) {
+    opcode = int16_op;
+  } else if (type == MachineType::Uint16()) {
+    opcode = uint16_op;
+  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+    opcode = word32_op;
+  } else {
+    UNREACHABLE();
+    return;
+  }
+  InstructionOperand outputs[1];
+  AddressingMode addressing_mode;
+  InstructionOperand inputs[3];
+  size_t input_count = 0;
+  if (type == MachineType::Int8() || type == MachineType::Uint8()) {
+    inputs[input_count++] = g.UseByteRegister(value);
+  } else {
+    inputs[input_count++] = g.UseUniqueRegister(value);
+  }
+  inputs[input_count++] = g.UseUniqueRegister(base);
+  if (g.CanBeImmediate(index)) {
+    inputs[input_count++] = g.UseImmediate(index);
+    addressing_mode = kMode_MRI;
+  } else {
+    inputs[input_count++] = g.UseUniqueRegister(index);
+    addressing_mode = kMode_MR1;
+  }
+  outputs[0] = g.DefineAsFixed(node, eax);
+  InstructionOperand temp[1];
+  temp[0] = g.TempRegister();
+  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+  Emit(code, 1, outputs, input_count, inputs, 1, temp);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+  void InstructionSelector::VisitAtomic##op(Node* node) { \
+    VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
+                               kAtomic##op##Int16, kAtomic##op##Uint16, \
+                               kAtomic##op##Word32); \
+  }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
 void InstructionSelector::VisitI32x4Splat(Node* node) {
   VisitRO(this, node, kIA32I32x4Splat);
 }
@@ -99,6 +99,31 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
   V(AtomicCompareExchangeInt16) \
   V(AtomicCompareExchangeUint16) \
   V(AtomicCompareExchangeWord32) \
+  V(AtomicAddInt8) \
+  V(AtomicAddUint8) \
+  V(AtomicAddInt16) \
+  V(AtomicAddUint16) \
+  V(AtomicAddWord32) \
+  V(AtomicSubInt8) \
+  V(AtomicSubUint8) \
+  V(AtomicSubInt16) \
+  V(AtomicSubUint16) \
+  V(AtomicSubWord32) \
+  V(AtomicAndInt8) \
+  V(AtomicAndUint8) \
+  V(AtomicAndInt16) \
+  V(AtomicAndUint16) \
+  V(AtomicAndWord32) \
+  V(AtomicOrInt8) \
+  V(AtomicOrUint8) \
+  V(AtomicOrInt16) \
+  V(AtomicOrUint16) \
+  V(AtomicOrWord32) \
+  V(AtomicXorInt8) \
+  V(AtomicXorUint8) \
+  V(AtomicXorInt16) \
+  V(AtomicXorUint16) \
+  V(AtomicXorWord32) \
   V(Ieee754Float64Acos) \
   V(Ieee754Float64Acosh) \
   V(Ieee754Float64Asin) \
|
@ -336,6 +336,31 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
|
||||
case kAtomicCompareExchangeInt16:
|
||||
case kAtomicCompareExchangeUint16:
|
||||
case kAtomicCompareExchangeWord32:
|
||||
case kAtomicAddInt8:
|
||||
case kAtomicAddUint8:
|
||||
case kAtomicAddInt16:
|
||||
case kAtomicAddUint16:
|
||||
case kAtomicAddWord32:
|
||||
case kAtomicSubInt8:
|
||||
case kAtomicSubUint8:
|
||||
case kAtomicSubInt16:
|
||||
case kAtomicSubUint16:
|
||||
case kAtomicSubWord32:
|
||||
case kAtomicAndInt8:
|
||||
case kAtomicAndUint8:
|
||||
case kAtomicAndInt16:
|
||||
case kAtomicAndUint16:
|
||||
case kAtomicAndWord32:
|
||||
case kAtomicOrInt8:
|
||||
case kAtomicOrUint8:
|
||||
case kAtomicOrInt16:
|
||||
case kAtomicOrUint16:
|
||||
case kAtomicOrWord32:
|
||||
case kAtomicXorInt8:
|
||||
case kAtomicXorUint8:
|
||||
case kAtomicXorInt16:
|
||||
case kAtomicXorUint16:
|
||||
case kAtomicXorWord32:
|
||||
return kHasSideEffect;
|
||||
|
||||
#define CASE(Name) case k##Name:
|
||||
|
@@ -1462,16 +1462,20 @@ void InstructionSelector::VisitNode(Node* node) {
     }
     case IrOpcode::kAtomicStore:
      return VisitAtomicStore(node);
-    case IrOpcode::kAtomicExchange: {
-      MachineType type = AtomicExchangeRepresentationOf(node->op());
-      MarkAsRepresentation(type.representation(), node);
-      return VisitAtomicExchange(node);
-    }
-    case IrOpcode::kAtomicCompareExchange: {
-      MachineType type = AtomicCompareExchangeRepresentationOf(node->op());
-      MarkAsRepresentation(type.representation(), node);
-      return VisitAtomicCompareExchange(node);
-    }
+#define ATOMIC_CASE(name) \
+  case IrOpcode::kAtomic##name: { \
+    MachineType type = AtomicOpRepresentationOf(node->op()); \
+    MarkAsRepresentation(type.representation(), node); \
+    return VisitAtomic##name(node); \
+  }
+      ATOMIC_CASE(Exchange)
+      ATOMIC_CASE(CompareExchange)
+      ATOMIC_CASE(Add)
+      ATOMIC_CASE(Sub)
+      ATOMIC_CASE(And)
+      ATOMIC_CASE(Or)
+      ATOMIC_CASE(Xor)
+#undef ATOMIC_CASE
     case IrOpcode::kProtectedLoad: {
       LoadRepresentation type = LoadRepresentationOf(node->op());
       MarkAsRepresentation(type.representation(), node);
@@ -389,6 +389,9 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
 
   void MarkPairProjectionsAsWord32(Node* node);
   bool IsSourcePositionUsed(Node* node);
+  void VisitAtomicBinaryOperation(Node* node, ArchOpcode int8_op,
+                                  ArchOpcode uint8_op, ArchOpcode int16_op,
+                                  ArchOpcode uint16_op, ArchOpcode word32_op);
 
   // ===========================================================================
 
@@ -149,13 +149,14 @@ class MachineRepresentationInferrer {
               PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
           break;
         case IrOpcode::kAtomicExchange:
-          representation_vector_[node->id()] = PromoteRepresentation(
-              AtomicExchangeRepresentationOf(node->op()).representation());
-          break;
         case IrOpcode::kAtomicCompareExchange:
+        case IrOpcode::kAtomicAdd:
+        case IrOpcode::kAtomicSub:
+        case IrOpcode::kAtomicAnd:
+        case IrOpcode::kAtomicOr:
+        case IrOpcode::kAtomicXor:
           representation_vector_[node->id()] = PromoteRepresentation(
-              AtomicCompareExchangeRepresentationOf(node->op())
-                  .representation());
+              AtomicOpRepresentationOf(node->op()).representation());
           break;
         case IrOpcode::kStore:
         case IrOpcode::kProtectedStore:
@@ -455,6 +456,11 @@ class MachineRepresentationChecker {
       case IrOpcode::kStore:
      case IrOpcode::kAtomicStore:
       case IrOpcode::kAtomicExchange:
+      case IrOpcode::kAtomicAdd:
+      case IrOpcode::kAtomicSub:
+      case IrOpcode::kAtomicAnd:
+      case IrOpcode::kAtomicOr:
+      case IrOpcode::kAtomicXor:
         CheckValueInputIsTaggedOrPointer(node, 0);
         CheckValueInputRepresentationIs(
             node, 1, MachineType::PointerRepresentation());
@@ -80,13 +80,7 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
   return OpParameter<MachineRepresentation>(op);
 }
 
-MachineType AtomicExchangeRepresentationOf(Operator const* op) {
-  DCHECK_EQ(IrOpcode::kAtomicExchange, op->opcode());
-  return OpParameter<MachineType>(op);
-}
-
-MachineType AtomicCompareExchangeRepresentationOf(Operator const* op) {
-  DCHECK_EQ(IrOpcode::kAtomicCompareExchange, op->opcode());
+MachineType AtomicOpRepresentationOf(Operator const* op) {
   return OpParameter<MachineType>(op);
 }
 
@@ -608,17 +602,24 @@ struct MachineOperatorGlobalCache {
   ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
 #undef STORE
 
-#define ATOMIC_EXCHANGE(Type) \
-  struct AtomicExchange##Type##Operator : public Operator1<MachineType> { \
-    AtomicExchange##Type##Operator() \
-        : Operator1<MachineType>(IrOpcode::kAtomicExchange, \
-                                 Operator::kNoDeopt | Operator::kNoThrow, \
-                                 "AtomicExchange", 3, 1, 1, 1, 1, 0, \
-                                 MachineType::Type()) {} \
-  }; \
-  AtomicExchange##Type##Operator kAtomicExchange##Type;
-  ATOMIC_TYPE_LIST(ATOMIC_EXCHANGE)
-#undef ATOMIC_EXCHANGE
+#define ATOMIC_OP(op, type) \
+  struct op##type##Operator : public Operator1<MachineType> { \
+    op##type##Operator() \
+        : Operator1<MachineType>(IrOpcode::k##op, \
+                                 Operator::kNoDeopt | Operator::kNoThrow, #op, \
+                                 3, 1, 1, 1, 1, 0, MachineType::type()) {} \
+  }; \
+  op##type##Operator k##op##type;
+#define ATOMIC_OP_LIST(type) \
+  ATOMIC_OP(AtomicExchange, type) \
+  ATOMIC_OP(AtomicAdd, type) \
+  ATOMIC_OP(AtomicSub, type) \
+  ATOMIC_OP(AtomicAnd, type) \
+  ATOMIC_OP(AtomicOr, type) \
+  ATOMIC_OP(AtomicXor, type)
+ATOMIC_TYPE_LIST(ATOMIC_OP_LIST)
+#undef ATOMIC_OP_LIST
+#undef ATOMIC_OP
 
 #define ATOMIC_COMPARE_EXCHANGE(Type) \
   struct AtomicCompareExchange##Type##Operator \
@@ -911,6 +912,61 @@ const Operator* MachineOperatorBuilder::AtomicCompareExchange(MachineType rep) {
   return nullptr;
 }
 
+const Operator* MachineOperatorBuilder::AtomicAdd(MachineType rep) {
+#define ADD(kRep) \
+  if (rep == MachineType::kRep()) { \
+    return &cache_.kAtomicAdd##kRep; \
+  }
+  ATOMIC_TYPE_LIST(ADD)
+#undef ADD
+  UNREACHABLE();
+  return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::AtomicSub(MachineType rep) {
+#define SUB(kRep) \
+  if (rep == MachineType::kRep()) { \
+    return &cache_.kAtomicSub##kRep; \
+  }
+  ATOMIC_TYPE_LIST(SUB)
+#undef SUB
+  UNREACHABLE();
+  return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::AtomicAnd(MachineType rep) {
+#define AND(kRep) \
+  if (rep == MachineType::kRep()) { \
+    return &cache_.kAtomicAnd##kRep; \
+  }
+  ATOMIC_TYPE_LIST(AND)
+#undef AND
+  UNREACHABLE();
+  return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::AtomicOr(MachineType rep) {
+#define OR(kRep) \
+  if (rep == MachineType::kRep()) { \
+    return &cache_.kAtomicOr##kRep; \
+  }
+  ATOMIC_TYPE_LIST(OR)
+#undef OR
+  UNREACHABLE();
+  return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::AtomicXor(MachineType rep) {
+#define XOR(kRep) \
+  if (rep == MachineType::kRep()) { \
+    return &cache_.kAtomicXor##kRep; \
+  }
+  ATOMIC_TYPE_LIST(XOR)
+#undef XOR
+  UNREACHABLE();
+  return nullptr;
+}
+
 #define SIMD_LANE_OPS(Type, lane_count) \
   const Operator* MachineOperatorBuilder::Type##ExtractLane( \
       int32_t lane_index) { \
@@ -97,9 +97,7 @@ int StackSlotSizeOf(Operator const* op);
 
 MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
 
-MachineType AtomicExchangeRepresentationOf(Operator const* op);
-
-MachineType AtomicCompareExchangeRepresentationOf(Operator const* op);
+MachineType AtomicOpRepresentationOf(Operator const* op);
 
 // Interface for building machine-level operators. These operators are
 // machine-level but machine-independent and thus define a language suitable
@@ -627,6 +625,16 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* AtomicExchange(MachineType rep);
   // atomic-compare-exchange [base + index], old_value, new_value
   const Operator* AtomicCompareExchange(MachineType rep);
+  // atomic-add [base + index], value
+  const Operator* AtomicAdd(MachineType rep);
+  // atomic-sub [base + index], value
+  const Operator* AtomicSub(MachineType rep);
+  // atomic-and [base + index], value
+  const Operator* AtomicAnd(MachineType rep);
+  // atomic-or [base + index], value
+  const Operator* AtomicOr(MachineType rep);
+  // atomic-xor [base + index], value
+  const Operator* AtomicXor(MachineType rep);
 
   // Target machine word-size assumed by this builder.
   bool Is32() const { return word() == MachineRepresentation::kWord32; }
@@ -1600,6 +1600,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kAtomicCompareExchangeInt16:
     case kAtomicCompareExchangeUint16:
     case kAtomicCompareExchangeWord32:
+    case kAtomicAddInt8:
+    case kAtomicAddUint8:
+    case kAtomicAddInt16:
+    case kAtomicAddUint16:
+    case kAtomicAddWord32:
+    case kAtomicSubInt8:
+    case kAtomicSubUint8:
+    case kAtomicSubInt16:
+    case kAtomicSubUint16:
+    case kAtomicSubWord32:
+    case kAtomicAndInt8:
+    case kAtomicAndUint8:
+    case kAtomicAndInt16:
+    case kAtomicAndUint16:
+    case kAtomicAndWord32:
+    case kAtomicOrInt8:
+    case kAtomicOrUint8:
+    case kAtomicOrInt16:
+    case kAtomicOrUint16:
+    case kAtomicOrWord32:
+    case kAtomicXorInt8:
+    case kAtomicXorUint8:
+    case kAtomicXorInt16:
+    case kAtomicXorUint16:
+    case kAtomicXorWord32:
       UNREACHABLE();
       break;
     case kMipsS128Zero: {
@@ -1907,6 +1907,16 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
   UNIMPLEMENTED();
 }
 
+void InstructionSelector::VisitAtomicAdd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicSub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicAnd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }
+
 void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
   UNREACHABLE();
 }
@ -1926,6 +1926,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    case kAtomicCompareExchangeInt16:
    case kAtomicCompareExchangeUint16:
    case kAtomicCompareExchangeWord32:
    case kAtomicAddInt8:
    case kAtomicAddUint8:
    case kAtomicAddInt16:
    case kAtomicAddUint16:
    case kAtomicAddWord32:
    case kAtomicSubInt8:
    case kAtomicSubUint8:
    case kAtomicSubInt16:
    case kAtomicSubUint16:
    case kAtomicSubWord32:
    case kAtomicAndInt8:
    case kAtomicAndUint8:
    case kAtomicAndInt16:
    case kAtomicAndUint16:
    case kAtomicAndWord32:
    case kAtomicOrInt8:
    case kAtomicOrUint8:
    case kAtomicOrInt16:
    case kAtomicOrUint16:
    case kAtomicOrWord32:
    case kAtomicXorInt8:
    case kAtomicXorUint8:
    case kAtomicXorInt16:
    case kAtomicXorUint16:
    case kAtomicXorWord32:
      UNREACHABLE();
      break;
    case kMips64AssertEqual:
@ -2658,6 +2658,16 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitAtomicAdd(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitAtomicSub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitAtomicAnd(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}
@ -560,6 +560,11 @@
  V(AtomicStore)           \
  V(AtomicExchange)        \
  V(AtomicCompareExchange) \
  V(AtomicAdd)             \
  V(AtomicSub)             \
  V(AtomicAnd)             \
  V(AtomicOr)              \
  V(AtomicXor)             \
  V(UnsafePointerAdd)

#define MACHINE_SIMD_OP_LIST(V) \
@ -2005,13 +2005,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    case kAtomicExchangeWord32:
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lwarx, stwcx);
      break;
    case kAtomicCompareExchangeInt8:
    case kAtomicCompareExchangeUint8:
    case kAtomicCompareExchangeInt16:
    case kAtomicCompareExchangeUint16:
    case kAtomicCompareExchangeWord32:
      UNREACHABLE();
      break;
    default:
      UNREACHABLE();
      break;
@ -2123,7 +2123,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicExchangeRepresentationOf(node->op());
  MachineType type = AtomicOpRepresentationOf(node->op());
  if (type == MachineType::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (type == MachineType::Uint8()) {
@ -2155,6 +2155,16 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitAtomicAdd(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitAtomicSub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitAtomicAnd(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}
@ -174,10 +174,17 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
                    Node* value) {
    return AddNode(machine()->AtomicStore(rep), base, index, value);
  }

  Node* AtomicExchange(MachineType rep, Node* base, Node* index, Node* value) {
    return AddNode(machine()->AtomicExchange(rep), base, index, value);
#define ATOMIC_FUNCTION(name)                                                 \
  Node* Atomic##name(MachineType rep, Node* base, Node* index, Node* value) { \
    return AddNode(machine()->Atomic##name(rep), base, index, value);         \
  }
  ATOMIC_FUNCTION(Exchange);
  ATOMIC_FUNCTION(Add);
  ATOMIC_FUNCTION(Sub);
  ATOMIC_FUNCTION(And);
  ATOMIC_FUNCTION(Or);
  ATOMIC_FUNCTION(Xor);
#undef ATOMIC_FUNCTION
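Each ATOMIC_FUNCTION(name) line expands to an ordinary member function; ATOMIC_FUNCTION(Add), for instance, becomes:

  // Expansion of ATOMIC_FUNCTION(Add): forwards to the corresponding
  // MachineOperatorBuilder method and wraps the result in a graph node.
  Node* AtomicAdd(MachineType rep, Node* base, Node* index, Node* value) {
    return AddNode(machine()->AtomicAdd(rep), base, index, value);
  }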

  Node* AtomicCompareExchange(MachineType rep, Node* base, Node* index,
                              Node* old_value, Node* new_value) {
@ -2429,14 +2429,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      __ bne(&do_cs, Label::kNear);
      break;
    }
    case kAtomicCompareExchangeInt8:
    case kAtomicCompareExchangeUint8:
    case kAtomicCompareExchangeInt16:
    case kAtomicCompareExchangeUint16:
    case kAtomicCompareExchangeWord32: {
      UNREACHABLE();
      break;
    }
    default:
      UNREACHABLE();
      break;
@ -2431,7 +2431,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicExchangeRepresentationOf(node->op());
  MachineType type = AtomicOpRepresentationOf(node->op());
  if (type == MachineType::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (type == MachineType::Uint8()) {
@ -2463,6 +2463,16 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitAtomicAdd(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitAtomicSub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitAtomicAnd(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }

// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@ -1429,6 +1429,11 @@ void Verifier::Visitor::Check(Node* node) {
    case IrOpcode::kAtomicStore:
    case IrOpcode::kAtomicExchange:
    case IrOpcode::kAtomicCompareExchange:
    case IrOpcode::kAtomicAdd:
    case IrOpcode::kAtomicSub:
    case IrOpcode::kAtomicAnd:
    case IrOpcode::kAtomicOr:
    case IrOpcode::kAtomicXor:

#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
      MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
@ -710,6 +710,18 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
                1); \
  } while (false)

#define ASSEMBLE_ATOMIC_BINOP(bin_inst, mov_inst, cmpxchg_inst) \
  do {                                                          \
    Label binop;                                                \
    __ bind(&binop);                                            \
    __ mov_inst(rax, i.MemoryOperand(1));                       \
    __ movl(i.TempRegister(0), rax);                            \
    __ bin_inst(i.TempRegister(0), i.InputRegister(0));         \
    __ lock();                                                  \
    __ cmpxchg_inst(i.MemoryOperand(1), i.TempRegister(0));     \
    __ j(not_equal, &binop);                                    \
  } while (false)
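ASSEMBLE_ATOMIC_BINOP emits the standard compare-and-swap retry loop: load the current cell value into rax, apply the operation in a temp register, then lock-cmpxchg the result back and branch to retry if another thread changed the cell in between. A minimal C++ sketch of the same protocol, using std::atomic instead of the raw x64 the macro actually emits (illustrative only):

#include <atomic>
#include <cstdint>

// Sketch of the retry loop the macro encodes, for a 32-bit add. cmpxchg
// implicitly compares against rax, so the loop keeps the last observed value
// in old_value (the analogue of rax).
uint32_t FetchAddSeqCst(std::atomic<uint32_t>* cell, uint32_t operand) {
  uint32_t old_value = cell->load(std::memory_order_seq_cst);  // mov_inst
  uint32_t new_value;
  do {
    new_value = old_value + operand;  // bin_inst on the temp register
    // On failure, compare_exchange refreshes old_value and we retry.
  } while (!cell->compare_exchange_strong(old_value, new_value,
                                          std::memory_order_seq_cst));
  return old_value;  // the value before the operation, as Atomics.add returns
}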

void CodeGenerator::AssembleDeconstructFrame() {
  unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
  __ movq(rsp, rbp);
@ -2330,6 +2342,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      __ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
      break;
    }
#define ATOMIC_BINOP_CASE(op, inst)                \
    case kAtomic##op##Int8:                        \
      ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
      __ movsxbl(rax, rax);                        \
      break;                                       \
    case kAtomic##op##Uint8:                       \
      ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
      __ movzxbl(rax, rax);                        \
      break;                                       \
    case kAtomic##op##Int16:                       \
      ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
      __ movsxwl(rax, rax);                        \
      break;                                       \
    case kAtomic##op##Uint16:                      \
      ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
      __ movzxwl(rax, rax);                        \
      break;                                       \
    case kAtomic##op##Word32:                      \
      ASSEMBLE_ATOMIC_BINOP(inst, movl, cmpxchgl); \
      break;
    ATOMIC_BINOP_CASE(Add, addl)
    ATOMIC_BINOP_CASE(Sub, subl)
    ATOMIC_BINOP_CASE(And, andl)
    ATOMIC_BINOP_CASE(Or, orl)
    ATOMIC_BINOP_CASE(Xor, xorl)
#undef ATOMIC_BINOP_CASE
    case kAtomicLoadInt8:
    case kAtomicLoadUint8:
    case kAtomicLoadInt16:
@ -2305,7 +2305,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  MachineType type = AtomicExchangeRepresentationOf(node->op());
  MachineType type = AtomicOpRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Int8()) {
    opcode = kAtomicExchangeInt8;
@ -2323,7 +2323,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
  }
  InstructionOperand outputs[1];
  AddressingMode addressing_mode;
  InstructionOperand inputs[4];
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(value);
  inputs[input_count++] = g.UseUniqueRegister(base);
@ -2346,7 +2346,7 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
  Node* old_value = node->InputAt(2);
  Node* new_value = node->InputAt(3);

  MachineType type = AtomicCompareExchangeRepresentationOf(node->op());
  MachineType type = AtomicOpRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
@ -2381,6 +2381,63 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
  Emit(code, 1, outputs, input_count, inputs);
}

void InstructionSelector::VisitAtomicBinaryOperation(
    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  MachineType type = AtomicOpRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Int8()) {
    opcode = int8_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Int16()) {
    opcode = int16_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = word32_op;
  } else {
    UNREACHABLE();
    return;
  }
  InstructionOperand outputs[1];
  AddressingMode addressing_mode;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(value);
  inputs[input_count++] = g.UseUniqueRegister(base);
  if (g.CanBeImmediate(index)) {
    inputs[input_count++] = g.UseImmediate(index);
    addressing_mode = kMode_MRI;
  } else {
    inputs[input_count++] = g.UseUniqueRegister(index);
    addressing_mode = kMode_MR1;
  }
  outputs[0] = g.DefineAsFixed(node, rax);
  InstructionOperand temp[1];
  temp[0] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 1, outputs, input_count, inputs, 1, temp);
}

#define VISIT_ATOMIC_BINOP(op)                                              \
  void InstructionSelector::VisitAtomic##op(Node* node) {                   \
    VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
                               kAtomic##op##Int16, kAtomic##op##Uint16,     \
                               kAtomic##op##Word32);                        \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
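Each VISIT_ATOMIC_BINOP(op) expands to a one-line visitor; for Add it becomes:

// Expansion of VISIT_ATOMIC_BINOP(Add):
void InstructionSelector::VisitAtomicAdd(Node* node) {
  VisitAtomicBinaryOperation(node, kAtomicAddInt8, kAtomicAddUint8,
                             kAtomicAddInt16, kAtomicAddUint16,
                             kAtomicAddWord32);
}

Pinning the output to rax (g.DefineAsFixed(node, rax)) matches the code generator's cmpxchg loop above, which implicitly compares against and leaves the pre-operation value in rax.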

#define SIMD_TYPES(V) V(I32x4)

#define SIMD_ZERO_OP_LIST(V) \
@ -708,6 +708,7 @@ void Assembler::xchg(Register dst, const Operand& src) {
}

void Assembler::xchg_b(Register reg, const Operand& op) {
  DCHECK(reg.is_byte_register());
  EnsureSpace ensure_space(this);
  EMIT(0x86);
  emit_operand(reg, op);
@ -733,6 +734,7 @@ void Assembler::cmpxchg(const Operand& dst, Register src) {
}

void Assembler::cmpxchg_b(const Operand& dst, Register src) {
  DCHECK(src.is_byte_register());
  EnsureSpace ensure_space(this);
  EMIT(0x0F);
  EMIT(0xB0);
@ -5713,11 +5713,16 @@ class Script: public Struct {
  V(WeakSet.prototype, delete, WeakSetDelete) \
  V(WeakSet.prototype, has, WeakSetHas)

#define ATOMIC_FUNCTIONS_WITH_ID_LIST(V) \
  V(Atomics, load, AtomicsLoad)          \
  V(Atomics, store, AtomicsStore)        \
  V(Atomics, exchange, AtomicsExchange)  \
  V(Atomics, compareExchange, AtomicsCompareExchange)
#define ATOMIC_FUNCTIONS_WITH_ID_LIST(V)              \
  V(Atomics, load, AtomicsLoad)                       \
  V(Atomics, store, AtomicsStore)                     \
  V(Atomics, exchange, AtomicsExchange)               \
  V(Atomics, compareExchange, AtomicsCompareExchange) \
  V(Atomics, add, AtomicsAdd)                         \
  V(Atomics, sub, AtomicsSub)                         \
  V(Atomics, and, AtomicsAnd)                         \
  V(Atomics, or, AtomicsOr)                           \
  V(Atomics, xor, AtomicsXor)

enum BuiltinFunctionId {
  kArrayCode,
@ -33,11 +33,42 @@ inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  return oldval;
}

template <typename T>
inline T AddSeqCst(T* p, T value) {
  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T SubSeqCst(T* p, T value) {
  return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T AndSeqCst(T* p, T value) {
  return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T OrSeqCst(T* p, T value) {
  return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T XorSeqCst(T* p, T value) {
  return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
}
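Like the GCC __atomic_fetch_* intrinsics they wrap, these helpers return the value the cell held before the update, which is exactly what the Atomics functions hand back to JavaScript. A hypothetical caller, just to show the contract (not part of the change; assumes the wrappers above are in scope):

void Example() {
  int8_t cell = 5;
  int8_t before = AddSeqCst(&cell, static_cast<int8_t>(3));
  // before == 5, cell == 8: the pre-update value is returned.
}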

#elif V8_CC_MSVC

#define InterlockedExchange32 _InterlockedExchange
#define InterlockedCompareExchange32 _InterlockedCompareExchange
#define InterlockedCompareExchange8 _InterlockedCompareExchange8
#define InterlockedExchangeAdd32 _InterlockedExchangeAdd
#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
#define InterlockedAnd32 _InterlockedAnd
#define InterlockedOr32 _InterlockedOr
#define InterlockedXor32 _InterlockedXor

#define ATOMIC_OPS(type, suffix, vctype)            \
  inline type ExchangeSeqCst(type* p, type value) { \
@ -48,6 +79,26 @@ inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
    return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
                                              bit_cast<vctype>(newval),    \
                                              bit_cast<vctype>(oldval));   \
  }                                                                        \
  inline type AddSeqCst(type* p, type value) {                             \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),    \
                                          bit_cast<vctype>(value));        \
  }                                                                        \
  inline type SubSeqCst(type* p, type value) {                             \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),    \
                                          -bit_cast<vctype>(value));       \
  }                                                                        \
  inline type AndSeqCst(type* p, type value) {                             \
    return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p),            \
                                  bit_cast<vctype>(value));                \
  }                                                                        \
  inline type OrSeqCst(type* p, type value) {                              \
    return InterlockedOr##suffix(reinterpret_cast<vctype*>(p),             \
                                 bit_cast<vctype>(value));                 \
  }                                                                        \
  inline type XorSeqCst(type* p, type value) {                             \
    return InterlockedXor##suffix(reinterpret_cast<vctype*>(p),            \
                                  bit_cast<vctype>(value));                \
  }
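Note the SubSeqCst case: there is no InterlockedSub, so subtraction reuses InterlockedExchangeAdd with a negated operand, relying on two's-complement wrap-around. A small sketch of the identity being used (illustrative only):

#include <cstdint>
// x - v and x + (-v) agree modulo 2^n, including when the negation wraps.
uint8_t SubViaAdd(uint8_t x, uint8_t v) {
  return static_cast<uint8_t>(x + static_cast<uint8_t>(-v));
}
// SubViaAdd(3, 5) == 254, the same as static_cast<uint8_t>(3 - 5).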

ATOMIC_OPS(int8_t, 8, char)
@ -63,6 +114,12 @@ ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
#undef InterlockedExchange32
#undef InterlockedCompareExchange32
#undef InterlockedCompareExchange8
#undef InterlockedExchangeAdd32
#undef InterlockedExchangeAdd16
#undef InterlockedExchangeAdd8
#undef InterlockedAnd32
#undef InterlockedOr32
#undef InterlockedXor32

#else
@ -140,6 +197,46 @@ inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
  return ToObject(isolate, result);
}

template <typename T>
inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}

template <typename T>
inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}

template <typename T>
inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}

template <typename T>
inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
                    Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}

template <typename T>
inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}

}  // anonymous namespace

// Duplicated from objects.h
@ -233,5 +330,155 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  return isolate->heap()->undefined_value();
}

// ES #sec-atomics.add
// Atomics.add( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAdd<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
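The runtime functions (the slow path behind the new TFJ builtins) all share this shape: dispatch on the array's element type and call the matching Do* template. Assuming INTEGER_TYPED_ARRAYS enumerates the integer typed-array kinds in the (Type, typeName, TYPE, ctype, size) form the macro expects, the switch above expands to cases like:

// Illustrative expansion of one TYPED_ARRAY_CASE entry, for Int8Array:
case kExternalInt8Array:
  return DoAdd<int8_t>(isolate, source, index, value);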

// ES #sec-atomics.sub
// Atomics.sub( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsSub) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoSub<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}

// ES #sec-atomics.and
// Atomics.and( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAnd<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}

// ES #sec-atomics.or
// Atomics.or( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsOr) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoOr<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}

// ES #sec-atomics.xor
// Atomics.xor( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsXor) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoXor<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}

}  // namespace internal
}  // namespace v8
@ -64,6 +64,11 @@ namespace internal {
  F(ThrowInvalidAtomicAccessIndexError, 0, 1) \
  F(AtomicsExchange, 3, 1)                    \
  F(AtomicsCompareExchange, 4, 1)             \
  F(AtomicsAdd, 3, 1)                         \
  F(AtomicsSub, 3, 1)                         \
  F(AtomicsAnd, 3, 1)                         \
  F(AtomicsOr, 3, 1)                          \
  F(AtomicsXor, 3, 1)                         \
  F(AtomicsNumWaitersForTesting, 2, 1)        \
  F(SetAllowAtomicsWait, 1, 1)
44  test/mjsunit/harmony/atomics-value-check.js  Normal file
@ -0,0 +1,44 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Flags: --harmony-sharedarraybuffer
//

var sab = new SharedArrayBuffer(4);
var sta = new Int8Array(sab);
sta[0] = 5;
var workerScript =
  `onmessage=function(msg) {
     postMessage(0);
   };`;
var worker = new Worker(workerScript);

var value_obj = {
  valueOf: function() {worker.postMessage({sab:sab}, [sta.buffer]);
                       return 5}
}
var value = Object.create(value_obj);

assertThrows(function() {Atomics.exchange(sta, 0, value)});
assertThrows(function() {Atomics.compareExchange(sta, 0, 5, value)});
assertThrows(function() {Atomics.compareExchange(sta, 0, value, 5)});
assertThrows(function() {Atomics.add(sta, 0, value)});
assertThrows(function() {Atomics.sub(sta, 0, value)});
assertThrows(function() {Atomics.and(sta, 0, value)});
assertThrows(function() {Atomics.or(sta, 0, value)});
assertThrows(function() {Atomics.xor(sta, 0, value)});

var index_obj = {
  valueOf: function() {worker.postMessage({sab:sab}, [sta.buffer]);
                       return 0}
}
var index = Object.create(index_obj);

assertThrows(function() {Atomics.exchange(sta, index, 1)});
assertThrows(function() {Atomics.compareExchange(sta, index, 5, 2)});
assertThrows(function() {Atomics.add(sta, index, 3)});
assertThrows(function() {Atomics.sub(sta, index, 4)});
assertThrows(function() {Atomics.and(sta, index, 5)});
assertThrows(function() {Atomics.or(sta, index, 6)});
assertThrows(function() {Atomics.xor(sta, index, 7)});