[Atomics] code stubs for atomic operations
* New atomic code stubs for x64, ia32, arm, arm64
* Add convenience functions JumpIfNotValidSmiValue, JumpIfUIntNotValidSmiValue
  to macro-assembler-ia32 (API based on the x64 macro assembler)
* Remove the runtime implementation of Atomics.load; the code stub should
  always be called instead
* Add a new test to the mjsunit atomics test: check that Smi values of
  different sizes are supported when possible, else fall back to HeapNumbers

These changes were needed to add another code stub:
* Bump kStubMajorKeyBits from 7 to 8
* Reduce ScriptContextFieldStub::kSlotIndexBits from 13 to 12

BUG=v8:4614
LOG=y

Review URL: https://codereview.chromium.org/1617503003

Cr-Commit-Position: refs/heads/master@{#35427}
This commit is contained in:
parent 949d322688
commit 10b5febe11
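Before the per-platform diffs below, a minimal JavaScript sketch (illustrative only, not part of the commit) of the behaviour the new mjsunit Smi-range test checks: Atomics.load on a shared integer typed array returns a Smi when the value fits, and the new code stub falls back to allocating a HeapNumber when it does not, so script always observes a plain number. It assumes a shell such as d8 with print available and --harmony-sharedarraybuffer enabled.

  // Illustrative sketch: values inside and outside the 31-bit Smi range.
  var sab = new SharedArrayBuffer(4);
  var i32 = new Int32Array(sab);
  var u32 = new Uint32Array(sab);

  u32[0] = 0x3fffffff;          // fits in a Smi on every target
  print(Atomics.load(i32, 0));  // 1073741823

  u32[0] = 0xffffffff;          // does not fit in a 31-bit Smi
  print(Atomics.load(u32, 0));  // 4294967295 (HeapNumber fallback on 32-bit)
  print(Atomics.load(i32, 0));  // -1 (same bits read through the signed view)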
@@ -5552,6 +5552,166 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
                           kStackUnwindSpace, NULL, return_value_operand, NULL);
}

namespace {

void GetTypedArrayBackingStore(MacroAssembler* masm, Register backing_store,
                               Register object, Register scratch,
                               LowDwVfpRegister double_scratch) {
  Label offset_is_not_smi, done;
  __ ldr(scratch, FieldMemOperand(object, JSTypedArray::kBufferOffset));
  __ ldr(backing_store,
         FieldMemOperand(scratch, JSArrayBuffer::kBackingStoreOffset));
  __ ldr(scratch,
         FieldMemOperand(object, JSArrayBufferView::kByteOffsetOffset));
  __ JumpIfNotSmi(scratch, &offset_is_not_smi);
  // offset is smi
  __ add(backing_store, backing_store, Operand::SmiUntag(scratch));
  __ jmp(&done);

  // offset is a heap number
  __ bind(&offset_is_not_smi);
  __ vldr(double_scratch, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
  __ vcvt_u32_f64(double_scratch.low(), double_scratch);
  __ vmov(scratch, double_scratch.low());
  __ add(backing_store, backing_store, scratch);
  __ bind(&done);
}

void TypedArrayJumpTable(MacroAssembler* masm, Register object,
                         Register scratch, Label* i8, Label* u8, Label* i16,
                         Label* u16, Label* i32, Label* u32, Label* u8c) {
  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 1);
  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 2);
  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 3);
  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 4);
  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 5);
  STATIC_ASSERT(FIXED_FLOAT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 6);
  STATIC_ASSERT(FIXED_FLOAT64_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 7);
  STATIC_ASSERT(FIXED_UINT8_CLAMPED_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 8);

  __ ldr(scratch, FieldMemOperand(object, JSObject::kElementsOffset));
  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  __ sub(scratch, scratch, Operand(static_cast<uint8_t>(FIXED_INT8_ARRAY_TYPE)),
         SetCC);
  __ Assert(ge, kOffsetOutOfRange);

  Label abort;

  {
    Assembler::BlockConstPoolScope scope(masm);
    __ add(pc, pc, Operand(scratch, LSL, 2));
    __ nop();
    __ b(i8);      // Int8Array
    __ b(u8);      // Uint8Array
    __ b(i16);     // Int16Array
    __ b(u16);     // Uint16Array
    __ b(i32);     // Int32Array
    __ b(u32);     // Uint32Array
    __ b(&abort);  // Float32Array
    __ b(&abort);  // Float64Array
    __ b(u8c);     // Uint8ClampedArray
  }

  __ bind(&abort);
  __ Abort(kNoReason);
}

void ReturnInteger32(MacroAssembler* masm, DwVfpRegister dst, Register value,
                     SwVfpRegister single_scratch, Label* use_heap_number) {
  Label not_smi;
  __ TrySmiTag(r0, value, &not_smi);
  __ Ret();

  __ bind(&not_smi);
  __ vmov(single_scratch, value);
  __ vcvt_f64_s32(dst, single_scratch);
  __ jmp(use_heap_number);
}

void ReturnUnsignedInteger32(MacroAssembler* masm, DwVfpRegister dst,
                             Register value, SwVfpRegister single_scratch,
                             Label* use_heap_number) {
  Label not_smi;
  __ cmp(value, Operand(0x40000000U));
  __ b(cs, &not_smi);
  __ SmiTag(r0, value);
  __ Ret();

  __ bind(&not_smi);
  __ vmov(single_scratch, value);
  __ vcvt_f64_u32(dst, single_scratch);
  __ jmp(use_heap_number);
}

void ReturnAllocatedHeapNumber(MacroAssembler* masm, DwVfpRegister value,
                               Register scratch, Register scratch2,
                               Register scratch3) {
  Label call_runtime;
  __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
  __ AllocateHeapNumber(r0, scratch, scratch2, scratch3, &call_runtime);
  __ vstr(value, FieldMemOperand(r0, HeapNumber::kValueOffset));
  __ Ret();

  __ bind(&call_runtime);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    __ vstr(value, FieldMemOperand(r0, HeapNumber::kValueOffset));
  }
  __ Ret();
}

}  // anonymous namespace

void AtomicsLoadStub::Generate(MacroAssembler* masm) {
  Register object = r1;
  Register index = r0;  // Index is an untagged word32.
  Register backing_store = r2;
  Label i8, u8, i16, u16, i32, u32;

  GetTypedArrayBackingStore(masm, backing_store, object, r3, d0);
  TypedArrayJumpTable(masm, object, r3, &i8, &u8, &i16, &u16, &i32, &u32, &u8);

  __ bind(&i8);
  __ ldrsb(r0, MemOperand(backing_store, index));
  __ dmb(ISH);
  __ SmiTag(r0);
  __ Ret();

  __ bind(&u8);
  __ ldrb(r0, MemOperand(backing_store, index));
  __ dmb(ISH);
  __ SmiTag(r0);
  __ Ret();

  __ bind(&i16);
  __ ldrsh(r0, MemOperand(backing_store, index, LSL, 1));
  __ dmb(ISH);
  __ SmiTag(r0);
  __ Ret();

  __ bind(&u16);
  __ ldrh(r0, MemOperand(backing_store, index, LSL, 1));
  __ dmb(ISH);
  __ SmiTag(r0);
  __ Ret();

  Label use_heap_number;

  __ bind(&i32);
  __ ldr(r0, MemOperand(backing_store, index, LSL, 2));
  __ dmb(ISH);
  ReturnInteger32(masm, d0, r0, s2, &use_heap_number);

  __ bind(&u32);
  __ ldr(r0, MemOperand(backing_store, index, LSL, 2));
  __ dmb(ISH);
  ReturnUnsignedInteger32(masm, d0, r0, s2, &use_heap_number);

  __ bind(&use_heap_number);
  ReturnAllocatedHeapNumber(masm, d0, r1, r2, r3);
}

#undef __
@@ -445,6 +445,15 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void AtomicsLoadDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
      r1,  // the typedarray object
      r0   // the index to load (untagged)
  };
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

}  // namespace internal
}  // namespace v8
@@ -5939,6 +5939,144 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
                           return_value_operand, NULL);
}

namespace {

void GetTypedArrayBackingStore(MacroAssembler* masm, Register backing_store,
                               Register object, Register scratch,
                               FPRegister double_scratch) {
  Label offset_is_not_smi, done;
  __ Ldr(scratch, FieldMemOperand(object, JSTypedArray::kBufferOffset));
  __ Ldr(backing_store,
         FieldMemOperand(scratch, JSArrayBuffer::kBackingStoreOffset));
  __ Ldr(scratch,
         FieldMemOperand(object, JSArrayBufferView::kByteOffsetOffset));
  __ JumpIfNotSmi(scratch, &offset_is_not_smi);
  // offset is smi
  __ Add(backing_store, backing_store, Operand::UntagSmi(scratch));
  __ B(&done);

  // offset is a heap number
  __ Bind(&offset_is_not_smi);
  __ Ldr(double_scratch, FieldMemOperand(scratch, HeapNumber::kValueOffset));
  __ Fcvtzu(scratch, double_scratch);
  __ Add(backing_store, backing_store, scratch);
  __ Bind(&done);
}

void TypedArrayJumpTable(MacroAssembler* masm, Register object,
                         Register scratch, Register scratch2, Label* i8,
                         Label* u8, Label* i16, Label* u16, Label* i32,
                         Label* u32, Label* u8c) {
  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 1);
  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 2);
  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 3);
  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 4);
  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 5);
  STATIC_ASSERT(FIXED_FLOAT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 6);
  STATIC_ASSERT(FIXED_FLOAT64_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 7);
  STATIC_ASSERT(FIXED_UINT8_CLAMPED_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 8);

  __ Ldr(scratch, FieldMemOperand(object, JSObject::kElementsOffset));
  __ Ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
  __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  __ Subs(scratch, scratch,
          Operand(static_cast<uint8_t>(FIXED_INT8_ARRAY_TYPE)));
  __ Assert(ge, kOffsetOutOfRange);

  Label abort;
  Label table;

  __ Adr(scratch2, &table);
  __ Add(scratch, scratch2, Operand(scratch, UXTW, 2));
  __ Br(scratch);

  __ StartBlockPools();
  __ Bind(&table);
  __ B(i8);      // Int8Array
  __ B(u8);      // Uint8Array
  __ B(i16);     // Int16Array
  __ B(u16);     // Uint16Array
  __ B(i32);     // Int32Array
  __ B(u32);     // Uint32Array
  __ B(&abort);  // Float32Array
  __ B(&abort);  // Float64Array
  __ B(u8c);     // Uint8ClampedArray
  __ EndBlockPools();

  __ Bind(&abort);
  __ Abort(kNoReason);
}

void ReturnUnsignedInteger32(MacroAssembler* masm, FPRegister dst,
                             Register value, Register scratch,
                             Register scratch2) {
  Label not_smi, call_runtime;
  __ Tbnz(value, 31, &not_smi);
  __ SmiTag(x0, value);
  __ Ret();

  __ Bind(&not_smi);
  __ Ucvtf(dst, value);
  __ AllocateHeapNumber(x0, &call_runtime, scratch, scratch2, dst);
  __ Ret();

  __ Bind(&call_runtime);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    __ Str(value, FieldMemOperand(x0, HeapNumber::kValueOffset));
  }
  __ Ret();
}

}  // anonymous namespace

void AtomicsLoadStub::Generate(MacroAssembler* masm) {
  Register object = x1;
  Register index = x0;  // Index is an untagged word32.
  Register backing_store = x2;
  Label i8, u8, i16, u16, i32, u32;

  GetTypedArrayBackingStore(masm, backing_store, object, x3, d0);
  TypedArrayJumpTable(masm, object, x3, x4, &i8, &u8, &i16, &u16, &i32, &u32,
                      &u8);

  __ Bind(&i8);
  __ Ldrsb(x0, MemOperand(backing_store, index));
  __ Dmb(InnerShareable, BarrierAll);
  __ SmiTag(x0);
  __ Ret();

  __ Bind(&u8);
  __ Ldrb(x0, MemOperand(backing_store, index));
  __ Dmb(InnerShareable, BarrierAll);
  __ SmiTag(x0);
  __ Ret();

  __ Bind(&i16);
  __ Ldrsh(x0, MemOperand(backing_store, index, UXTW, 1));
  __ Dmb(InnerShareable, BarrierAll);
  __ SmiTag(x0);
  __ Ret();

  __ Bind(&u16);
  __ Ldrh(x0, MemOperand(backing_store, index, UXTW, 1));
  __ Dmb(InnerShareable, BarrierAll);
  __ SmiTag(x0);
  __ Ret();

  __ Bind(&i32);
  __ Ldrsw(x0, MemOperand(backing_store, index, UXTW, 2));
  __ Dmb(InnerShareable, BarrierAll);
  DCHECK(SmiValuesAre32Bits());
  __ SmiTag(x0);
  __ Ret();

  __ Bind(&u32);
  __ Ldr(w0, MemOperand(backing_store, index, UXTW, 2));
  __ Dmb(InnerShareable, BarrierAll);
  ReturnUnsignedInteger32(masm, d0, x0, x1, x2);
}

#undef __
@@ -477,6 +477,15 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void AtomicsLoadDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
      x1,  // the typedarray object
      x0   // the index to load (untagged)
  };
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

}  // namespace internal
}  // namespace v8
@@ -2442,9 +2442,24 @@ void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
  if (!FLAG_harmony_sharedarraybuffer) return;

  Handle<JSGlobalObject> global(native_context()->global_object());
  Isolate* isolate = global->GetIsolate();
  Factory* factory = isolate->factory();

  Handle<JSFunction> shared_array_buffer_fun =
      InstallArrayBuffer(global, "SharedArrayBuffer");
  native_context()->set_shared_array_buffer_fun(*shared_array_buffer_fun);

  Handle<String> name = factory->InternalizeUtf8String("Atomics");
  Handle<JSFunction> cons = factory->NewFunction(name);
  JSFunction::SetInstancePrototype(
      cons,
      Handle<Object>(native_context()->initial_object_prototype(), isolate));
  Handle<JSObject> atomics_object = factory->NewJSObject(cons, TENURED);
  DCHECK(atomics_object->IsJSObject());
  JSObject::AddProperty(global, name, atomics_object, DONT_ENUM);

  SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("load"),
                        Builtins::kAtomicsLoadCheck, 2, true);
}
src/builtins.cc
@@ -5145,6 +5145,143 @@ void Builtins::Generate_StackCheck(MacroAssembler* masm) {
  masm->TailCallRuntime(Runtime::kStackGuard);
}

namespace {

void ValidateSharedTypedArray(compiler::CodeStubAssembler* a,
                              compiler::Node* tagged, compiler::Node* context) {
  using namespace compiler;
  CodeStubAssembler::Label is_smi(a), not_smi(a), is_typed_array(a),
      not_typed_array(a), is_shared(a), not_shared(a), is_float_or_clamped(a),
      not_float_or_clamped(a), invalid(a);

  // Fail if it is not a heap object.
  a->Branch(a->WordIsSmi(tagged), &is_smi, &not_smi);
  a->Bind(&is_smi);
  a->Goto(&invalid);

  // Fail if the array's instance type is not JSTypedArray.
  a->Bind(&not_smi);
  a->Branch(a->WordEqual(a->LoadInstanceType(tagged),
                         a->Int32Constant(JS_TYPED_ARRAY_TYPE)),
            &is_typed_array, &not_typed_array);
  a->Bind(&not_typed_array);
  a->Goto(&invalid);

  // Fail if the array's JSArrayBuffer is not shared.
  a->Bind(&is_typed_array);
  Node* is_buffer_shared =
      a->BitFieldDecode<JSArrayBuffer::IsShared>(a->LoadObjectField(
          a->LoadObjectField(tagged, JSTypedArray::kBufferOffset),
          JSArrayBuffer::kBitFieldOffset));
  a->Branch(is_buffer_shared, &is_shared, &not_shared);
  a->Bind(&not_shared);
  a->Goto(&invalid);

  // Fail if the array's element type is float32, float64 or clamped.
  a->Bind(&is_shared);
  Node* elements_instance_type = a->LoadInstanceType(
      a->LoadObjectField(tagged, JSObject::kElementsOffset));
  STATIC_ASSERT(FIXED_INT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  a->Branch(a->Int32LessThan(elements_instance_type,
                             a->Int32Constant(FIXED_FLOAT32_ARRAY_TYPE)),
            &not_float_or_clamped, &is_float_or_clamped);
  a->Bind(&is_float_or_clamped);
  a->Goto(&invalid);

  a->Bind(&invalid);
  a->CallRuntime(Runtime::kThrowNotIntegerSharedTypedArrayError, context,
                 tagged);
  a->Return(a->UndefinedConstant());

  a->Bind(&not_float_or_clamped);
}

// https://tc39.github.io/ecmascript_sharedmem/shmem.html#Atomics.ValidateAtomicAccess
compiler::Node* ConvertTaggedAtomicIndexToWord32(compiler::CodeStubAssembler* a,
                                                 compiler::Node* tagged,
                                                 compiler::Node* context) {
  using namespace compiler;
  CodeStubAssembler::Variable var_result(a, MachineRepresentation::kWord32);

  Callable to_number = CodeFactory::ToNumber(a->isolate());
  Node* number_index = a->CallStub(to_number, context, tagged);
  CodeStubAssembler::Label done(a, &var_result);

  CodeStubAssembler::Label if_numberissmi(a), if_numberisnotsmi(a);
  a->Branch(a->WordIsSmi(number_index), &if_numberissmi, &if_numberisnotsmi);

  a->Bind(&if_numberissmi);
  {
    var_result.Bind(a->SmiToWord32(number_index));
    a->Goto(&done);
  }

  a->Bind(&if_numberisnotsmi);
  {
    Node* number_index_value = a->LoadHeapNumberValue(number_index);
    Node* access_index = a->TruncateFloat64ToInt32(number_index_value);
    Node* test_index = a->ChangeInt32ToFloat64(access_index);

    CodeStubAssembler::Label if_indexesareequal(a), if_indexesarenotequal(a);
    a->Branch(a->Float64Equal(number_index_value, test_index),
              &if_indexesareequal, &if_indexesarenotequal);

    a->Bind(&if_indexesareequal);
    {
      var_result.Bind(access_index);
      a->Goto(&done);
    }

    a->Bind(&if_indexesarenotequal);
    a->Return(
        a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context));
  }

  a->Bind(&done);
  return var_result.value();
}

void ValidateAtomicIndex(compiler::CodeStubAssembler* a,
                         compiler::Node* index_word,
                         compiler::Node* array_length_word,
                         compiler::Node* context) {
  using namespace compiler;
  // Check if the index is in bounds. If not, throw RangeError.
  CodeStubAssembler::Label if_inbounds(a), if_notinbounds(a);
  a->Branch(
      a->WordOr(a->Int32LessThan(index_word, a->Int32Constant(0)),
                a->Int32GreaterThanOrEqual(index_word, array_length_word)),
      &if_notinbounds, &if_inbounds);
  a->Bind(&if_notinbounds);
  a->Return(
      a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context));
  a->Bind(&if_inbounds);
}

}  // anonymous namespace

void Builtins::Generate_AtomicsLoadCheck(compiler::CodeStubAssembler* a) {
  using namespace compiler;
  Isolate* isolate = a->isolate();
  Node* array = a->Parameter(1);
  Node* index = a->Parameter(2);
  Node* context = a->Parameter(3 + 2);
  ValidateSharedTypedArray(a, array, context);
  Node* index_word = ConvertTaggedAtomicIndexToWord32(a, index, context);
  Node* array_length_word = a->TruncateTaggedToWord32(
      context, a->LoadObjectField(array, JSTypedArray::kLengthOffset));
  ValidateAtomicIndex(a, index_word, array_length_word, context);

  Callable atomics_load = CodeFactory::AtomicsLoad(isolate);
  Node* target = a->HeapConstant(atomics_load.code());
  a->Return(a->CallStub(atomics_load.descriptor(), target, context, array,
                        index_word));
}

#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore) \
  Handle<Code> Builtins::name() {               \
@@ -303,19 +303,20 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
  CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)

// Define list of builtins implemented in TurboFan (with JS linkage).
#define BUILTIN_LIST_T(V)       \
  V(GeneratorPrototypeNext, 2)  \
  V(GeneratorPrototypeReturn, 2) \
  V(GeneratorPrototypeThrow, 2) \
  V(MathCeil, 2)                \
  V(MathClz32, 2)               \
  V(MathFloor, 2)               \
  V(MathRound, 2)               \
  V(MathSqrt, 2)                \
  V(MathTrunc, 2)               \
  V(ObjectHasOwnProperty, 2)    \
  V(StringPrototypeCharAt, 2)   \
  V(StringPrototypeCharCodeAt, 2)
#define BUILTIN_LIST_T(V)        \
  V(GeneratorPrototypeNext, 2)   \
  V(GeneratorPrototypeReturn, 2) \
  V(GeneratorPrototypeThrow, 2)  \
  V(MathCeil, 2)                 \
  V(MathClz32, 2)                \
  V(MathFloor, 2)                \
  V(MathRound, 2)                \
  V(MathSqrt, 2)                 \
  V(MathTrunc, 2)                \
  V(ObjectHasOwnProperty, 2)     \
  V(StringPrototypeCharAt, 2)    \
  V(StringPrototypeCharCodeAt, 2) \
  V(AtomicsLoadCheck, 3)

// Define list of builtin handlers implemented in assembly.
#define BUILTIN_LIST_H(V) \

@@ -673,6 +674,8 @@ class Builtins {
  static void Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm);
  static void Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm);

  static void Generate_AtomicsLoadCheck(compiler::CodeStubAssembler* assembler);

  static void InitBuiltinFunctionTable();

  bool initialized_;
@@ -545,5 +545,10 @@ Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
  return Callable(stub.GetCode(), InterpreterCEntryDescriptor(isolate));
}

Callable CodeFactory::AtomicsLoad(Isolate* isolate) {
  AtomicsLoadStub stub(isolate);
  return Callable(stub.GetCode(), AtomicsLoadDescriptor(isolate));
}

}  // namespace internal
}  // namespace v8
@@ -143,6 +143,8 @@ class CodeFactory final {
                                              TailCallMode tail_call_mode);
  static Callable InterpreterPushArgsAndConstruct(Isolate* isolate);
  static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);

  static Callable AtomicsLoad(Isolate* isolate);
};

}  // namespace internal
@@ -22,6 +22,7 @@ namespace internal {
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
  /* PlatformCodeStubs */               \
  V(ArrayConstructor)                   \
  V(AtomicsLoad)                        \
  V(BinaryOpICWithAllocationSite)       \
  V(CallApiCallback)                    \
  V(CallApiGetter)                      \

@@ -2512,7 +2513,7 @@ class ScriptContextFieldStub : public HandlerStub {

 private:
  static const int kContextIndexBits = 9;
  static const int kSlotIndexBits = 13;
  static const int kSlotIndexBits = 12;
  class ContextIndexBits : public BitField<int, 0, kContextIndexBits> {};
  class SlotIndexBits
      : public BitField<int, kContextIndexBits, kSlotIndexBits> {};

@@ -3121,6 +3122,14 @@ class ToObjectStub final : public HydrogenCodeStub {
  DEFINE_HYDROGEN_CODE_STUB(ToObject, HydrogenCodeStub);
};

class AtomicsLoadStub : public PlatformCodeStub {
 public:
  explicit AtomicsLoadStub(Isolate* isolate) : PlatformCodeStub(isolate) {}

  DEFINE_CALL_INTERFACE_DESCRIPTOR(AtomicsLoad);
  DEFINE_PLATFORM_CODE_STUB(AtomicsLoad, PlatformCodeStub);
};

#undef DEFINE_CALL_INTERFACE_DESCRIPTOR
#undef DEFINE_PLATFORM_CODE_STUB
#undef DEFINE_HANDLER_CODE_STUB
@@ -1276,6 +1276,11 @@ Node* CodeStubAssembler::StringFromCharCode(Node* code) {
  return var_result.value();
}

Node* CodeStubAssembler::TruncateFloat64ToInt32(Node* value) {
  return raw_assembler_->TruncateFloat64ToInt32(TruncationMode::kJavaScript,
                                                value);
}

void CodeStubAssembler::BranchIf(Node* condition, Label* if_true,
                                 Label* if_false) {
  Label if_condition_is_true(this), if_condition_is_false(this);

@@ -391,6 +391,8 @@ class CodeStubAssembler {
  Node* ChangeInt32ToTagged(Node* value);
  Node* TruncateTaggedToFloat64(Node* context, Node* value);
  Node* TruncateTaggedToWord32(Node* context, Node* value);
  // Truncate to int32 using JavaScript truncation mode.
  Node* TruncateFloat64ToInt32(Node* value);

  // Type conversions.
  // Throws a TypeError for {method_name} if {value} is not coercible to Object,
@@ -5839,6 +5839,175 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
                           return_value_operand, NULL);
}

namespace {

void GetTypedArrayBackingStore(MacroAssembler* masm, Register backing_store,
                               Register object, Register scratch) {
  Label offset_is_not_smi, done;
  __ mov(scratch, FieldOperand(object, JSTypedArray::kBufferOffset));
  __ mov(backing_store,
         FieldOperand(scratch, JSArrayBuffer::kBackingStoreOffset));

  __ mov(scratch, FieldOperand(object, JSArrayBufferView::kByteOffsetOffset));
  __ JumpIfNotSmi(scratch, &offset_is_not_smi, Label::kNear);
  // Offset is smi.
  __ SmiUntag(scratch);
  __ add(backing_store, scratch);
  __ jmp(&done, Label::kNear);

  // Offset is a heap number.
  __ bind(&offset_is_not_smi);
  __ movsd(xmm0, FieldOperand(scratch, HeapNumber::kValueOffset));
  __ cvttsd2si(scratch, xmm0);
  __ add(backing_store, scratch);
  __ bind(&done);
}

void TypedArrayJumpTablePrologue(MacroAssembler* masm, Register object,
                                 Register scratch, Register scratch2,
                                 Label* table) {
  __ mov(scratch, FieldOperand(object, JSObject::kElementsOffset));
  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
  __ sub(scratch, Immediate(static_cast<uint8_t>(FIXED_INT8_ARRAY_TYPE)));
  __ Assert(above_equal, kOffsetOutOfRange);
  __ jmp(Operand::JumpTable(scratch, times_4, table));
}

void TypedArrayJumpTableEpilogue(MacroAssembler* masm, Label* table, Label* i8,
                                 Label* u8, Label* i16, Label* u16, Label* i32,
                                 Label* u32, Label* u8c) {
  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 1);
  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 2);
  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 3);
  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 4);
  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 5);
  STATIC_ASSERT(FIXED_FLOAT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 6);
  STATIC_ASSERT(FIXED_FLOAT64_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 7);
  STATIC_ASSERT(FIXED_UINT8_CLAMPED_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 8);

  Label abort;
  __ bind(table);
  __ dd(i8);      // Int8Array
  __ dd(u8);      // Uint8Array
  __ dd(i16);     // Int16Array
  __ dd(u16);     // Uint16Array
  __ dd(i32);     // Int32Array
  __ dd(u32);     // Uint32Array
  __ dd(&abort);  // Float32Array
  __ dd(&abort);  // Float64Array
  __ dd(u8c);     // Uint8ClampedArray

  __ bind(&abort);
  __ Abort(kNoReason);
}

void ReturnInteger32(MacroAssembler* masm, XMMRegister dst, Register value,
                     Register scratch, Label* use_heap_number) {
  Label not_smi;
  if (!value.is(eax)) {
    __ mov(eax, value);
  }
  __ JumpIfNotValidSmiValue(eax, scratch, &not_smi, Label::kNear);
  __ SmiTag(eax);
  __ Ret();

  __ bind(&not_smi);
  __ Cvtsi2sd(dst, eax);
  __ jmp(use_heap_number);
}

void ReturnUnsignedInteger32(MacroAssembler* masm, XMMRegister dst,
                             Register value, XMMRegister scratch,
                             Label* use_heap_number) {
  Label not_smi;
  if (!value.is(eax)) {
    __ mov(eax, value);
  }
  __ JumpIfUIntNotValidSmiValue(eax, &not_smi, Label::kNear);
  __ SmiTag(eax);
  __ Ret();

  __ bind(&not_smi);
  // Convert [0, 2**32-1] -> [-2**31, 2**31-1].
  __ add(eax, Immediate(-0x7fffffff - 1));  // -0x80000000 parses incorrectly.
  __ Cvtsi2sd(dst, eax);
  __ mov(eax, Immediate(0x4f000000));  // 2**31 as IEEE float
  __ movd(scratch, eax);
  __ cvtss2sd(scratch, scratch);
  __ addsd(dst, scratch);
  __ jmp(use_heap_number);
}

void ReturnAllocatedHeapNumber(MacroAssembler* masm, XMMRegister value,
                               Register scratch, Register scratch2) {
  Label call_runtime;
  __ AllocateHeapNumber(eax, scratch, scratch2, &call_runtime);
  __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), value);
  __ Ret();

  __ bind(&call_runtime);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), value);
  }
  __ Ret();
}

}  // anonymous namespace

void AtomicsLoadStub::Generate(MacroAssembler* masm) {
  Register object = edx;
  Register index = eax;  // Index is an untagged word32.
  Register backing_store = ebx;
  Label table;

  GetTypedArrayBackingStore(masm, backing_store, object, ecx);
  TypedArrayJumpTablePrologue(masm, object, ecx, esi, &table);

  Label i8, u8, i16, u16, i32, u32;

  __ bind(&i8);
  __ mov_b(eax, Operand(backing_store, index, times_1, 0));
  __ movsx_b(eax, eax);
  __ SmiTag(eax);
  __ Ret();

  __ bind(&u8);
  __ mov_b(eax, Operand(backing_store, index, times_1, 0));
  __ movzx_b(eax, eax);
  __ SmiTag(eax);
  __ Ret();

  __ bind(&i16);
  __ mov_w(eax, Operand(backing_store, index, times_2, 0));
  __ movsx_w(eax, eax);
  __ SmiTag(eax);
  __ Ret();

  __ bind(&u16);
  __ mov_w(eax, Operand(backing_store, index, times_2, 0));
  __ movzx_w(eax, eax);
  __ SmiTag(eax);
  __ Ret();

  Label use_heap_number;

  __ bind(&i32);
  __ mov(eax, Operand(backing_store, index, times_4, 0));
  ReturnInteger32(masm, xmm0, eax, ecx, &use_heap_number);

  __ bind(&u32);
  __ mov(eax, Operand(backing_store, index, times_4, 0));
  ReturnUnsignedInteger32(masm, xmm0, eax, xmm1, &use_heap_number);

  __ bind(&use_heap_number);
  ReturnAllocatedHeapNumber(masm, xmm0, ecx, edx);

  TypedArrayJumpTableEpilogue(masm, &table, &i8, &u8, &i16, &u16, &i32, &u32,
                              &u8);
}

#undef __
@@ -428,6 +428,15 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void AtomicsLoadDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
      edx,  // the typedarray object
      eax   // the index to load (untagged)
  };
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

}  // namespace internal
}  // namespace v8
@@ -511,6 +511,23 @@ class MacroAssembler: public Assembler {
    j(not_zero, not_smi_label, distance);
  }

  // Jump if the value cannot be represented by a smi.
  inline void JumpIfNotValidSmiValue(Register value, Register scratch,
                                     Label* on_invalid,
                                     Label::Distance distance = Label::kFar) {
    mov(scratch, value);
    add(scratch, Immediate(0x40000000U));
    j(sign, on_invalid, distance);
  }

  // Jump if the unsigned integer value cannot be represented by a smi.
  inline void JumpIfUIntNotValidSmiValue(
      Register value, Label* on_invalid,
      Label::Distance distance = Label::kFar) {
    cmp(value, Immediate(0x40000000U));
    j(above_equal, on_invalid, distance);
  }

  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
@@ -91,7 +91,8 @@ class PlatformInterfaceDescriptor;
  V(InterpreterPushArgsAndCall)      \
  V(InterpreterPushArgsAndConstruct) \
  V(InterpreterCEntry)               \
  V(ResumeGenerator)
  V(ResumeGenerator)                 \
  V(AtomicsLoad)

class CallInterfaceDescriptorData {
 public:

@@ -827,6 +828,11 @@ class ResumeGeneratorDescriptor final : public CallInterfaceDescriptor {
  DECLARE_DESCRIPTOR(ResumeGeneratorDescriptor, CallInterfaceDescriptor)
};

class AtomicsLoadDescriptor : public CallInterfaceDescriptor {
 public:
  DECLARE_DESCRIPTOR(AtomicsLoadDescriptor, CallInterfaceDescriptor)
};

#undef DECLARE_DESCRIPTOR_WITH_BASE
#undef DECLARE_DESCRIPTOR
#undef DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE
@@ -62,12 +62,6 @@ function AtomicsCompareExchangeJS(sta, index, oldValue, newValue) {
  return %_AtomicsCompareExchange(sta, index, oldValue, newValue);
}

function AtomicsLoadJS(sta, index) {
  CheckSharedIntegerTypedArray(sta);
  index = ValidateIndex(index, %_TypedArrayGetLength(sta));
  return %_AtomicsLoad(sta, index);
}

function AtomicsStoreJS(sta, index, value) {
  CheckSharedIntegerTypedArray(sta);
  index = ValidateIndex(index, %_TypedArrayGetLength(sta));

@@ -161,13 +155,9 @@ function AtomicsFutexWakeOrRequeueJS(ia, index1, count, value, index2) {

// -------------------------------------------------------------------

function AtomicsConstructor() {}
var Atomics = global.Atomics;

var Atomics = new AtomicsConstructor();

%InternalSetPrototype(Atomics, GlobalObject.prototype);
%AddNamedProperty(global, "Atomics", Atomics, DONT_ENUM);
%FunctionSetInstanceClassName(AtomicsConstructor, 'Atomics');
// The Atomics global is defined by the bootstrapper.

%AddNamedProperty(Atomics, toStringTagSymbol, "Atomics", READ_ONLY | DONT_ENUM);

@@ -179,8 +169,9 @@ utils.InstallConstants(Atomics, [
]);

utils.InstallFunctions(Atomics, DONT_ENUM, [
  // TODO(binji): remove the rest of the (non futex) Atomics functions as they
  // become builtins.
  "compareExchange", AtomicsCompareExchangeJS,
  "load", AtomicsLoadJS,
  "store", AtomicsStoreJS,
  "add", AtomicsAddJS,
  "sub", AtomicsSubJS,
@@ -5727,6 +5727,9 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
                           return_value_operand, NULL);
}

void AtomicsLoadStub::Generate(MacroAssembler* masm) {
  // TODO(binji)
}

#undef __

@@ -424,6 +424,15 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void AtomicsLoadDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
      a1,  // the typedarray object
      a0   // the index to load (untagged)
  };
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

}  // namespace internal
}  // namespace v8
@@ -5755,6 +5755,9 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
                           return_value_operand, NULL);
}

void AtomicsLoadStub::Generate(MacroAssembler* masm) {
  // TODO(binji)
}

#undef __

@@ -423,6 +423,15 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void AtomicsLoadDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
      a1,  // the typedarray object
      a0   // the index to load (untagged)
  };
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

}  // namespace internal
}  // namespace v8
@@ -309,7 +309,7 @@ const int kVariableSizeSentinel = 0;

// We may store the unsigned bit field as signed Smi value and do not
// use the sign bit.
const int kStubMajorKeyBits = 7;
const int kStubMajorKeyBits = 8;
const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;

// All Maps have a field instance_type containing a InstanceType.
@@ -215,13 +215,6 @@ inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
}


template <typename T>
inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) {
  T result = LoadSeqCst(static_cast<T*>(buffer) + index);
  return ToObject(isolate, result);
}


template <typename T>
inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
                       Handle<Object> obj) {

@@ -365,6 +358,29 @@ inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer,
  V(Uint32, uint32, UINT32, uint32_t, 4) \
  V(Int32, int32, INT32, int32_t, 4)

RUNTIME_FUNCTION(Runtime_ThrowNotIntegerSharedTypedArrayError) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
  THROW_NEW_ERROR_RETURN_FAILURE(
      isolate,
      NewTypeError(MessageTemplate::kNotIntegerSharedTypedArray, value));
}

RUNTIME_FUNCTION(Runtime_ThrowNotInt32SharedTypedArrayError) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
  THROW_NEW_ERROR_RETURN_FAILURE(
      isolate, NewTypeError(MessageTemplate::kNotInt32SharedTypedArray, value));
}

RUNTIME_FUNCTION(Runtime_ThrowInvalidAtomicAccessIndexError) {
  HandleScope scope(isolate);
  DCHECK_EQ(0, args.length());
  THROW_NEW_ERROR_RETURN_FAILURE(
      isolate, NewRangeError(MessageTemplate::kInvalidAtomicAccessIndex));
}

RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);

@@ -401,31 +417,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {


RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 2);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoLoad<ctype>(isolate, source, index);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoLoad<uint8_t>(isolate, source, index);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}

@@ -397,7 +397,8 @@ RUNTIME_FUNCTION(Runtime_IsSharedIntegerTypedArray) {
  Handle<JSTypedArray> obj(JSTypedArray::cast(args[0]));
  return isolate->heap()->ToBoolean(obj->GetBuffer()->is_shared() &&
                                    obj->type() != kExternalFloat32Array &&
                                    obj->type() != kExternalFloat64Array);
                                    obj->type() != kExternalFloat64Array &&
                                    obj->type() != kExternalUint8ClampedArray);
}
@@ -53,19 +53,21 @@ namespace internal {
  F(FixedArraySet, 3, 1)       \
  F(ArraySpeciesConstructor, 1, 1)

#define FOR_EACH_INTRINSIC_ATOMICS(F) \
  F(AtomicsCompareExchange, 4, 1)     \
  F(AtomicsLoad, 2, 1)                \
  F(AtomicsStore, 3, 1)               \
  F(AtomicsAdd, 3, 1)                 \
  F(AtomicsSub, 3, 1)                 \
  F(AtomicsAnd, 3, 1)                 \
  F(AtomicsOr, 3, 1)                  \
  F(AtomicsXor, 3, 1)                 \
  F(AtomicsExchange, 3, 1)            \
#define FOR_EACH_INTRINSIC_ATOMICS(F)           \
  F(ThrowNotIntegerSharedTypedArrayError, 1, 1) \
  F(ThrowNotInt32SharedTypedArrayError, 1, 1)   \
  F(ThrowInvalidAtomicAccessIndexError, 0, 1)   \
  F(AtomicsCompareExchange, 4, 1)               \
  F(AtomicsLoad, 2, 1)                          \
  F(AtomicsStore, 3, 1)                         \
  F(AtomicsAdd, 3, 1)                           \
  F(AtomicsSub, 3, 1)                           \
  F(AtomicsAnd, 3, 1)                           \
  F(AtomicsOr, 3, 1)                            \
  F(AtomicsXor, 3, 1)                           \
  F(AtomicsExchange, 3, 1)                      \
  F(AtomicsIsLockFree, 1, 1)


#define FOR_EACH_INTRINSIC_FUTEX(F) \
  F(AtomicsFutexWait, 4, 1)         \
  F(AtomicsFutexWake, 3, 1)         \
@@ -5564,6 +5564,169 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
                           NULL);
}

namespace {

void GetTypedArrayBackingStore(MacroAssembler* masm, Register backing_store,
                               Register object, Register scratch) {
  Label offset_is_not_smi, done;
  __ movp(scratch, FieldOperand(object, JSTypedArray::kBufferOffset));
  __ movp(backing_store,
          FieldOperand(scratch, JSArrayBuffer::kBackingStoreOffset));

  __ movp(scratch, FieldOperand(object, JSArrayBufferView::kByteOffsetOffset));
  __ JumpIfNotSmi(scratch, &offset_is_not_smi, Label::kNear);
  // offset is smi
  __ SmiToInteger32(scratch, scratch);
  __ addp(backing_store, scratch);
  __ jmp(&done, Label::kNear);

  // offset is a heap number
  __ bind(&offset_is_not_smi);
  __ Movsd(xmm0, FieldOperand(scratch, HeapNumber::kValueOffset));
  __ Cvttsd2siq(scratch, xmm0);
  __ addp(backing_store, scratch);
  __ bind(&done);
}

void TypedArrayJumpTablePrologue(MacroAssembler* masm, Register object,
                                 Register scratch, Register scratch2,
                                 Label* table) {
  __ movp(scratch, FieldOperand(object, JSObject::kElementsOffset));
  __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
  __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
  __ subl(scratch, Immediate(static_cast<uint8_t>(FIXED_INT8_ARRAY_TYPE)));
  __ Assert(above_equal, kOffsetOutOfRange);
  __ leaq(scratch2, Operand(table));
  __ jmp(Operand(scratch2, scratch, times_8, 0));
}

void TypedArrayJumpTableEpilogue(MacroAssembler* masm, Label* table, Label* i8,
                                 Label* u8, Label* i16, Label* u16, Label* i32,
                                 Label* u32, Label* u8c) {
  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 1);
  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 2);
  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 3);
  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 4);
  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 5);
  STATIC_ASSERT(FIXED_FLOAT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 6);
  STATIC_ASSERT(FIXED_FLOAT64_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 7);
  STATIC_ASSERT(FIXED_UINT8_CLAMPED_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 8);

  Label abort;
  __ bind(table);
  __ dq(i8);      // Int8Array
  __ dq(u8);      // Uint8Array
  __ dq(i16);     // Int16Array
  __ dq(u16);     // Uint16Array
  __ dq(i32);     // Int32Array
  __ dq(u32);     // Uint32Array
  __ dq(&abort);  // Float32Array
  __ dq(&abort);  // Float64Array
  __ dq(u8c);     // Uint8ClampedArray

  __ bind(&abort);
  __ Abort(kNoReason);
}

void ReturnInteger32(MacroAssembler* masm, XMMRegister dst, Register value,
                     Label* use_heap_number) {
  Label not_smi;
  if (!value.is(rax)) {
    __ movp(rax, value);
  }
  __ JumpIfNotValidSmiValue(rax, &not_smi, Label::kNear);
  __ Integer32ToSmi(rax, rax);
  __ Ret();

  __ bind(&not_smi);
  __ Cvtlsi2sd(dst, rax);
  __ jmp(use_heap_number);
}

void ReturnUnsignedInteger32(MacroAssembler* masm, XMMRegister dst,
                             Register value, Label* use_heap_number) {
  Label not_smi;
  if (!value.is(rax)) {
    __ movp(rax, value);
  }
  __ JumpIfUIntNotValidSmiValue(rax, &not_smi, Label::kNear);
  __ Integer32ToSmi(rax, rax);
  __ Ret();

  __ bind(&not_smi);
  __ Cvtqsi2sd(dst, rax);
  __ jmp(use_heap_number);
}

void ReturnAllocatedHeapNumber(MacroAssembler* masm, XMMRegister value,
                               Register scratch) {
  Label call_runtime;
  __ AllocateHeapNumber(rax, scratch, &call_runtime);
  __ Movsd(FieldOperand(rax, HeapNumber::kValueOffset), value);
  __ Ret();

  __ bind(&call_runtime);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    __ Movsd(FieldOperand(rax, HeapNumber::kValueOffset), value);
  }
  __ Ret();
}

}  // anonymous namespace

void AtomicsLoadStub::Generate(MacroAssembler* masm) {
  Register object = rdx;
  Register index = rax;  // Index is an untagged word32.
  Register backing_store = rbx;
  Label table;

  GetTypedArrayBackingStore(masm, backing_store, object, kScratchRegister);
  TypedArrayJumpTablePrologue(masm, object, rcx, kScratchRegister, &table);

  Label i8, u8, i16, u16, i32, u32;

  __ bind(&i8);
  __ movb(rax, Operand(backing_store, index, times_1, 0));
  __ movsxbl(rax, rax);
  __ Integer32ToSmi(rax, rax);
  __ Ret();

  __ bind(&u8);
  __ movb(rax, Operand(backing_store, index, times_1, 0));
  __ movzxbl(rax, rax);
  __ Integer32ToSmi(rax, rax);
  __ Ret();

  __ bind(&i16);
  __ movw(rax, Operand(backing_store, index, times_2, 0));
  __ movsxwl(rax, rax);
  __ Integer32ToSmi(rax, rax);
  __ Ret();

  __ bind(&u16);
  __ movw(rax, Operand(backing_store, index, times_2, 0));
  __ movzxwl(rax, rax);
  __ Integer32ToSmi(rax, rax);
  __ Ret();

  Label use_heap_number;

  __ bind(&i32);
  __ movl(rax, Operand(backing_store, index, times_4, 0));
  ReturnInteger32(masm, xmm0, rax, &use_heap_number);

  __ bind(&u32);
  __ movl(rax, Operand(backing_store, index, times_4, 0));
  ReturnUnsignedInteger32(masm, xmm0, rax, &use_heap_number);

  __ bind(&use_heap_number);
  ReturnAllocatedHeapNumber(masm, xmm0, rcx);

  TypedArrayJumpTableEpilogue(masm, &table, &i8, &u8, &i16, &u16, &i32, &u32,
                              &u8);
}

#undef __

@@ -420,6 +420,15 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void AtomicsLoadDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
      rdx,  // the typedarray object
      rax   // the index to load (untagged)
  };
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

}  // namespace internal
}  // namespace v8
@@ -16,26 +16,19 @@ function toRangeWrapped(value) {
  return value;
}

function toRangeClamped(value) {
  if (value < this.min) return this.min;
  if (value > this.max) return this.max;
  return value;
}

function makeConstructorObject(constr, min, max, toRange) {
  var o = {constr: constr, min: min, max: max};
  o.toRange = toRange.bind(o);
  o.toRange = toRangeWrapped.bind(o);
  return o;
}

var IntegerTypedArrayConstructors = [
  makeConstructorObject(Int8Array, -128, 127, toRangeWrapped),
  makeConstructorObject(Int16Array, -32768, 32767, toRangeWrapped),
  makeConstructorObject(Int32Array, -0x80000000, 0x7fffffff, toRangeWrapped),
  makeConstructorObject(Uint8Array, 0, 255, toRangeWrapped),
  makeConstructorObject(Uint8ClampedArray, 0, 255, toRangeClamped),
  makeConstructorObject(Uint16Array, 0, 65535, toRangeWrapped),
  makeConstructorObject(Uint32Array, 0, 0xffffffff, toRangeWrapped),
  makeConstructorObject(Int8Array, -128, 127),
  makeConstructorObject(Int16Array, -32768, 32767),
  makeConstructorObject(Int32Array, -0x80000000, 0x7fffffff),
  makeConstructorObject(Uint8Array, 0, 255),
  makeConstructorObject(Uint16Array, 0, 65535),
  makeConstructorObject(Uint32Array, 0, 0xffffffff),
];

(function TestBadArray() {

@@ -44,9 +37,13 @@ var IntegerTypedArrayConstructors = [
  var sab = new SharedArrayBuffer(128);
  var sf32a = new Float32Array(sab);
  var sf64a = new Float64Array(sab);
  var u8ca = new Uint8ClampedArray(sab);

  // Atomic ops required integer shared typed arrays
  [undefined, 1, 'hi', 3.4, ab, u32a, sab, sf32a, sf64a].forEach(function(o) {
  var badArrayTypes = [
    undefined, 1, 'hi', 3.4, ab, u32a, sab, sf32a, sf64a, u8ca
  ];
  badArrayTypes.forEach(function(o) {
    assertThrows(function() { Atomics.compareExchange(o, 0, 0, 0); },
                 TypeError);
    assertThrows(function() { Atomics.load(o, 0); }, TypeError);

@@ -129,15 +126,16 @@ var IntegerTypedArrayConstructors = [

  var testOp = function(op, ia, index, expectedIndex, name) {
    for (var i = 0; i < ia.length; ++i)
      ia[i] = 22;
      ia[i] = i * 2;

    ia[expectedIndex] = 0;
    assertEquals(0, op(ia, index, 0, 0), name);
    var result = op(ia, index, 0, 0);
    assertEquals(0, result, name);
    assertEquals(0, ia[expectedIndex], name);

    for (var i = 0; i < ia.length; ++i) {
      if (i == expectedIndex) continue;
      assertEquals(22, ia[i], name);
      assertEquals(i * 2, ia[i], name);
    }
  };

@@ -222,6 +220,24 @@ function clearArray(sab) {
    }
  })
});

// Test Smi range
(function () {
  var sab = new SharedArrayBuffer(4);
  var i32 = new Int32Array(sab);
  var u32 = new Uint32Array(sab);

  function testLoad(signedValue, unsignedValue) {
    u32[0] = unsignedValue;
    assertEquals(unsignedValue, Atomics.load(u32, 0));
    assertEquals(signedValue, Atomics.load(i32, 0));
  }

  testLoad(0x3fffffff, 0x3fffffff);   // 2**30-1 (always smi)
  testLoad(0x40000000, 0x40000000);   // 2**30 (smi if signed and 32-bits)
  testLoad(0x80000000, -0x80000000);  // 2**31 (smi if signed and 32-bits)
  testLoad(0xffffffff, -1);           // 2**31 (smi if signed)
});
})();

(function TestStore() {