Reland "[fastcall] Implement support for TypedArray arguments"
This is a reland of 84d5b027a7
It removes support for 8-byte types which were causing
unaligned reads.
Original change's description:
> [fastcall] Implement support for TypedArray arguments
>
> This CL adds TypedArrays as supported arguments for fast API calls.
> It implements "exact type" matching, i.e. if Float32Array is expected
> and e.g. Int32Array is passed instead, the generated code bails to the
> slow callback.
>
> Bug: chromium:1052746, chromium:1018624
> Change-Id: I01d4e681d2b367cbb57b06effcb591c090a23295
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2999094
> Commit-Queue: Maya Lekova <mslekova@chromium.org>
> Reviewed-by: Georg Neis <neis@chromium.org>
> Reviewed-by: Camillo Bruni <cbruni@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#75756}
Bug: chromium:1052746, chromium:1018624
Change-Id: I872716d95bde8c340cf04990a3e4ae8ec8cd74a2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3035090
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75877}
This commit is contained in:
parent
058b6757a2
commit
66856bacdc
@ -578,7 +578,7 @@ PRIMITIVE_C_TYPES(DEFINE_TYPE_INFO_TRAITS)
|
||||
|
||||
#define SPECIALIZE_GET_TYPE_INFO_HELPER_FOR_TA(T, Enum) \
|
||||
template <> \
|
||||
struct TypeInfoHelper<FastApiTypedArray<T>> { \
|
||||
struct TypeInfoHelper<const FastApiTypedArray<T>&> { \
|
||||
static constexpr CTypeInfo::Flags Flags() { \
|
||||
return CTypeInfo::Flags::kNone; \
|
||||
} \
|
||||
|
@ -197,6 +197,9 @@ class EffectControlLinearizer {
|
||||
void LowerTransitionElementsKind(Node* node);
|
||||
Node* LowerLoadFieldByIndex(Node* node);
|
||||
Node* LowerLoadMessage(Node* node);
|
||||
Node* AdaptFastCallTypedArrayArgument(Node* node,
|
||||
ElementsKind expected_elements_kind,
|
||||
GraphAssemblerLabel<0>* bailout);
|
||||
Node* AdaptFastCallArgument(Node* node, CTypeInfo arg_type,
|
||||
GraphAssemblerLabel<0>* if_error);
|
||||
|
||||
@ -5004,16 +5007,102 @@ MachineType MachineTypeFor(CTypeInfo::Type type) {
|
||||
}
|
||||
} // namespace
|
||||
|
||||
Node* EffectControlLinearizer::AdaptFastCallTypedArrayArgument(
|
||||
Node* node, ElementsKind expected_elements_kind,
|
||||
GraphAssemblerLabel<0>* bailout) {
|
||||
Node* value_map = __ LoadField(AccessBuilder::ForMap(), node);
|
||||
Node* value_instance_type =
|
||||
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
|
||||
Node* value_is_typed_array = __ Word32Equal(
|
||||
value_instance_type, __ Int32Constant(JS_TYPED_ARRAY_TYPE));
|
||||
__ GotoIfNot(value_is_typed_array, bailout);
|
||||
|
||||
Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), value_map);
|
||||
Node* mask = __ Int32Constant(Map::Bits2::ElementsKindBits::kMask);
|
||||
Node* andit = __ Word32And(bit_field2, mask);
|
||||
Node* shift = __ Int32Constant(Map::Bits2::ElementsKindBits::kShift);
|
||||
Node* kind = __ Word32Shr(andit, shift);
|
||||
|
||||
Node* value_is_expected_elements_kind =
|
||||
__ Word32Equal(kind, __ Int32Constant(expected_elements_kind));
|
||||
__ GotoIfNot(value_is_expected_elements_kind, bailout);
|
||||
|
||||
Node* buffer =
|
||||
__ LoadField(AccessBuilder::ForJSArrayBufferViewBuffer(), node);
|
||||
Node* buffer_bit_field =
|
||||
__ LoadField(AccessBuilder::ForJSArrayBufferBitField(), buffer);
|
||||
|
||||
// Go to the slow path if the {buffer} was detached.
|
||||
Node* buffer_is_not_detached = __ Word32Equal(
|
||||
__ Word32And(buffer_bit_field,
|
||||
__ Int32Constant(JSArrayBuffer::WasDetachedBit::kMask)),
|
||||
__ ZeroConstant());
|
||||
__ GotoIfNot(buffer_is_not_detached, bailout);
|
||||
|
||||
// Go to the slow path if the {buffer} is shared.
|
||||
Node* buffer_is_not_shared = __ Word32Equal(
|
||||
__ Word32And(buffer_bit_field,
|
||||
__ Int32Constant(JSArrayBuffer::IsSharedBit::kMask)),
|
||||
__ ZeroConstant());
|
||||
__ GotoIfNot(buffer_is_not_shared, bailout);
|
||||
|
||||
// Unpack the store and length, and store them to a struct
|
||||
// FastApiTypedArray.
|
||||
Node* external_pointer =
|
||||
__ LoadField(AccessBuilder::ForJSTypedArrayExternalPointer(), node);
|
||||
|
||||
// Load the base pointer for the buffer. This will always be Smi
|
||||
// zero unless we allow on-heap TypedArrays, which is only the case
|
||||
// for Chrome. Node and Electron both set this limit to 0. Setting
|
||||
// the base to Smi zero here allows the BuildTypedArrayDataPointer
|
||||
// to optimize away the tricky part of the access later.
|
||||
Node* base_pointer =
|
||||
__ LoadField(AccessBuilder::ForJSTypedArrayBasePointer(), node);
|
||||
if (JSTypedArray::kMaxSizeInHeap == 0) {
|
||||
base_pointer = jsgraph()->ZeroConstant();
|
||||
}
|
||||
Node* data_ptr = BuildTypedArrayDataPointer(base_pointer, external_pointer);
|
||||
Node* length_in_bytes =
|
||||
__ LoadField(AccessBuilder::ForJSTypedArrayLength(), node);
|
||||
|
||||
// We hard-code int32_t here, because all specializations of
|
||||
// FastApiTypedArray have the same size.
|
||||
constexpr int kAlign = alignof(FastApiTypedArray<int32_t>);
|
||||
constexpr int kSize = sizeof(FastApiTypedArray<int32_t>);
|
||||
static_assert(kAlign == alignof(FastApiTypedArray<double>),
|
||||
"Alignment mismatch between different specializations of "
|
||||
"FastApiTypedArray");
|
||||
static_assert(kSize == sizeof(FastApiTypedArray<double>),
|
||||
"Size mismatch between different specializations of "
|
||||
"FastApiTypedArray");
|
||||
static_assert(
|
||||
kSize == sizeof(uintptr_t) + sizeof(size_t),
|
||||
"The size of "
|
||||
"FastApiTypedArray isn't equal to the sum of its expected members.");
|
||||
Node* stack_slot = __ StackSlot(kSize, kAlign);
|
||||
|
||||
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
|
||||
kNoWriteBarrier),
|
||||
stack_slot, 0, data_ptr);
|
||||
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
|
||||
kNoWriteBarrier),
|
||||
stack_slot, sizeof(uintptr_t), length_in_bytes);
|
||||
static_assert(sizeof(uintptr_t) == sizeof(size_t),
|
||||
"The buffer length can't "
|
||||
"fit the PointerRepresentation used to store it.");
|
||||
|
||||
return stack_slot;
|
||||
}
|
||||
|
||||
Node* EffectControlLinearizer::AdaptFastCallArgument(
|
||||
Node* node, CTypeInfo arg_type, GraphAssemblerLabel<0>* if_error) {
|
||||
int kAlign = alignof(uintptr_t);
|
||||
int kSize = sizeof(uintptr_t);
|
||||
switch (arg_type.GetSequenceType()) {
|
||||
case CTypeInfo::SequenceType::kScalar: {
|
||||
switch (arg_type.GetType()) {
|
||||
case CTypeInfo::Type::kV8Value: {
|
||||
int kAlign = alignof(uintptr_t);
|
||||
int kSize = sizeof(uintptr_t);
|
||||
Node* stack_slot = __ StackSlot(kSize, kAlign);
|
||||
|
||||
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
|
||||
kNoWriteBarrier),
|
||||
stack_slot, 0, node);
|
||||
@ -5035,10 +5124,7 @@ Node* EffectControlLinearizer::AdaptFastCallArgument(
|
||||
Node* value_is_smi = ObjectIsSmi(node);
|
||||
__ GotoIf(value_is_smi, if_error);
|
||||
|
||||
int kAlign = alignof(uintptr_t);
|
||||
int kSize = sizeof(uintptr_t);
|
||||
Node* stack_slot = __ StackSlot(kSize, kAlign);
|
||||
|
||||
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
|
||||
kNoWriteBarrier),
|
||||
stack_slot, 0, node);
|
||||
@ -5053,9 +5139,15 @@ Node* EffectControlLinearizer::AdaptFastCallArgument(
|
||||
|
||||
return stack_slot;
|
||||
}
|
||||
case CTypeInfo::SequenceType::kIsTypedArray:
|
||||
// TODO(mslekova): Implement typed arrays.
|
||||
return node;
|
||||
case CTypeInfo::SequenceType::kIsTypedArray: {
|
||||
// Check that the value is a HeapObject.
|
||||
Node* value_is_smi = ObjectIsSmi(node);
|
||||
__ GotoIf(value_is_smi, if_error);
|
||||
|
||||
return AdaptFastCallTypedArrayArgument(
|
||||
node, fast_api_call::GetTypedArrayElementsKind(arg_type.GetType()),
|
||||
if_error);
|
||||
}
|
||||
default: {
|
||||
UNREACHABLE();
|
||||
}
|
||||
@ -5069,14 +5161,8 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
|
||||
GraphAssemblerLabel<0>* if_error) {
|
||||
static constexpr int kReceiver = 1;
|
||||
|
||||
auto merge = __ MakeLabel(MachineRepresentation::kTagged);
|
||||
|
||||
int kAlign = alignof(uintptr_t);
|
||||
int kSize = sizeof(uintptr_t);
|
||||
Node* stack_slot = __ StackSlot(kSize, kAlign);
|
||||
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
|
||||
kNoWriteBarrier),
|
||||
stack_slot, 0, node);
|
||||
auto merge = __ MakeLabel(MachineRepresentation::kTagged,
|
||||
MachineRepresentation::kTagged);
|
||||
|
||||
for (size_t func_index = 0; func_index < c_functions.size(); func_index++) {
|
||||
const CFunctionInfo* c_signature = c_functions[func_index].signature;
|
||||
@ -5101,34 +5187,31 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
|
||||
value_instance_type, __ Int32Constant(JS_ARRAY_TYPE));
|
||||
__ GotoIfNot(value_is_js_array, &next);
|
||||
|
||||
int kAlign = alignof(uintptr_t);
|
||||
int kSize = sizeof(uintptr_t);
|
||||
Node* stack_slot = __ StackSlot(kSize, kAlign);
|
||||
|
||||
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
|
||||
kNoWriteBarrier),
|
||||
stack_slot, 0, node);
|
||||
|
||||
Node* target_address = __ ExternalConstant(
|
||||
ExternalReference::Create(c_functions[func_index].address));
|
||||
__ Goto(&merge, target_address);
|
||||
__ Goto(&merge, target_address, stack_slot);
|
||||
break;
|
||||
}
|
||||
|
||||
case CTypeInfo::SequenceType::kIsTypedArray: {
|
||||
// Check that the value is a TypedArray with a type that matches the
|
||||
// type declared in the c-function.
|
||||
ElementsKind typed_array_elements_kind =
|
||||
Node* stack_slot = AdaptFastCallTypedArrayArgument(
|
||||
node,
|
||||
fast_api_call::GetTypedArrayElementsKind(
|
||||
overloads_resolution_result.element_type);
|
||||
|
||||
Node* value_map = __ LoadField(AccessBuilder::ForMap(), node);
|
||||
Node* value_bit_field2 =
|
||||
__ LoadField(AccessBuilder::ForMapBitField2(), value_map);
|
||||
Node* value_elements_kind = __ WordShr(
|
||||
__ WordAnd(value_bit_field2,
|
||||
__ Int32Constant(Map::Bits2::ElementsKindBits::kMask)),
|
||||
__ Int32Constant(Map::Bits2::ElementsKindBits::kShift));
|
||||
Node* is_same_kind = __ Word32Equal(
|
||||
value_elements_kind,
|
||||
__ Int32Constant(GetPackedElementsKind(typed_array_elements_kind)));
|
||||
__ GotoIfNot(is_same_kind, &next);
|
||||
|
||||
overloads_resolution_result.element_type),
|
||||
&next);
|
||||
Node* target_address = __ ExternalConstant(
|
||||
ExternalReference::Create(c_functions[func_index].address));
|
||||
__ Goto(&merge, target_address);
|
||||
__ Goto(&merge, target_address, stack_slot);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -5142,7 +5225,7 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
|
||||
__ Goto(if_error);
|
||||
|
||||
__ Bind(&merge);
|
||||
return {merge.PhiAt(0), stack_slot};
|
||||
return {merge.PhiAt(0), merge.PhiAt(1)};
|
||||
}
|
||||
|
||||
Node* EffectControlLinearizer::WrapFastCall(
|
||||
|
@ -3558,6 +3558,23 @@ bool Has64BitIntegerParamsInSignature(const CFunctionInfo* c_signature) {
|
||||
} // namespace
|
||||
#endif
|
||||
|
||||
namespace {
|
||||
bool Has64BitTypedArraysInSignature(const CFunctionInfo* c_signature) {
|
||||
for (unsigned int i = 0; i < c_signature->ArgumentCount(); ++i) {
|
||||
if (c_signature->ArgumentInfo(i).GetSequenceType() !=
|
||||
CTypeInfo::SequenceType::kIsTypedArray) {
|
||||
continue;
|
||||
}
|
||||
if (c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kInt64 ||
|
||||
c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kUint64 ||
|
||||
c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kFloat64) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
} // namespace
|
||||
|
||||
// Given a FunctionTemplateInfo, checks whether the fast API call can be
|
||||
// optimized, applying the initial step of the overload resolution algorithm:
|
||||
// Given an overload set function_template_info.c_signatures, and a list of
|
||||
@ -3612,6 +3629,11 @@ FastApiCallFunctionVector CanOptimizeFastCall(
|
||||
optimize_to_fast_call =
|
||||
optimize_to_fast_call && !Has64BitIntegerParamsInSignature(c_signature);
|
||||
#endif
|
||||
// TODO(mslekova): Add back support for 64-bit TA params when the API is
|
||||
// changed to disallow raw access to unaligned data.
|
||||
optimize_to_fast_call =
|
||||
optimize_to_fast_call && !Has64BitTypedArraysInSignature(c_signature);
|
||||
|
||||
if (optimize_to_fast_call) {
|
||||
result.push_back({functions[i], c_signature});
|
||||
}
|
||||
|
@ -140,16 +140,20 @@ class FastCApiObject {
|
||||
|
||||
FastCApiObject* self = UnwrapObject(args.This());
|
||||
CHECK_SELF_OR_THROW();
|
||||
self->slow_call_count_++;
|
||||
|
||||
HandleScope handle_scope(isolate);
|
||||
|
||||
if (args.Length() < 2) {
|
||||
self->slow_call_count_++;
|
||||
isolate->ThrowError("This method expects at least 2 arguments.");
|
||||
return;
|
||||
}
|
||||
if (args[1]->IsTypedArray()) {
|
||||
// Not supported yet.
|
||||
AddAllTypedArraySlowCallback(args);
|
||||
return;
|
||||
}
|
||||
self->slow_call_count_++;
|
||||
if (args[1]->IsUndefined()) {
|
||||
Type dummy_result = 0;
|
||||
args.GetReturnValue().Set(Number::New(isolate, dummy_result));
|
||||
return;
|
||||
@ -188,13 +192,11 @@ class FastCApiObject {
|
||||
}
|
||||
args.GetReturnValue().Set(Number::New(isolate, sum));
|
||||
}
|
||||
|
||||
// TODO(mslekova) - The typed array param should be a
|
||||
// {size_t length, uint32_t* data}
|
||||
static Type AddAllTypedArrayFastCallback(Local<Object> receiver,
|
||||
bool should_fallback,
|
||||
Local<Uint32Array> typed_array_arg,
|
||||
FastApiCallbackOptions& options) {
|
||||
template <typename T>
|
||||
static Type AddAllTypedArrayFastCallback(
|
||||
Local<Object> receiver, bool should_fallback,
|
||||
const FastApiTypedArray<T>& typed_array_arg,
|
||||
FastApiCallbackOptions& options) {
|
||||
FastCApiObject* self = UnwrapObject(receiver);
|
||||
CHECK_SELF_OR_FALLBACK(0);
|
||||
self->fast_call_count_++;
|
||||
@ -204,12 +206,72 @@ class FastCApiObject {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Not implemented.
|
||||
return 0;
|
||||
if (!typed_array_arg.data) {
|
||||
options.fallback = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
T sum = 0;
|
||||
for (unsigned i = 0; i < typed_array_arg.length; ++i) {
|
||||
sum += typed_array_arg.data[i];
|
||||
}
|
||||
return static_cast<Type>(sum);
|
||||
}
|
||||
static void AddAllTypedArraySlowCallback(
|
||||
const FunctionCallbackInfo<Value>& args) {
|
||||
// Not implemented.
|
||||
Isolate* isolate = args.GetIsolate();
|
||||
|
||||
FastCApiObject* self = UnwrapObject(args.This());
|
||||
CHECK_SELF_OR_THROW();
|
||||
self->slow_call_count_++;
|
||||
|
||||
HandleScope handle_scope(isolate);
|
||||
|
||||
if (args.Length() < 2) {
|
||||
isolate->ThrowError("This method expects at least 2 arguments.");
|
||||
return;
|
||||
}
|
||||
if (!args[1]->IsTypedArray()) {
|
||||
isolate->ThrowError(
|
||||
"This method expects a TypedArray as a second argument.");
|
||||
return;
|
||||
}
|
||||
|
||||
Local<TypedArray> typed_array_arg = args[1].As<TypedArray>();
|
||||
size_t length = typed_array_arg->Length();
|
||||
|
||||
void* data = typed_array_arg->Buffer()->GetBackingStore()->Data();
|
||||
if (typed_array_arg->IsInt32Array() || typed_array_arg->IsUint32Array() ||
|
||||
typed_array_arg->IsBigInt64Array() ||
|
||||
typed_array_arg->IsBigUint64Array()) {
|
||||
int64_t sum = 0;
|
||||
for (unsigned i = 0; i < length; ++i) {
|
||||
if (typed_array_arg->IsInt32Array()) {
|
||||
sum += static_cast<int32_t*>(data)[i];
|
||||
} else if (typed_array_arg->IsUint32Array()) {
|
||||
sum += static_cast<uint32_t*>(data)[i];
|
||||
} else if (typed_array_arg->IsBigInt64Array()) {
|
||||
sum += static_cast<int64_t*>(data)[i];
|
||||
} else if (typed_array_arg->IsBigUint64Array()) {
|
||||
sum += static_cast<uint64_t*>(data)[i];
|
||||
}
|
||||
}
|
||||
args.GetReturnValue().Set(Number::New(isolate, sum));
|
||||
} else if (typed_array_arg->IsFloat32Array() ||
|
||||
typed_array_arg->IsFloat64Array()) {
|
||||
double sum = 0;
|
||||
for (unsigned i = 0; i < length; ++i) {
|
||||
if (typed_array_arg->IsFloat32Array()) {
|
||||
sum += static_cast<float*>(data)[i];
|
||||
} else if (typed_array_arg->IsFloat64Array()) {
|
||||
sum += static_cast<double*>(data)[i];
|
||||
}
|
||||
}
|
||||
args.GetReturnValue().Set(Number::New(isolate, sum));
|
||||
} else {
|
||||
isolate->ThrowError("TypedArray type is not supported.");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t AddAllIntInvalidCallback(Local<Object> receiver,
|
||||
@ -479,17 +541,46 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
|
||||
signature, 1, ConstructorBehavior::kThrow,
|
||||
SideEffectType::kHasSideEffect, &add_all_seq_c_func));
|
||||
|
||||
CFunction add_all_typed_array_c_func =
|
||||
CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback);
|
||||
CFunction add_all_int32_typed_array_c_func =
|
||||
CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<int32_t>);
|
||||
api_obj_ctor->PrototypeTemplate()->Set(
|
||||
isolate, "add_all_typed_array",
|
||||
isolate, "add_all_int32_typed_array",
|
||||
FunctionTemplate::New(
|
||||
isolate, FastCApiObject::AddAllTypedArraySlowCallback,
|
||||
Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
|
||||
SideEffectType::kHasSideEffect, &add_all_typed_array_c_func));
|
||||
SideEffectType::kHasSideEffect, &add_all_int32_typed_array_c_func));
|
||||
|
||||
CFunction add_all_int64_typed_array_c_func =
|
||||
CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<int64_t>);
|
||||
api_obj_ctor->PrototypeTemplate()->Set(
|
||||
isolate, "add_all_int64_typed_array",
|
||||
FunctionTemplate::New(
|
||||
isolate, FastCApiObject::AddAllTypedArraySlowCallback,
|
||||
Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
|
||||
SideEffectType::kHasSideEffect, &add_all_int64_typed_array_c_func));
|
||||
|
||||
CFunction add_all_uint64_typed_array_c_func =
|
||||
CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<uint64_t>);
|
||||
api_obj_ctor->PrototypeTemplate()->Set(
|
||||
isolate, "add_all_uint64_typed_array",
|
||||
FunctionTemplate::New(
|
||||
isolate, FastCApiObject::AddAllTypedArraySlowCallback,
|
||||
Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
|
||||
SideEffectType::kHasSideEffect,
|
||||
&add_all_uint64_typed_array_c_func));
|
||||
|
||||
CFunction add_all_uint32_typed_array_c_func =
|
||||
CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<uint32_t>);
|
||||
api_obj_ctor->PrototypeTemplate()->Set(
|
||||
isolate, "add_all_uint32_typed_array",
|
||||
FunctionTemplate::New(
|
||||
isolate, FastCApiObject::AddAllTypedArraySlowCallback,
|
||||
Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
|
||||
SideEffectType::kHasSideEffect,
|
||||
&add_all_uint32_typed_array_c_func));
|
||||
|
||||
const CFunction add_all_overloads[] = {
|
||||
add_all_typed_array_c_func,
|
||||
add_all_uint32_typed_array_c_func,
|
||||
add_all_seq_c_func,
|
||||
};
|
||||
api_obj_ctor->PrototypeTemplate()->Set(
|
||||
|
@ -28737,7 +28737,7 @@ TEST(FastApiCalls) {
|
||||
#ifndef V8_LITE_MODE
|
||||
namespace {
|
||||
void FastCallback1TypedArray(v8::Local<v8::Object> receiver, int arg0,
|
||||
v8::FastApiTypedArray<double> arg1) {
|
||||
const v8::FastApiTypedArray<double>& arg1) {
|
||||
// TODO(mslekova): Use the TypedArray parameter
|
||||
}
|
||||
|
||||
|
@ -163,7 +163,7 @@ assertEquals(add_32bit_int_result, add_32bit_int_mismatch(false, -42, 45));
|
||||
assertOptimized(add_32bit_int_mismatch);
|
||||
|
||||
// Test that passing too few argument falls down the slow path,
|
||||
// because it's an argument type mismatch (undefined vs. int).
|
||||
// because one of the arguments is undefined.
|
||||
fast_c_api.reset_counts();
|
||||
assertEquals(-42, add_32bit_int_mismatch(false, -42));
|
||||
assertUnoptimized(add_32bit_int_mismatch);
|
||||
|
36
test/mjsunit/compiler/fast-api-helpers.js
Normal file
36
test/mjsunit/compiler/fast-api-helpers.js
Normal file
@ -0,0 +1,36 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
|
||||
|
||||
// Helper for sequence tests.
|
||||
function optimize_and_check(func, fast_count, slow_count, expected) {
|
||||
%PrepareFunctionForOptimization(func);
|
||||
let result = func();
|
||||
assertEqualsDelta(expected, result, 0.001);
|
||||
|
||||
fast_c_api.reset_counts();
|
||||
%OptimizeFunctionOnNextCall(func);
|
||||
result = func();
|
||||
assertEqualsDelta(expected, result, 0.001);
|
||||
assertOptimized(func);
|
||||
assertEquals(fast_count, fast_c_api.fast_call_count());
|
||||
assertEquals(slow_count, fast_c_api.slow_call_count());
|
||||
}
|
||||
|
||||
function ExpectFastCall(func, expected) {
|
||||
optimize_and_check(func, 1, 0, expected);
|
||||
}
|
||||
|
||||
function ExpectSlowCall(func, expected) {
|
||||
optimize_and_check(func, 0, 1, expected);
|
||||
}
|
||||
|
||||
function assert_throws_and_optimized(func, arg) {
|
||||
fast_c_api.reset_counts();
|
||||
assertThrows(() => func(arg));
|
||||
assertOptimized(func);
|
||||
assertEquals(0, fast_c_api.fast_call_count());
|
||||
assertEquals(1, fast_c_api.slow_call_count());
|
||||
}
|
55
test/mjsunit/compiler/fast-api-sequences-x64.js
Normal file
55
test/mjsunit/compiler/fast-api-sequences-x64.js
Normal file
@ -0,0 +1,55 @@
|
||||
// Copyright 2021 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// This file adds x64 specific tests to the ones in fast-api-sequence.js.
|
||||
|
||||
// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
|
||||
// --always-opt is disabled because we rely on particular feedback for
|
||||
// optimizing to the fastest path.
|
||||
// Flags: --no-always-opt
|
||||
// The test relies on optimizing/deoptimizing at predictable moments, so
|
||||
// it's not suitable for deoptimization fuzzing.
|
||||
// Flags: --deopt-every-n-times=0
|
||||
|
||||
d8.file.execute('test/mjsunit/compiler/fast-api-helpers.js');
|
||||
|
||||
const fast_c_api = new d8.test.FastCAPI();
|
||||
|
||||
assertTrue(fast_c_api.supports_fp_params);
|
||||
|
||||
(function () {
|
||||
const max_safe_float = 2 ** 24 - 1;
|
||||
const add_all_result = -42 + 45 +
|
||||
Number.MIN_SAFE_INTEGER + Number.MAX_SAFE_INTEGER +
|
||||
max_safe_float * 0.5 + Math.PI;
|
||||
|
||||
function add_all_sequence() {
|
||||
const arr = [-42, 45,
|
||||
Number.MIN_SAFE_INTEGER, Number.MAX_SAFE_INTEGER,
|
||||
max_safe_float * 0.5, Math.PI];
|
||||
return fast_c_api.add_all_sequence(false /* should_fallback */, arr);
|
||||
}
|
||||
ExpectFastCall(add_all_sequence, add_all_result);
|
||||
})();
|
||||
|
||||
const max_safe_as_bigint = BigInt(Number.MAX_SAFE_INTEGER);
|
||||
(function () {
|
||||
function int64_test(should_fallback = false) {
|
||||
let typed_array = new BigInt64Array([-42n, 1n, max_safe_as_bigint]);
|
||||
return fast_c_api.add_all_int64_typed_array(false /* should_fallback */,
|
||||
typed_array);
|
||||
}
|
||||
const expected = Number(BigInt.asIntN(64, -42n + 1n + max_safe_as_bigint));
|
||||
ExpectSlowCall(int64_test, expected);
|
||||
})();
|
||||
|
||||
(function () {
|
||||
function uint64_test(should_fallback = false) {
|
||||
let typed_array = new BigUint64Array([max_safe_as_bigint, 1n, 2n]);
|
||||
return fast_c_api.add_all_uint64_typed_array(false /* should_fallback */,
|
||||
typed_array);
|
||||
}
|
||||
const expected = Number(BigInt.asUintN(64, max_safe_as_bigint + 1n + 2n));
|
||||
ExpectSlowCall(uint64_test, expected);
|
||||
})();
|
@ -12,106 +12,38 @@
|
||||
// it's not suitable for deoptimization fuzzing.
|
||||
// Flags: --deopt-every-n-times=0
|
||||
|
||||
d8.file.execute('test/mjsunit/compiler/fast-api-helpers.js');
|
||||
|
||||
const fast_c_api = new d8.test.FastCAPI();
|
||||
|
||||
// ----------- add_all_sequence -----------
|
||||
// `add_all_sequence` has the following signature:
|
||||
// double add_all_sequence(bool /*should_fallback*/, Local<Array>)
|
||||
|
||||
const max_safe_float = 2**24 - 1;
|
||||
const add_all_result_full = -42 + 45 +
|
||||
Number.MIN_SAFE_INTEGER + Number.MAX_SAFE_INTEGER +
|
||||
max_safe_float * 0.5 + Math.PI;
|
||||
const full_array = [-42, 45,
|
||||
Number.MIN_SAFE_INTEGER, Number.MAX_SAFE_INTEGER,
|
||||
max_safe_float * 0.5, Math.PI];
|
||||
// Smi only test - regular call hits the fast path.
|
||||
(function () {
|
||||
function add_all_sequence() {
|
||||
const arr = [-42, 45];
|
||||
return fast_c_api.add_all_sequence(false /* should_fallback */, arr);
|
||||
}
|
||||
ExpectFastCall(add_all_sequence, 3);
|
||||
})();
|
||||
|
||||
function add_all_sequence_smi(arg) {
|
||||
return fast_c_api.add_all_sequence(false /* should_fallback */, arg);
|
||||
}
|
||||
(function () {
|
||||
function add_all_sequence_mismatch(arg) {
|
||||
return fast_c_api.add_all_sequence(false /*should_fallback*/, arg);
|
||||
}
|
||||
|
||||
%PrepareFunctionForOptimization(add_all_sequence_smi);
|
||||
assertEquals(3, add_all_sequence_smi([-42, 45]));
|
||||
%OptimizeFunctionOnNextCall(add_all_sequence_smi);
|
||||
|
||||
function add_all_sequence_full(arg) {
|
||||
return fast_c_api.add_all_sequence(false /* should_fallback */, arg);
|
||||
}
|
||||
|
||||
%PrepareFunctionForOptimization(add_all_sequence_full);
|
||||
if (fast_c_api.supports_fp_params) {
|
||||
assertEquals(add_all_result_full, add_all_sequence_full(full_array));
|
||||
} else {
|
||||
assertEquals(3, add_all_sequence_smi([-42, 45]));
|
||||
}
|
||||
%OptimizeFunctionOnNextCall(add_all_sequence_full);
|
||||
|
||||
if (fast_c_api.supports_fp_params) {
|
||||
// Test that regular call hits the fast path.
|
||||
fast_c_api.reset_counts();
|
||||
assertEquals(add_all_result_full, add_all_sequence_full(full_array));
|
||||
assertOptimized(add_all_sequence_full);
|
||||
assertEquals(1, fast_c_api.fast_call_count());
|
||||
assertEquals(0, fast_c_api.slow_call_count());
|
||||
} else {
|
||||
// Smi only test - regular call hits the fast path.
|
||||
fast_c_api.reset_counts();
|
||||
assertEquals(3, add_all_sequence_smi([-42, 45]));
|
||||
assertOptimized(add_all_sequence_smi);
|
||||
assertEquals(1, fast_c_api.fast_call_count());
|
||||
assertEquals(0, fast_c_api.slow_call_count());
|
||||
}
|
||||
|
||||
// Test holey arrays
|
||||
fast_c_api.reset_counts();
|
||||
if (fast_c_api.supports_fp_params) {
|
||||
// Test that regular call hits the fast path.
|
||||
assertEquals(add_all_result_full, add_all_sequence_full([, ...full_array]));
|
||||
assertOptimized(add_all_sequence_full);
|
||||
assertEquals(1, fast_c_api.fast_call_count());
|
||||
assertEquals(1, fast_c_api.slow_call_count());
|
||||
} else {
|
||||
// Smi only test - regular call hits the fast path.
|
||||
assertEquals(3, add_all_sequence_smi([-42, , 45]));
|
||||
assertOptimized(add_all_sequence_smi);
|
||||
assertEquals(1, fast_c_api.fast_call_count());
|
||||
assertEquals(1, fast_c_api.slow_call_count());
|
||||
}
|
||||
|
||||
|
||||
function add_all_sequence_mismatch(arg) {
|
||||
return fast_c_api.add_all_sequence(false /*should_fallback*/, arg);
|
||||
}
|
||||
|
||||
%PrepareFunctionForOptimization(add_all_sequence_mismatch);
|
||||
assertThrows(() => add_all_sequence_mismatch());
|
||||
%OptimizeFunctionOnNextCall(add_all_sequence_mismatch);
|
||||
|
||||
// Test that passing non-array arguments falls down the slow path.
|
||||
fast_c_api.reset_counts();
|
||||
assertThrows(() => add_all_sequence_mismatch(42));
|
||||
assertOptimized(add_all_sequence_mismatch);
|
||||
assertEquals(0, fast_c_api.fast_call_count());
|
||||
assertEquals(1, fast_c_api.slow_call_count());
|
||||
|
||||
fast_c_api.reset_counts();
|
||||
assertThrows(() => add_all_sequence_mismatch({}));
|
||||
assertOptimized(add_all_sequence_mismatch);
|
||||
assertEquals(0, fast_c_api.fast_call_count());
|
||||
assertEquals(1, fast_c_api.slow_call_count());
|
||||
|
||||
fast_c_api.reset_counts();
|
||||
assertThrows(() => add_all_sequence_mismatch('string'));
|
||||
assertOptimized(add_all_sequence_mismatch);
|
||||
assertEquals(0, fast_c_api.fast_call_count());
|
||||
assertEquals(1, fast_c_api.slow_call_count());
|
||||
|
||||
fast_c_api.reset_counts();
|
||||
assertThrows(() => add_all_sequence_mismatch(Symbol()));
|
||||
assertOptimized(add_all_sequence_mismatch);
|
||||
assertEquals(0, fast_c_api.fast_call_count());
|
||||
assertEquals(1, fast_c_api.slow_call_count());
|
||||
%PrepareFunctionForOptimization(add_all_sequence_mismatch);
|
||||
add_all_sequence_mismatch();
|
||||
%OptimizeFunctionOnNextCall(add_all_sequence_mismatch);
|
||||
|
||||
// Test that passing non-array arguments falls down the slow path.
|
||||
assert_throws_and_optimized(add_all_sequence_mismatch, 42);
|
||||
assert_throws_and_optimized(add_all_sequence_mismatch, {});
|
||||
assert_throws_and_optimized(add_all_sequence_mismatch, 'string');
|
||||
assert_throws_and_optimized(add_all_sequence_mismatch, Symbol());
|
||||
})();
|
||||
|
||||
//----------- Test function overloads with same arity. -----------
|
||||
//Only overloads between JSArray and TypedArray are supported
|
||||
@ -119,21 +51,26 @@ assertEquals(1, fast_c_api.slow_call_count());
|
||||
// Test with TypedArray.
|
||||
(function () {
|
||||
function overloaded_test(should_fallback = false) {
|
||||
let typed_array = new Uint32Array([1, 2, 3]);
|
||||
let typed_array = new Uint32Array([1,2,3]);
|
||||
return fast_c_api.add_all_overload(false /* should_fallback */,
|
||||
typed_array);
|
||||
}
|
||||
ExpectFastCall(overloaded_test, 6);
|
||||
})();
|
||||
|
||||
%PrepareFunctionForOptimization(overloaded_test);
|
||||
let result = overloaded_test();
|
||||
assertEquals(0, result);
|
||||
let large_array = [];
|
||||
for (let i = 0; i < 100; i++) {
|
||||
large_array.push(i);
|
||||
}
|
||||
|
||||
fast_c_api.reset_counts();
|
||||
%OptimizeFunctionOnNextCall(overloaded_test);
|
||||
result = overloaded_test();
|
||||
assertEquals(0, result);
|
||||
assertOptimized(overloaded_test);
|
||||
assertEquals(1, fast_c_api.fast_call_count());
|
||||
// Non-externalized TypedArray.
|
||||
(function () {
|
||||
function overloaded_test(should_fallback = false) {
|
||||
let typed_array = new Uint32Array(large_array);
|
||||
return fast_c_api.add_all_overload(false /* should_fallback */,
|
||||
typed_array);
|
||||
}
|
||||
ExpectFastCall(overloaded_test, 4950);
|
||||
})();
|
||||
|
||||
// Mismatched TypedArray.
|
||||
@ -143,17 +80,7 @@ assertEquals(1, fast_c_api.slow_call_count());
|
||||
return fast_c_api.add_all_overload(false /* should_fallback */,
|
||||
typed_array);
|
||||
}
|
||||
|
||||
%PrepareFunctionForOptimization(overloaded_test);
|
||||
let result = overloaded_test();
|
||||
assertEquals(0, result);
|
||||
|
||||
fast_c_api.reset_counts();
|
||||
%OptimizeFunctionOnNextCall(overloaded_test);
|
||||
result = overloaded_test();
|
||||
assertEquals(0, result);
|
||||
assertOptimized(overloaded_test);
|
||||
assertEquals(0, fast_c_api.fast_call_count());
|
||||
ExpectSlowCall(overloaded_test, 6.6);
|
||||
})();
|
||||
|
||||
// Test with JSArray.
|
||||
@ -162,17 +89,7 @@ assertEquals(1, fast_c_api.slow_call_count());
|
||||
let js_array = [26, -6, 42];
|
||||
return fast_c_api.add_all_overload(false /* should_fallback */, js_array);
|
||||
}
|
||||
|
||||
%PrepareFunctionForOptimization(overloaded_test);
|
||||
let result = overloaded_test();
|
||||
assertEquals(62, result);
|
||||
|
||||
fast_c_api.reset_counts();
|
||||
%OptimizeFunctionOnNextCall(overloaded_test);
|
||||
result = overloaded_test();
|
||||
assertEquals(62, result);
|
||||
assertOptimized(overloaded_test);
|
||||
assertEquals(1, fast_c_api.fast_call_count());
|
||||
ExpectFastCall(overloaded_test, 62);
|
||||
})();
|
||||
|
||||
// Test function overloads with undefined.
|
||||
@ -180,15 +97,7 @@ assertEquals(1, fast_c_api.slow_call_count());
|
||||
function overloaded_test(should_fallback = false) {
|
||||
return fast_c_api.add_all_overload(false /* should_fallback */, undefined);
|
||||
}
|
||||
|
||||
%PrepareFunctionForOptimization(overloaded_test);
|
||||
assertThrows(() => overloaded_test());
|
||||
|
||||
fast_c_api.reset_counts();
|
||||
%OptimizeFunctionOnNextCall(overloaded_test);
|
||||
assertThrows(() => overloaded_test());
|
||||
assertOptimized(overloaded_test);
|
||||
assertEquals(0, fast_c_api.fast_call_count());
|
||||
ExpectSlowCall(overloaded_test, 0);
|
||||
})();
|
||||
|
||||
// Test function with invalid overloads.
|
||||
@ -214,3 +123,94 @@ assertEquals(1, fast_c_api.slow_call_count());
|
||||
assertUnoptimized(overloaded_test);
|
||||
assertEquals(0, fast_c_api.fast_call_count());
|
||||
})();
|
||||
|
||||
//----------- Test different TypedArray functions. -----------
|
||||
// ----------- add_all_<TYPE>_typed_array -----------
|
||||
// `add_all_<TYPE>_typed_array` have the following signature:
|
||||
// double add_all_<TYPE>_typed_array(bool /*should_fallback*/, FastApiTypedArray<TYPE>)
|
||||
|
||||
(function () {
  // Int32Array elements are summed via the fast API path.
  function int32_test(should_fallback = false) {
    const ta = new Int32Array([-42, 1, 2, 3]);
    return fast_c_api.add_all_int32_typed_array(
        false /* should_fallback */, ta);
  }
  // -42 + 1 + 2 + 3 == -36.
  ExpectFastCall(int32_test, -36);
})();
|
||||
|
||||
(function () {
  // Uint32Array elements are summed via the fast API path.
  function uint32_test(should_fallback = false) {
    const ta = new Uint32Array([1, 2, 3]);
    return fast_c_api.add_all_uint32_typed_array(
        false /* should_fallback */, ta);
  }
  // 1 + 2 + 3 == 6.
  ExpectFastCall(uint32_test, 6);
})();
|
||||
|
||||
(function () {
|
||||
function detached_typed_array_test(should_fallback = false) {
|
||||
let typed_array = new Int32Array([-42, 1, 2, 3]);
|
||||
%ArrayBufferDetach(typed_array.buffer);
|
||||
return fast_c_api.add_all_int32_typed_array(false /* should_fallback */,
|
||||
typed_array);
|
||||
}
|
||||
ExpectSlowCall(detached_typed_array_test, 0);
|
||||
})();
|
||||
|
||||
(function () {
|
||||
function detached_non_ext_typed_array_test(should_fallback = false) {
|
||||
let typed_array = new Int32Array(large_array);
|
||||
%ArrayBufferDetach(typed_array.buffer);
|
||||
return fast_c_api.add_all_int32_typed_array(false /* should_fallback */,
|
||||
typed_array);
|
||||
}
|
||||
ExpectSlowCall(detached_non_ext_typed_array_test, 0);
|
||||
})();
|
||||
|
||||
(function () {
  // TypedArrays backed by a SharedArrayBuffer are not taken by the fast
  // path; the call falls back to the slow callback, which still sums.
  function shared_array_buffer_ta_test(should_fallback = false) {
    const sab = new SharedArrayBuffer(16);
    const ta = new Int32Array(sab);
    ta.set([-42, 1, 2, 3]);
    return fast_c_api.add_all_int32_typed_array(
        false /* should_fallback */, ta);
  }
  // -42 + 1 + 2 + 3 == -36.
  ExpectSlowCall(shared_array_buffer_ta_test, -36);
})();
|
||||
|
||||
(function () {
  // SharedArrayBuffer-backed TypedArray with a large store (100 int32s ==
  // 400 bytes): still routed to the slow callback.
  function shared_array_buffer_ext_ta_test(should_fallback = false) {
    const sab = new SharedArrayBuffer(400);
    const ta = new Int32Array(sab);
    ta.set(large_array);
    return fast_c_api.add_all_int32_typed_array(
        false /* should_fallback */, ta);
  }
  // Sum of 0..99 is 4950.
  ExpectSlowCall(shared_array_buffer_ext_ta_test, 4950);
})();
|
||||
|
||||
// Empty TypedArray.
|
||||
// Empty TypedArray.
(function () {
  // A zero-length TypedArray still takes the fast path; the sum is 0.
  function int32_test(should_fallback = false) {
    const ta = new Int32Array(0);
    return fast_c_api.add_all_int32_typed_array(
        false /* should_fallback */, ta);
  }
  ExpectFastCall(int32_test, 0);
})();
|
||||
|
||||
// Invalid argument types instead of a TypedArray.
|
||||
(function () {
|
||||
function invalid_test(arg) {
|
||||
return fast_c_api.add_all_int32_typed_array(false /* should_fallback */,
|
||||
arg);
|
||||
}
|
||||
%PrepareFunctionForOptimization(invalid_test);
|
||||
invalid_test(new Int32Array(0));
|
||||
%OptimizeFunctionOnNextCall(invalid_test);
|
||||
|
||||
assert_throws_and_optimized(invalid_test, 42);
|
||||
assert_throws_and_optimized(invalid_test, {});
|
||||
assert_throws_and_optimized(invalid_test, 'string');
|
||||
assert_throws_and_optimized(invalid_test, Symbol());
|
||||
})();
|
||||
|
@ -1608,4 +1608,10 @@
|
||||
'wasm/shared-memory-gc-stress': [SKIP],
|
||||
}], # third_party_heap
|
||||
|
||||
##############################################################################
|
||||
['arch != x64', {
|
||||
# Tests that include types only supported on x64.
|
||||
'compiler/fast-api-sequences-x64': [SKIP],
|
||||
}], # arch != x64
|
||||
|
||||
]
|
||||
|
Loading…
Reference in New Issue
Block a user