// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/builtins/builtins-array-gen.h"

#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-string-gen.h"
#include "src/builtins/builtins-typed-array-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/frame-constants.h"
#include "src/heap/factory-inl.h"
#include "src/objects/arguments-inl.h"

namespace v8 {
namespace internal {

using Node = compiler::Node;

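// The shared iteration state lives in CSA variables: k_ holds the current
// index, a_ holds the accumulator/result value, and to_ holds the target
// index used by Array.prototype.filter. fully_spec_compliant_ is the label
// the fast paths jump to when they must fall back to the slow, fully
// spec-compliant continuation.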
ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
    compiler::CodeAssemblerState* state)
    : BaseBuiltinsFromDSLAssembler(state),
      k_(this, MachineRepresentation::kTagged),
      a_(this, MachineRepresentation::kTagged),
      to_(this, MachineRepresentation::kTagged, SmiConstant(0)),
      fully_spec_compliant_(this, {&k_, &a_, &to_}) {}

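// Each iterating builtin is described by three callbacks: a result generator
// that initializes a_, a per-element processor that is invoked for every
// visited element, and a post-loop action that runs after iteration. The
// bodies below supply these pieces for find, findIndex, forEach, some, every,
// reduce, filter and map.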
void ArrayBuiltinsAssembler::FindResultGenerator() {
  a_.Bind(UndefinedConstant());
}

Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
  Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
                       this_arg(), k_value, k, o());
  Label false_continue(this), return_true(this);
  BranchIfToBooleanIsTrue(value, &return_true, &false_continue);
  BIND(&return_true);
  ReturnFromBuiltin(k_value);
  BIND(&false_continue);
  return a();
}

void ArrayBuiltinsAssembler::FindIndexResultGenerator() {
  a_.Bind(SmiConstant(-1));
}

Node* ArrayBuiltinsAssembler::FindIndexProcessor(Node* k_value, Node* k) {
  Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
                       this_arg(), k_value, k, o());
  Label false_continue(this), return_true(this);
  BranchIfToBooleanIsTrue(value, &return_true, &false_continue);
  BIND(&return_true);
  ReturnFromBuiltin(k);
  BIND(&false_continue);
  return a();
}

void ArrayBuiltinsAssembler::ForEachResultGenerator() {
  a_.Bind(UndefinedConstant());
}

Node* ArrayBuiltinsAssembler::ForEachProcessor(Node* k_value, Node* k) {
  CallJS(CodeFactory::Call(isolate()), context(), callbackfn(), this_arg(),
         k_value, k, o());
  return a();
}

void ArrayBuiltinsAssembler::SomeResultGenerator() {
  a_.Bind(FalseConstant());
}

Node* ArrayBuiltinsAssembler::SomeProcessor(Node* k_value, Node* k) {
  Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
                       this_arg(), k_value, k, o());
  Label false_continue(this), return_true(this);
  BranchIfToBooleanIsTrue(value, &return_true, &false_continue);
  BIND(&return_true);
  ReturnFromBuiltin(TrueConstant());
  BIND(&false_continue);
  return a();
}

void ArrayBuiltinsAssembler::EveryResultGenerator() {
  a_.Bind(TrueConstant());
}

Node* ArrayBuiltinsAssembler::EveryProcessor(Node* k_value, Node* k) {
  Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
                       this_arg(), k_value, k, o());
  Label true_continue(this), return_false(this);
  BranchIfToBooleanIsTrue(value, &true_continue, &return_false);
  BIND(&return_false);
  ReturnFromBuiltin(FalseConstant());
  BIND(&true_continue);
  return a();
}

void ArrayBuiltinsAssembler::ReduceResultGenerator() {
  return a_.Bind(this_arg());
}

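// ReduceProcessor uses the-hole as a sentinel in a_ to mean "no accumulator
// yet": the first visited element then becomes the initial accumulator, and
// ReducePostLoopAction throws if the sentinel is still present after the loop
// (reduce of an empty array with no initial value).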
Node* ArrayBuiltinsAssembler::ReduceProcessor(Node* k_value, Node* k) {
  VARIABLE(result, MachineRepresentation::kTagged);
  Label done(this, {&result}), initial(this);
  GotoIf(WordEqual(a(), TheHoleConstant()), &initial);
  result.Bind(CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
                     UndefinedConstant(), a(), k_value, k, o()));
  Goto(&done);

  BIND(&initial);
  result.Bind(k_value);
  Goto(&done);

  BIND(&done);
  return result.value();
}

void ArrayBuiltinsAssembler::ReducePostLoopAction() {
  Label ok(this);
  GotoIf(WordNotEqual(a(), TheHoleConstant()), &ok);
  ThrowTypeError(context(), MessageTemplate::kReduceNoInitial);
  BIND(&ok);
}

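// Example (standard JS semantics): [1, 2, 3].filter(x => x > 1) keeps the
// elements for which the callback returns a truthy value and writes them
// densely into the new array A, yielding [2, 3]. The generator below
// allocates A, and FilterProcessor appends each selected element at index to_.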
void ArrayBuiltinsAssembler::FilterResultGenerator() {
  // 7. Let A be ArraySpeciesCreate(O, 0).
  // This version of ArraySpeciesCreate will create with the correct
  // ElementsKind in the fast case.
  GenerateArraySpeciesCreate();
}

Node* ArrayBuiltinsAssembler::FilterProcessor(Node* k_value, Node* k) {
  // ii. Let selected be ToBoolean(? Call(callbackfn, T, kValue, k, O)).
  Node* selected = CallJS(CodeFactory::Call(isolate()), context(),
                          callbackfn(), this_arg(), k_value, k, o());
  Label true_continue(this, &to_), false_continue(this);
  BranchIfToBooleanIsTrue(selected, &true_continue, &false_continue);
  BIND(&true_continue);
  // iii. If selected is true, then...
  {
    Label after_work(this, &to_);
    Node* kind = nullptr;

    // If a() is a JSArray, we can have a fast path.
    Label fast(this);
    Label runtime(this);
    Label object_push_pre(this), object_push(this), double_push(this);
    BranchIfFastJSArray(a(), context(), &fast, &runtime);

    BIND(&fast);
    {
      GotoIf(WordNotEqual(LoadJSArrayLength(a()), to_.value()), &runtime);
      kind = EnsureArrayPushable(LoadMap(a()), &runtime);
      GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
             &object_push_pre);

      BuildAppendJSArray(HOLEY_SMI_ELEMENTS, a(), k_value, &runtime);
      Goto(&after_work);
    }

    BIND(&object_push_pre);
    {
      Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &double_push,
             &object_push);
    }

    BIND(&object_push);
    {
      BuildAppendJSArray(HOLEY_ELEMENTS, a(), k_value, &runtime);
      Goto(&after_work);
    }

    BIND(&double_push);
    {
      BuildAppendJSArray(HOLEY_DOUBLE_ELEMENTS, a(), k_value, &runtime);
      Goto(&after_work);
    }

    BIND(&runtime);
    {
      // 1. Perform ? CreateDataPropertyOrThrow(A, ToString(to), kValue).
      CallRuntime(Runtime::kCreateDataProperty, context(), a(), to_.value(),
                  k_value);
      Goto(&after_work);
    }

    BIND(&after_work);
    {
      // 2. Increase to by 1.
      to_.Bind(NumberInc(to_.value()));
      Goto(&false_continue);
    }
  }
  BIND(&false_continue);
  return a();
}

void ArrayBuiltinsAssembler::MapResultGenerator() {
  GenerateArraySpeciesCreate(len_);
}

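// The typed-array variant of map creates the result with
// TypedArraySpeciesCreate and records whether the result's elements kind
// matches the source's, so TypedArrayMapProcessor can use the fast in-place
// element store instead of the generic Set path.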
void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() {
  // 6. Let A be ? TypedArraySpeciesCreate(O, len).
  TNode<JSTypedArray> original_array = CAST(o());
  TNode<Smi> length = CAST(len_);
  const char* method_name = "%TypedArray%.prototype.map";

  TypedArrayBuiltinsAssembler typedarray_asm(state());
  TNode<JSTypedArray> a = typedarray_asm.SpeciesCreateByLength(
      context(), original_array, length, method_name);
  // In the Spec and our current implementation, the length check is already
  // performed in TypedArraySpeciesCreate.
  CSA_ASSERT(
      this,
      SmiLessThanOrEqual(
          CAST(len_), CAST(LoadObjectField(a, JSTypedArray::kLengthOffset))));
  fast_typed_array_target_ =
      Word32Equal(LoadInstanceType(LoadElements(original_array)),
                  LoadInstanceType(LoadElements(a)));
  a_.Bind(a);
}

Node* ArrayBuiltinsAssembler::SpecCompliantMapProcessor(Node* k_value,
                                                        Node* k) {
  // i. Let kValue be ? Get(O, Pk). Performed by the caller of
  // SpecCompliantMapProcessor.
  // ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
  Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
                              callbackfn(), this_arg(), k_value, k, o());

  // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
  CallRuntime(Runtime::kCreateDataProperty, context(), a(), k, mapped_value);
  return a();
}

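// FastMapProcessor writes mapped values straight into the result array's
// backing store. It transitions the result's ElementsKind on demand
// (HOLEY_SMI -> HOLEY or HOLEY_DOUBLE) and falls back to the
// %CreateDataProperty runtime call when an in-place store is not possible.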
Node* ArrayBuiltinsAssembler::FastMapProcessor(Node* k_value, Node* k) {
  // i. Let kValue be ? Get(O, Pk). Performed by the caller of
  // FastMapProcessor.
  // ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
  Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
                              callbackfn(), this_arg(), k_value, k, o());

  // mode is SMI_PARAMETERS because k has tagged representation.
  ParameterMode mode = SMI_PARAMETERS;
  Label runtime(this), finished(this);
  Label transition_pre(this), transition_smi_fast(this),
      transition_smi_double(this);
  Label array_not_smi(this), array_fast(this), array_double(this);

  Node* kind = LoadMapElementsKind(LoadMap(a()));
  Node* elements = LoadElements(a());
  GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS), &array_not_smi);
  TryStoreArrayElement(HOLEY_SMI_ELEMENTS, mode, &transition_pre, elements, k,
                       mapped_value);
  Goto(&finished);

  BIND(&transition_pre);
  {
    // array is smi. Value is either tagged or a heap number.
    CSA_ASSERT(this, TaggedIsNotSmi(mapped_value));
    GotoIf(IsHeapNumberMap(LoadMap(mapped_value)), &transition_smi_double);
    Goto(&transition_smi_fast);
  }

  BIND(&array_not_smi);
  {
    Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &array_double,
           &array_fast);
  }

  BIND(&transition_smi_fast);
  {
    // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
    Node* const native_context = LoadNativeContext(context());
    Node* const fast_map = LoadContextElement(
        native_context, Context::JS_ARRAY_HOLEY_ELEMENTS_MAP_INDEX);

    // Since this transition is only a map change, just do it right here.
    // Since a() doesn't have an allocation site, it's safe to do the
    // map store directly, otherwise I'd call TransitionElementsKind().
    StoreMap(a(), fast_map);
    Goto(&array_fast);
  }

  BIND(&array_fast);
  {
    TryStoreArrayElement(HOLEY_ELEMENTS, mode, &runtime, elements, k,
                         mapped_value);
    Goto(&finished);
  }

  BIND(&transition_smi_double);
  {
    // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
    Node* const native_context = LoadNativeContext(context());
    Node* const double_map = LoadContextElement(
        native_context, Context::JS_ARRAY_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX);
    CallStub(CodeFactory::TransitionElementsKind(
                 isolate(), HOLEY_SMI_ELEMENTS, HOLEY_DOUBLE_ELEMENTS, true),
             context(), a(), double_map);
    Goto(&array_double);
  }

  BIND(&array_double);
  {
    // TODO(mvstanton): If we use a variable for elements and bind it
    // appropriately, we can avoid an extra load of elements by binding the
    // value only after a transition from smi to double.
    elements = LoadElements(a());
    // If the mapped_value isn't a number, this will bail out to the runtime
    // to make the transition.
    TryStoreArrayElement(HOLEY_DOUBLE_ELEMENTS, mode, &runtime, elements, k,
                         mapped_value);
    Goto(&finished);
  }

  BIND(&runtime);
  {
    // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
    CallRuntime(Runtime::kCreateDataProperty, context(), a(), k,
                mapped_value);
    Goto(&finished);
  }

  BIND(&finished);
  return a();
}

// See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) {
  // 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
  Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
                              callbackfn(), this_arg(), k_value, k, o());
  Label fast(this), slow(this), done(this), detached(this, Label::kDeferred);

  // 8. d. Perform ? Set(A, Pk, mapped_value, true).
  // Since we know that A is a TypedArray, this always ends up in
  // #sec-integer-indexed-exotic-objects-set-p-v-receiver and then
  // tc39.github.io/ecma262/#sec-integerindexedelementset.
  Branch(fast_typed_array_target_, &fast, &slow);

  BIND(&fast);
  // #sec-integerindexedelementset
  // 5. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let
  // numValue be ? ToBigInt(v).
  // 6. Otherwise, let numValue be ? ToNumber(value).
  Node* num_value;
  if (source_elements_kind_ == BIGINT64_ELEMENTS ||
      source_elements_kind_ == BIGUINT64_ELEMENTS) {
    num_value = ToBigInt(context(), mapped_value);
  } else {
    num_value = ToNumber_Inline(context(), mapped_value);
  }
  // The only way this can bail out is because of a detached buffer.
  EmitElementStore(a(), k, num_value, false, source_elements_kind_,
                   KeyedAccessStoreMode::STANDARD_STORE, &detached,
                   context());
  Goto(&done);

  BIND(&slow);
  CallRuntime(Runtime::kSetProperty, context(), a(), k, mapped_value,
              SmiConstant(LanguageMode::kStrict));
  Goto(&done);

  BIND(&detached);
  // tc39.github.io/ecma262/#sec-integerindexedelementset
  // 8. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
  ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_);

  BIND(&done);
  return a();
}

void ArrayBuiltinsAssembler::NullPostLoopAction() {}

void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) {
  if (argc_ == nullptr) {
    Return(value);
  } else {
    // argc_ doesn't include the receiver, so it has to be added back in
    // manually.
    PopAndReturn(IntPtrAdd(argc_, IntPtrConstant(1)), value);
  }
}

void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinBody(
    TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
    Node* this_arg, TNode<IntPtrT> argc) {
  context_ = context;
  receiver_ = receiver;
  callbackfn_ = callbackfn;
  this_arg_ = this_arg;
  argc_ = argc;
}

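// The iterating Array.prototype builtins are split into two parts: a main
// body below that is optimized for fast cases, and a separate "continuation"
// builtin that runs a slower but fully spec-compliant version of the loop.
// When the fast body detects a condition that invalidates its assumptions
// (e.g. the array's map or length changes during a callback), it tail-calls
// the continuation to finish the iteration. This also paves the way for
// TurboFan-inlined builtins, whose deopts resume in the continuation.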
void ArrayBuiltinsAssembler::GenerateIteratingArrayBuiltinBody(
    const char* name, const BuiltinResultGenerator& generator,
    const CallResultProcessor& processor, const PostLoopAction& action,
    const Callable& slow_case_continuation,
    MissingPropertyMode missing_property_mode, ForEachDirection direction) {
  Label non_array(this), array_changes(this, {&k_, &a_, &to_});

  // TODO(danno): Seriously? Do we really need to throw the exact error
  // message on null and undefined so that the webkit tests pass?
  Label throw_null_undefined_exception(this, Label::kDeferred);
  GotoIf(IsNullOrUndefined(receiver()), &throw_null_undefined_exception);

  // By the book: taken directly from the ECMAScript 2015 specification

  // 1. Let O be ToObject(this value).
  // 2. ReturnIfAbrupt(O)
  o_ = ToObject(context(), receiver());

  // 3. Let len be ToLength(Get(O, "length")).
  // 4. ReturnIfAbrupt(len).
  TVARIABLE(Number, merged_length);
  Label has_length(this, &merged_length), not_js_array(this);
  GotoIf(DoesntHaveInstanceType(o(), JS_ARRAY_TYPE), &not_js_array);
  merged_length = LoadJSArrayLength(CAST(o()));
  Goto(&has_length);
  BIND(&not_js_array);
  Node* len_property =
      GetProperty(context(), o(), isolate()->factory()->length_string());
  merged_length = ToLength_Inline(context(), len_property);
  Goto(&has_length);
  BIND(&has_length);
  len_ = merged_length.value();

  // 5. If IsCallable(callbackfn) is false, throw a TypeError exception.
  Label type_exception(this, Label::kDeferred);
  Label done(this);
  GotoIf(TaggedIsSmi(callbackfn()), &type_exception);
  Branch(IsCallableMap(LoadMap(callbackfn())), &done, &type_exception);

  BIND(&throw_null_undefined_exception);
  ThrowTypeError(context(), MessageTemplate::kCalledOnNullOrUndefined, name);

  BIND(&type_exception);
  ThrowTypeError(context(), MessageTemplate::kCalledNonCallable,
                 callbackfn());

  BIND(&done);

  // 6. If thisArg was supplied, let T be thisArg; else let T be undefined.
  // [Already done by the arguments adapter]

  if (direction == ForEachDirection::kForward) {
    // 7. Let k be 0.
    k_.Bind(SmiConstant(0));
  } else {
    k_.Bind(NumberDec(len()));
  }

  generator(this);

  HandleFastElements(processor, action, &fully_spec_compliant_, direction,
                     missing_property_mode);

  BIND(&fully_spec_compliant_);

  Node* result =
      CallStub(slow_case_continuation, context(), receiver(), callbackfn(),
               this_arg(), a_.value(), o(), k_.value(), len(), to_.value());
  ReturnFromBuiltin(result);
}

void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinLoopContinuation(
    TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
    Node* this_arg, Node* a, TNode<JSReceiver> o, Node* initial_k,
    TNode<Number> len, Node* to) {
  context_ = context;
  this_arg_ = this_arg;
  callbackfn_ = callbackfn;
  a_.Bind(a);
  k_.Bind(initial_k);
  o_ = o;
  len_ = len;
  to_.Bind(to);
}

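// The typed-array flavor first performs ValidateTypedArray (not-a-typed-array
// and detached-buffer checks), then switches on the concrete fixed typed
// array instance type so the element-access loop can be specialized per
// elements kind.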
void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
    const char* name, const BuiltinResultGenerator& generator,
    const CallResultProcessor& processor, const PostLoopAction& action,
    ForEachDirection direction) {
  name_ = name;

  // ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray

  Label throw_not_typed_array(this, Label::kDeferred),
      throw_detached(this, Label::kDeferred);

  GotoIf(TaggedIsSmi(receiver_), &throw_not_typed_array);
  GotoIfNot(HasInstanceType(CAST(receiver_), JS_TYPED_ARRAY_TYPE),
            &throw_not_typed_array);

  TNode<JSTypedArray> typed_array = CAST(receiver_);
  o_ = typed_array;

  Node* array_buffer =
      LoadObjectField(typed_array, JSTypedArray::kBufferOffset);
  GotoIf(IsDetachedBuffer(array_buffer), &throw_detached);

  len_ = LoadObjectField<Smi>(typed_array, JSTypedArray::kLengthOffset);

  Label throw_not_callable(this, Label::kDeferred);
  Label distinguish_types(this);
  GotoIf(TaggedIsSmi(callbackfn_), &throw_not_callable);
  Branch(IsCallableMap(LoadMap(callbackfn_)), &distinguish_types,
         &throw_not_callable);

  BIND(&throw_not_typed_array);
  ThrowTypeError(context_, MessageTemplate::kNotTypedArray);

  BIND(&throw_detached);
  ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_);

  BIND(&throw_not_callable);
  ThrowTypeError(context_, MessageTemplate::kCalledNonCallable, callbackfn_);

  Label unexpected_instance_type(this);
  BIND(&unexpected_instance_type);
  Unreachable();

  std::vector<int32_t> instance_types = {
#define INSTANCE_TYPE(Type, type, TYPE, ctype, size) FIXED_##TYPE##_ARRAY_TYPE,
      TYPED_ARRAYS(INSTANCE_TYPE)
#undef INSTANCE_TYPE
  };
  std::vector<Label> labels;
  for (size_t i = 0; i < instance_types.size(); ++i) {
    labels.push_back(Label(this));
  }
  std::vector<Label*> label_ptrs;
  for (Label& label : labels) {
    label_ptrs.push_back(&label);
  }

  BIND(&distinguish_types);

  generator(this);

  if (direction == ForEachDirection::kForward) {
    k_.Bind(SmiConstant(0));
  } else {
    k_.Bind(NumberDec(len()));
  }
  Node* instance_type = LoadInstanceType(LoadElements(typed_array));
  Switch(instance_type, &unexpected_instance_type, instance_types.data(),
         label_ptrs.data(), labels.size());

  for (size_t i = 0; i < labels.size(); ++i) {
    BIND(&labels[i]);
    Label done(this);
    source_elements_kind_ = ElementsKindForInstanceType(
        static_cast<InstanceType>(instance_types[i]));
    // TODO(tebbi): Silently cancelling the loop on buffer detachment is a
    // spec violation. Should go to &throw_detached and throw a TypeError
    // instead.
    VisitAllTypedArrayElements(array_buffer, processor, &done, direction,
                               typed_array);
    Goto(&done);
    // No exception, return success
    BIND(&done);
    action(this);
    ReturnFromBuiltin(a_.value());
  }
}

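// This is the slow, fully spec-compliant loop that the fast main body
// tail-calls into. It re-checks element presence via HasProperty (when holes
// are skipped) and reads every element with a generic Get, so it stays
// correct even after earlier callbacks have mutated the receiver.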
void ArrayBuiltinsAssembler::GenerateIteratingArrayBuiltinLoopContinuation(
    const CallResultProcessor& processor, const PostLoopAction& action,
    MissingPropertyMode missing_property_mode, ForEachDirection direction) {
  Label loop(this, {&k_, &a_, &to_});
  Label after_loop(this);
  Goto(&loop);
  BIND(&loop);
  {
    if (direction == ForEachDirection::kForward) {
      // 8. Repeat, while k < len
      GotoIfNumberGreaterThanOrEqual(k(), len_, &after_loop);
    } else {
      // OR
      // 10. Repeat, while k >= 0
      GotoIfNumberGreaterThanOrEqual(SmiConstant(-1), k(), &after_loop);
    }

    Label done_element(this, &to_);
    // a. Let Pk be ToString(k).
    // We never have to perform a ToString conversion as the above guards
    // guarantee that we have a positive {k} which also is a valid array
    // index in the range [0, 2^32-1).
    CSA_ASSERT(this, IsNumberArrayIndex(k()));

    if (missing_property_mode == MissingPropertyMode::kSkip) {
      // b. Let kPresent be HasProperty(O, Pk).
      // c. ReturnIfAbrupt(kPresent).
      TNode<Oddball> k_present =
          HasProperty(o(), k(), context(), kHasProperty);

      // d. If kPresent is true, then
      GotoIf(IsFalse(k_present), &done_element);
    }

    // i. Let kValue be Get(O, Pk).
    // ii. ReturnIfAbrupt(kValue).
    Node* k_value = GetProperty(context(), o(), k());

    // iii. Let funcResult be Call(callbackfn, T, «kValue, k, O»).
    // iv. ReturnIfAbrupt(funcResult).
    a_.Bind(processor(this, k_value, k()));
    Goto(&done_element);

    BIND(&done_element);

    if (direction == ForEachDirection::kForward) {
      // e. Increase k by 1.
      k_.Bind(NumberInc(k()));
    } else {
      // e. Decrease k by 1.
      k_.Bind(NumberDec(k()));
    }
    Goto(&loop);
  }
  BIND(&after_loop);

  action(this);
  Return(a_.value());
}

ElementsKind ArrayBuiltinsAssembler::ElementsKindForInstanceType(
    InstanceType type) {
  switch (type) {
#define INSTANCE_TYPE_TO_ELEMENTS_KIND(Type, type, TYPE, ctype, size) \
  case FIXED_##TYPE##_ARRAY_TYPE:                                     \
    return TYPE##_ELEMENTS;

    TYPED_ARRAYS(INSTANCE_TYPE_TO_ELEMENTS_KIND)
#undef INSTANCE_TYPE_TO_ELEMENTS_KIND

    default:
      UNREACHABLE();
  }
}

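// Iterates the typed array's backing store directly. The detached-buffer
// check and the data pointer load happen on every iteration because the
// callback may detach or otherwise touch the underlying buffer.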
void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
    Node* array_buffer, const CallResultProcessor& processor, Label* detached,
    ForEachDirection direction, TNode<JSTypedArray> typed_array) {
  VariableList list({&a_, &k_, &to_}, zone());

  FastLoopBody body = [&](Node* index) {
    GotoIf(IsDetachedBuffer(array_buffer), detached);
    Node* elements = LoadElements(typed_array);
    Node* base_ptr =
        LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
    Node* external_ptr =
        LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
                        MachineType::Pointer());
    Node* data_ptr = IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);
    Node* value = LoadFixedTypedArrayElementAsTagged(
        data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
    k_.Bind(index);
    a_.Bind(processor(this, value, index));
  };
  Node* start = SmiConstant(0);
  Node* end = len_;
  IndexAdvanceMode advance_mode = IndexAdvanceMode::kPost;
  int incr = 1;
  if (direction == ForEachDirection::kReverse) {
    std::swap(start, end);
    advance_mode = IndexAdvanceMode::kPre;
    incr = -1;
  }
  BuildFastLoop(list, start, end, body, incr, ParameterMode::SMI_PARAMETERS,
                advance_mode);
}

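// Walks the fast elements backing store for a single ElementsKind. Each
// iteration re-checks that the receiver's map and length are unchanged and
// bails out to |array_changed| (the spec-compliant continuation) if the
// callback invalidated those assumptions.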
void ArrayBuiltinsAssembler::VisitAllFastElementsOneKind(
    ElementsKind kind, const CallResultProcessor& processor,
    Label* array_changed, ParameterMode mode, ForEachDirection direction,
    MissingPropertyMode missing_property_mode, TNode<Smi> length) {
  Comment("begin VisitAllFastElementsOneKind");
  // We only use this kind of processing if the no-elements protector is
  // in place at the start. We'll continue checking during array iteration.
  CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));
  VARIABLE(original_map, MachineRepresentation::kTagged);
  original_map.Bind(LoadMap(o()));
  VariableList list({&original_map, &a_, &k_, &to_}, zone());
  Node* start = IntPtrOrSmiConstant(0, mode);
  Node* end = TaggedToParameter(length, mode);
  IndexAdvanceMode advance_mode = direction == ForEachDirection::kReverse
                                      ? IndexAdvanceMode::kPre
                                      : IndexAdvanceMode::kPost;
  if (direction == ForEachDirection::kReverse) std::swap(start, end);
  BuildFastLoop(
      list, start, end,
      [=, &original_map](Node* index) {
        k_.Bind(ParameterToTagged(index, mode));
        Label one_element_done(this), hole_element(this),
            process_element(this);

        // Check if o's map has changed during the callback. If so, we have to
        // fall back to the slower spec implementation for the rest of the
        // iteration.
        Node* o_map = LoadMap(o());
        GotoIf(WordNotEqual(o_map, original_map.value()), array_changed);

        TNode<JSArray> o_array = CAST(o());
        // Check if o's length has changed during the callback and if the
        // index is now out of range of the new length.
        GotoIf(SmiGreaterThanOrEqual(CAST(k_.value()),
                                     CAST(LoadJSArrayLength(o_array))),
               array_changed);

        // Re-load the elements array. It may have been resized.
        Node* elements = LoadElements(o_array);

        // Fast case: load the element directly from the elements FixedArray
        // and call the callback if the element is not the hole.
        DCHECK(kind == PACKED_ELEMENTS || kind == PACKED_DOUBLE_ELEMENTS);
        int base_size = kind == PACKED_ELEMENTS
                            ? FixedArray::kHeaderSize
                            : (FixedArray::kHeaderSize - kHeapObjectTag);
        Node* offset = ElementOffsetFromIndex(index, kind, mode, base_size);
        VARIABLE(value, MachineRepresentation::kTagged);
        if (kind == PACKED_ELEMENTS) {
          value.Bind(LoadObjectField(elements, offset));
          GotoIf(WordEqual(value.value(), TheHoleConstant()), &hole_element);
        } else {
          Node* double_value =
              LoadDoubleWithHoleCheck(elements, offset, &hole_element);
          value.Bind(AllocateHeapNumberWithValue(double_value));
        }
        Goto(&process_element);

        BIND(&hole_element);
        if (missing_property_mode == MissingPropertyMode::kSkip) {
          // The NoElementsProtectorCell could go invalid during callbacks.
          Branch(IsNoElementsProtectorCellInvalid(), array_changed,
                 &one_element_done);
        } else {
          value.Bind(UndefinedConstant());
          Goto(&process_element);
        }
        BIND(&process_element);
        {
          a_.Bind(processor(this, value.value(), k()));
          Goto(&one_element_done);
        }
        BIND(&one_element_done);
      },
      1, mode, advance_mode);
  Comment("end VisitAllFastElementsOneKind");
}

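// Dispatches on the receiver's ElementsKind: packed/holey SMI and object
// elements take the PACKED_ELEMENTS loop, double elements take the
// PACKED_DOUBLE_ELEMENTS loop, and anything else (including non-Smi lengths
// and non-fast arrays) goes to the slow path.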
void ArrayBuiltinsAssembler::HandleFastElements(
    const CallResultProcessor& processor, const PostLoopAction& action,
    Label* slow, ForEachDirection direction,
    MissingPropertyMode missing_property_mode) {
  Label switch_on_elements_kind(this), fast_elements(this),
      maybe_double_elements(this), fast_double_elements(this);

  Comment("begin HandleFastElements");
  // Non-smi lengths must use the slow path.
  GotoIf(TaggedIsNotSmi(len()), slow);

  BranchIfFastJSArray(o(), context(), &switch_on_elements_kind, slow);

  BIND(&switch_on_elements_kind);
  TNode<Smi> smi_len = CAST(len());
  // Select by ElementsKind
  Node* o_map = LoadMap(o());
  Node* bit_field2 = LoadMapBitField2(o_map);
  Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
  Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS),
         &maybe_double_elements, &fast_elements);

  ParameterMode mode = OptimalParameterMode();
  BIND(&fast_elements);
  {
    VisitAllFastElementsOneKind(PACKED_ELEMENTS, processor, slow, mode,
                                direction, missing_property_mode, smi_len);

    action(this);

    // No exception, return success
    ReturnFromBuiltin(a_.value());
  }

  BIND(&maybe_double_elements);
  Branch(IsElementsKindGreaterThan(kind, HOLEY_DOUBLE_ELEMENTS), slow,
         &fast_double_elements);

  BIND(&fast_double_elements);
  {
    VisitAllFastElementsOneKind(PACKED_DOUBLE_ELEMENTS, processor, slow, mode,
                                direction, missing_property_mode, smi_len);

    action(this);

    // No exception, return success
    ReturnFromBuiltin(a_.value());
  }
}

|
2017-03-21 08:56:56 +00:00
|
|
|
|
|
2017-05-12 11:37:21 +00:00
|
|
|
|
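// Editorial note (not part of the upstream source): the dispatch above sends
// Smi and object elements kinds (up to HOLEY_ELEMENTS) to &fast_elements and
// double kinds (PACKED_DOUBLE / HOLEY_DOUBLE) to &fast_double_elements.
// Anything beyond HOLEY_DOUBLE_ELEMENTS, a non-Smi length, or a receiver that
// is not a fast JSArray bails out to the caller-provided slow label.
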
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
// This version is specialized to create a zero length array
// of the elements kind of the input array.
void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate() {
  Label runtime(this, Label::kDeferred), done(this);

  TNode<Smi> len = SmiConstant(0);
  TNode<Map> original_map = LoadMap(o());
  GotoIfNot(
      InstanceTypeEqual(LoadMapInstanceType(original_map), JS_ARRAY_TYPE),
      &runtime);

  GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map),
            &runtime);

  Node* species_protector = ArraySpeciesProtectorConstant();
  Node* value =
      LoadObjectField(species_protector, PropertyCell::kValueOffset);
  TNode<Smi> const protector_invalid =
      SmiConstant(Isolate::kProtectorInvalid);
  GotoIf(WordEqual(value, protector_invalid), &runtime);

  // Respect the ElementsKind of the input array.
  TNode<Int32T> elements_kind = LoadMapElementsKind(original_map);
  GotoIfNot(IsFastElementsKind(elements_kind), &runtime);
  TNode<Context> native_context = LoadNativeContext(context());
  TNode<Map> array_map =
      LoadJSArrayElementsMap(elements_kind, native_context);
  TNode<JSArray> array =
      CAST(AllocateJSArray(GetInitialFastElementsKind(), array_map, len, len,
                           nullptr, CodeStubAssembler::SMI_PARAMETERS));
  a_.Bind(array);

  Goto(&done);

  BIND(&runtime);
  {
    // 5. Let A be ? ArraySpeciesCreate(O, len).
    Node* constructor =
        CallRuntime(Runtime::kArraySpeciesConstructor, context(), o());
    a_.Bind(ConstructJS(CodeFactory::Construct(isolate()), context(),
                        constructor, len));
    Goto(&fully_spec_compliant_);
  }

  BIND(&done);
}

// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(TNode<Number> len) {
  Label runtime(this, Label::kDeferred), done(this);

  Node* const original_map = LoadMap(o());
  GotoIfNot(
      InstanceTypeEqual(LoadMapInstanceType(original_map), JS_ARRAY_TYPE),
      &runtime);

  GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map),
            &runtime);

  Node* species_protector = ArraySpeciesProtectorConstant();
  Node* value =
      LoadObjectField(species_protector, PropertyCell::kValueOffset);
  Node* const protector_invalid = SmiConstant(Isolate::kProtectorInvalid);
  GotoIf(WordEqual(value, protector_invalid), &runtime);

  GotoIfNot(TaggedIsPositiveSmi(len), &runtime);
  GotoIf(
      SmiAbove(CAST(len), SmiConstant(JSArray::kInitialMaxFastElementArray)),
      &runtime);

  // We need to be conservative and start with holey because the builtins
  // that create output arrays aren't guaranteed to be called for every
  // element in the input array (maybe the callback deletes an element).
  const ElementsKind elements_kind =
      GetHoleyElementsKind(GetInitialFastElementsKind());
  TNode<Context> native_context = LoadNativeContext(context());
  TNode<Map> array_map =
      LoadJSArrayElementsMap(elements_kind, native_context);
  a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, len, nullptr,
                          CodeStubAssembler::SMI_PARAMETERS));

  Goto(&done);

  BIND(&runtime);
  {
    // 5. Let A be ? ArraySpeciesCreate(O, len).
    Node* constructor =
        CallRuntime(Runtime::kArraySpeciesConstructor, context(), o());
    a_.Bind(ConstructJS(CodeFactory::Construct(isolate()), context(),
                        constructor, len));
    Goto(&fully_spec_compliant_);
  }

  BIND(&done);
}

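// Editorial note (not part of the upstream source): both GenerateArraySpecies-
// Create variants only take the allocation fast path when the receiver is a
// JSArray whose prototype is the untouched initial Array.prototype, the
// Array[Symbol.species] protector is still valid, and (for the length-taking
// variant) len is a non-negative Smi no larger than
// JSArray::kInitialMaxFastElementArray. Every other case goes through the
// spec-compliant ArraySpeciesConstructor runtime call.
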
TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
  TNode<Int32T> argc =
      UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));

  CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
  TNode<Object> receiver = args.GetReceiver();

  Label runtime(this, Label::kDeferred);
  Label fast(this);

  // Only pop in this stub if
  // 1) the array has fast elements
  // 2) the length is writable,
  // 3) the elements backing store isn't copy-on-write,
  // 4) we aren't supposed to shrink the backing store.

  // 1) Check that the array has fast elements.
  BranchIfFastJSArray(receiver, context, &fast, &runtime);

  BIND(&fast);
  {
    TNode<JSArray> array_receiver = CAST(receiver);
    CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
    Node* length =
        LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
    Label return_undefined(this), fast_elements(this);
    GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);

    // 2) Ensure that the length is writable.
    EnsureArrayLengthWritable(LoadMap(array_receiver), &runtime);

    // 3) Check that the elements backing store isn't copy-on-write.
    Node* elements = LoadElements(array_receiver);
    GotoIf(WordEqual(LoadMap(elements),
                     LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
           &runtime);

    Node* new_length = IntPtrSub(length, IntPtrConstant(1));

    // 4) Check that we're not supposed to shrink the backing store, as
    // implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
    Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
    GotoIf(IntPtrLessThan(
               IntPtrAdd(IntPtrAdd(new_length, new_length),
                         IntPtrConstant(JSObject::kMinAddedElementsCapacity)),
               capacity),
           &runtime);

    StoreObjectFieldNoWriteBarrier(array_receiver, JSArray::kLengthOffset,
                                   SmiTag(new_length));

    Node* elements_kind = LoadMapElementsKind(LoadMap(array_receiver));
    GotoIf(Int32LessThanOrEqual(elements_kind,
                                Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)),
           &fast_elements);

    Node* value = LoadFixedDoubleArrayElement(
        elements, new_length, MachineType::Float64(), 0, INTPTR_PARAMETERS,
        &return_undefined);

    int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
    Node* offset = ElementOffsetFromIndex(new_length, HOLEY_DOUBLE_ELEMENTS,
                                          INTPTR_PARAMETERS, header_size);
    if (Is64()) {
      Node* double_hole = Int64Constant(kHoleNanInt64);
      StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
                          double_hole);
    } else {
      STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
      Node* double_hole = Int32Constant(kHoleNanLower32);
      StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
                          double_hole);
      StoreNoWriteBarrier(MachineRepresentation::kWord32, elements,
                          IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
                          double_hole);
    }
    args.PopAndReturn(AllocateHeapNumberWithValue(value));

    BIND(&fast_elements);
    {
      Node* value = LoadFixedArrayElement(elements, new_length);
      StoreFixedArrayElement(elements, new_length, TheHoleConstant());
      GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
      args.PopAndReturn(value);
    }

    BIND(&return_undefined);
    { args.PopAndReturn(UndefinedConstant()); }
  }

  BIND(&runtime);
  {
    Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
                                 MachineType::TaggedPointer());
    TailCallStub(CodeFactory::ArrayPop(isolate()), context, target,
                 UndefinedConstant(), argc);
  }
}

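// Illustrative example (editorial comment, not part of the upstream source):
//   var a = [1, 2, 3];
//   a.pop();  // returns 3 and rewrites the length in-place on the fast path
// A receiver with a non-writable length, copy-on-write elements, or a backing
// store that would now be eligible for shrinking instead tail-calls the
// generic CodeFactory::ArrayPop stub above.
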
TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
  TVARIABLE(IntPtrT, arg_index);
  Label default_label(this, &arg_index);
  Label smi_transition(this);
  Label object_push_pre(this);
  Label object_push(this, &arg_index);
  Label double_push(this, &arg_index);
  Label double_transition(this);
  Label runtime(this, Label::kDeferred);

  // TODO(ishell): use constants from Descriptor once the JSFunction linkage
  // arguments are reordered.
  TNode<Int32T> argc =
      UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));

  CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
  TNode<Object> receiver = args.GetReceiver();
  TNode<JSArray> array_receiver;
  Node* kind = nullptr;

  Label fast(this);
  BranchIfFastJSArray(receiver, context, &fast, &runtime);

  BIND(&fast);
  {
    array_receiver = CAST(receiver);
    arg_index = IntPtrConstant(0);
    kind = EnsureArrayPushable(LoadMap(array_receiver), &runtime);
    GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
           &object_push_pre);

    Node* new_length = BuildAppendJSArray(PACKED_SMI_ELEMENTS, array_receiver,
                                          &args, &arg_index, &smi_transition);
    args.PopAndReturn(new_length);
  }

  // If the argument is not a smi, then use a heavyweight SetProperty to
  // transition the array for only the single next element. If the argument is
  // a smi, the failure is due to some other reason and we should fall back on
  // the most generic implementation for the rest of the array.
  BIND(&smi_transition);
  {
    Node* arg = args.AtIndex(arg_index.value());
    GotoIf(TaggedIsSmi(arg), &default_label);
    Node* length = LoadJSArrayLength(array_receiver);
    // TODO(danno): Use the KeyedStoreGeneric stub here when possible,
    // calling into the runtime to do the elements transition is overkill.
    CallRuntime(Runtime::kSetProperty, context, array_receiver, length, arg,
                SmiConstant(LanguageMode::kStrict));
    Increment(&arg_index);
    // The runtime SetProperty call could have converted the array to
    // dictionary mode, which must be detected to abort the fast-path.
    Node* map = LoadMap(array_receiver);
    Node* bit_field2 = LoadMapBitField2(map);
    Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
    GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
           &default_label);

    GotoIfNotNumber(arg, &object_push);
    Goto(&double_push);
  }

  BIND(&object_push_pre);
  {
    Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &double_push,
           &object_push);
  }

  BIND(&object_push);
  {
    Node* new_length = BuildAppendJSArray(PACKED_ELEMENTS, array_receiver,
                                          &args, &arg_index, &default_label);
    args.PopAndReturn(new_length);
  }

  BIND(&double_push);
  {
    Node* new_length =
        BuildAppendJSArray(PACKED_DOUBLE_ELEMENTS, array_receiver, &args,
                           &arg_index, &double_transition);
    args.PopAndReturn(new_length);
  }

  // If the argument is not a double, then use a heavyweight SetProperty to
  // transition the array for only the single next element. If the argument is
  // a double, the failure is due to some other reason and we should fall back
  // on the most generic implementation for the rest of the array.
  BIND(&double_transition);
  {
    Node* arg = args.AtIndex(arg_index.value());
    GotoIfNumber(arg, &default_label);
    Node* length = LoadJSArrayLength(array_receiver);
    // TODO(danno): Use the KeyedStoreGeneric stub here when possible,
    // calling into the runtime to do the elements transition is overkill.
    CallRuntime(Runtime::kSetProperty, context, array_receiver, length, arg,
                SmiConstant(LanguageMode::kStrict));
    Increment(&arg_index);
    // The runtime SetProperty call could have converted the array to
    // dictionary mode, which must be detected to abort the fast-path.
    Node* map = LoadMap(array_receiver);
    Node* bit_field2 = LoadMapBitField2(map);
    Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
    GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
           &default_label);
    Goto(&object_push);
  }

  // Fallback that stores un-processed arguments using the full, heavyweight
  // SetProperty machinery.
  BIND(&default_label);
  {
    args.ForEach(
        [this, array_receiver, context](Node* arg) {
          Node* length = LoadJSArrayLength(array_receiver);
          CallRuntime(Runtime::kSetProperty, context, array_receiver, length,
                      arg, SmiConstant(LanguageMode::kStrict));
        },
        arg_index.value());
    args.PopAndReturn(LoadJSArrayLength(array_receiver));
  }

  BIND(&runtime);
  {
    Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
                                 MachineType::TaggedPointer());
    TailCallStub(CodeFactory::ArrayPush(isolate()), context, target,
                 UndefinedConstant(), argc);
  }
}

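// Illustrative example (editorial comment, not part of the upstream source):
//   [1, 2].push(3)     // appended entirely via the PACKED_SMI_ELEMENTS path
//   [1, 2].push(4.5)   // first non-Smi goes through &smi_transition; any
//                      // remaining arguments continue on the double path
//   [1, 2].push({})    // a non-number argument routes the rest of the call
//                      // to the PACKED_ELEMENTS (object) path
// Dictionary-mode receivers and anything else that defeats the fast checks
// fall back to the generic SetProperty loop or the ArrayPush stub above.
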
class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
 public:
  explicit ArrayPrototypeSliceCodeStubAssembler(
      compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

  Node* HandleFastSlice(TNode<Context> context, Node* array, Node* from,
                        Node* count, Label* slow) {
    VARIABLE(result, MachineRepresentation::kTagged);
    Label done(this);

    GotoIf(TaggedIsNotSmi(from), slow);
    GotoIf(TaggedIsNotSmi(count), slow);

    Label try_fast_arguments(this), try_simple_slice(this);

    Node* map = LoadMap(array);
    GotoIfNot(IsJSArrayMap(map), &try_fast_arguments);

    // Check prototype chain if receiver does not have packed elements
    GotoIfNot(IsPrototypeInitialArrayPrototype(context, map), slow);

    GotoIf(IsNoElementsProtectorCellInvalid(), slow);

    GotoIf(IsArraySpeciesProtectorCellInvalid(), slow);

    // Bailout if receiver has slow elements.
    Node* elements_kind = LoadMapElementsKind(map);
    GotoIfNot(IsFastElementsKind(elements_kind), &try_simple_slice);

    // Make sure that the length hasn't been changed by side-effect.
    Node* array_length = LoadJSArrayLength(array);
    GotoIf(TaggedIsNotSmi(array_length), slow);
    GotoIf(SmiAbove(SmiAdd(CAST(from), CAST(count)), CAST(array_length)), slow);

    CSA_ASSERT(this, SmiGreaterThanOrEqual(CAST(from), SmiConstant(0)));

    result.Bind(CallStub(CodeFactory::ExtractFastJSArray(isolate()), context,
                         array, from, count));
    Goto(&done);

    BIND(&try_fast_arguments);

    Node* const native_context = LoadNativeContext(context);
    Node* const fast_aliasted_arguments_map = LoadContextElement(
        native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
    GotoIf(WordNotEqual(map, fast_aliasted_arguments_map), &try_simple_slice);

    Node* sloppy_elements = LoadElements(array);
    TNode<Smi> sloppy_elements_length =
        LoadFixedArrayBaseLength(sloppy_elements);
    TNode<Smi> parameter_map_length =
        SmiSub(sloppy_elements_length,
               SmiConstant(SloppyArgumentsElements::kParameterMapStart));
    VARIABLE(index_out, MachineType::PointerRepresentation());

    int max_fast_elements =
        (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize -
         AllocationMemento::kSize) /
        kPointerSize;
    GotoIf(SmiAboveOrEqual(CAST(count), SmiConstant(max_fast_elements)),
           &try_simple_slice);

    GotoIf(SmiLessThan(CAST(from), SmiConstant(0)), slow);

    TNode<Smi> end = SmiAdd(CAST(from), CAST(count));

    Node* unmapped_elements = LoadFixedArrayElement(
        sloppy_elements, SloppyArgumentsElements::kArgumentsIndex);
    TNode<Smi> unmapped_elements_length =
        LoadFixedArrayBaseLength(unmapped_elements);
    GotoIf(SmiAbove(end, unmapped_elements_length), slow);

    Node* array_map = LoadJSArrayElementsMap(HOLEY_ELEMENTS, native_context);
    result.Bind(AllocateJSArray(HOLEY_ELEMENTS, array_map, count, count,
                                nullptr, SMI_PARAMETERS));

    index_out.Bind(IntPtrConstant(0));
    Node* result_elements = LoadElements(result.value());
    TNode<Smi> from_mapped = SmiMin(parameter_map_length, CAST(from));
    TNode<Smi> to = SmiMin(parameter_map_length, end);
    Node* arguments_context = LoadFixedArrayElement(
        sloppy_elements, SloppyArgumentsElements::kContextIndex);
    VariableList var_list({&index_out}, zone());
    BuildFastLoop(
        var_list, from_mapped, to,
        [this, result_elements, arguments_context, sloppy_elements,
         unmapped_elements, &index_out](Node* current) {
          Node* context_index = LoadFixedArrayElement(
              sloppy_elements, current,
              kPointerSize * SloppyArgumentsElements::kParameterMapStart,
              SMI_PARAMETERS);
          Label is_the_hole(this), done(this);
          GotoIf(IsTheHole(context_index), &is_the_hole);
          Node* mapped_argument =
              LoadContextElement(arguments_context, SmiUntag(context_index));
          StoreFixedArrayElement(result_elements, index_out.value(),
                                 mapped_argument, SKIP_WRITE_BARRIER);
          Goto(&done);
          BIND(&is_the_hole);
          Node* argument = LoadFixedArrayElement(unmapped_elements, current, 0,
                                                 SMI_PARAMETERS);
          StoreFixedArrayElement(result_elements, index_out.value(), argument,
                                 SKIP_WRITE_BARRIER);
          Goto(&done);
          BIND(&done);
          index_out.Bind(IntPtrAdd(index_out.value(), IntPtrConstant(1)));
        },
        1, SMI_PARAMETERS, IndexAdvanceMode::kPost);

    TNode<Smi> unmapped_from =
        SmiMin(SmiMax(parameter_map_length, CAST(from)), end);

    BuildFastLoop(
        var_list, unmapped_from, end,
        [this, unmapped_elements, result_elements, &index_out](Node* current) {
          Node* argument = LoadFixedArrayElement(unmapped_elements, current, 0,
                                                 SMI_PARAMETERS);
          StoreFixedArrayElement(result_elements, index_out.value(), argument,
                                 SKIP_WRITE_BARRIER);
          index_out.Bind(IntPtrAdd(index_out.value(), IntPtrConstant(1)));
        },
        1, SMI_PARAMETERS, IndexAdvanceMode::kPost);

    Goto(&done);

    BIND(&try_simple_slice);
    Node* simple_result = CallRuntime(Runtime::kTrySliceSimpleNonFastElements,
                                      context, array, from, count);
    GotoIfNumber(simple_result, slow);
    result.Bind(simple_result);

    Goto(&done);

    BIND(&done);
    return result.value();
  }

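  // Editorial note (not part of the upstream source): HandleFastSlice only
  // succeeds for Smi-valued from/count on either (a) a fast JSArray whose
  // initial Array.prototype, no-elements protector, and species protector are
  // all intact, or (b) a fast aliased (sloppy) arguments object small enough
  // to fit in a regular heap object. Everything else is routed through the
  // TrySliceSimpleNonFastElements runtime helper or bails to the slow label.
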
  void CopyOneElement(TNode<Context> context, Node* o, Node* a, Node* p_k,
                      Variable& n) {
    // b. Let kPresent be HasProperty(O, Pk).
    // c. ReturnIfAbrupt(kPresent).
    TNode<Oddball> k_present = HasProperty(o, p_k, context, kHasProperty);

    // d. If kPresent is true, then
    Label done_element(this);
    GotoIf(IsFalse(k_present), &done_element);

    // i. Let kValue be Get(O, Pk).
    // ii. ReturnIfAbrupt(kValue).
    Node* k_value = GetProperty(context, o, p_k);

    // iii. Let status be CreateDataPropertyOrThrow(A, ToString(n), kValue).
    // iv. ReturnIfAbrupt(status).
    CallRuntime(Runtime::kCreateDataProperty, context, a, n.value(), k_value);

    Goto(&done_element);
    BIND(&done_element);
  }
};

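// Editorial note (not part of the upstream source): CopyOneElement implements
// the body of the spec loop used by ArrayPrototypeSlice below: HasProperty,
// Get, then CreateDataProperty onto the species-created result, while
// skipping indices that are absent on the source object.
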
TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
  Node* const argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  Label slow(this, Label::kDeferred), fast_elements_kind(this);

  CodeStubArguments args(this, argc);
  TNode<Object> receiver = args.GetReceiver();

  TVARIABLE(JSReceiver, o);
  VARIABLE(len, MachineRepresentation::kTagged);
  Label length_done(this), generic_length(this), check_arguments_length(this),
      load_arguments_length(this);

  GotoIf(TaggedIsSmi(receiver), &generic_length);
  GotoIfNot(IsJSArray(CAST(receiver)), &check_arguments_length);

  TNode<JSArray> array_receiver = CAST(receiver);
  o = array_receiver;
  len.Bind(LoadJSArrayLength(array_receiver));

  // Check for the array clone case. There can be no arguments to slice, the
  // array prototype chain must be intact and have no elements, the array has
  // to have fast elements.
  GotoIf(WordNotEqual(argc, IntPtrConstant(0)), &length_done);

  Label clone(this);
  BranchIfFastJSArrayForCopy(receiver, context, &clone, &length_done);
  BIND(&clone);

  args.PopAndReturn(
      CallStub(CodeFactory::CloneFastJSArray(isolate()), context, receiver));

  BIND(&check_arguments_length);

  Node* map = LoadMap(array_receiver);
  Node* native_context = LoadNativeContext(context);
  GotoIfContextElementEqual(map, native_context,
                            Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX,
                            &load_arguments_length);
  GotoIfContextElementEqual(map, native_context,
                            Context::SLOW_ALIASED_ARGUMENTS_MAP_INDEX,
                            &load_arguments_length);
  GotoIfContextElementEqual(map, native_context,
                            Context::STRICT_ARGUMENTS_MAP_INDEX,
                            &load_arguments_length);
  GotoIfContextElementEqual(map, native_context,
                            Context::SLOPPY_ARGUMENTS_MAP_INDEX,
                            &load_arguments_length);

  Goto(&generic_length);

  BIND(&load_arguments_length);
  Node* arguments_length =
      LoadObjectField(array_receiver, JSArgumentsObject::kLengthOffset);
  GotoIf(TaggedIsNotSmi(arguments_length), &generic_length);
  o = CAST(receiver);
  len.Bind(arguments_length);
  Goto(&length_done);

  BIND(&generic_length);
  // 1. Let O be ToObject(this value).
  // 2. ReturnIfAbrupt(O).
  o = ToObject(context, receiver);

  // 3. Let len be ToLength(Get(O, "length")).
  // 4. ReturnIfAbrupt(len).
  len.Bind(ToLength_Inline(
      context,
      GetProperty(context, o.value(), isolate()->factory()->length_string())));
  Goto(&length_done);

  BIND(&length_done);

  // 5. Let relativeStart be ToInteger(start).
  // 6. ReturnIfAbrupt(relativeStart).
  TNode<Object> arg0 = args.GetOptionalArgumentValue(0, SmiConstant(0));
  Node* relative_start = ToInteger_Inline(context, arg0);

  // 7. If relativeStart < 0, let k be max((len + relativeStart),0);
  // else let k be min(relativeStart, len.value()).
  VARIABLE(k, MachineRepresentation::kTagged);
  Label relative_start_positive(this), relative_start_done(this);
  GotoIfNumberGreaterThanOrEqual(relative_start, SmiConstant(0),
                                 &relative_start_positive);
  k.Bind(NumberMax(NumberAdd(len.value(), relative_start), NumberConstant(0)));
  Goto(&relative_start_done);
  BIND(&relative_start_positive);
  k.Bind(NumberMin(relative_start, len.value()));
  Goto(&relative_start_done);
  BIND(&relative_start_done);

  // 8. If end is undefined, let relativeEnd be len;
  // else let relativeEnd be ToInteger(end).
  // 9. ReturnIfAbrupt(relativeEnd).
  TNode<Object> end = args.GetOptionalArgumentValue(1, UndefinedConstant());
  Label end_undefined(this), end_done(this);
  VARIABLE(relative_end, MachineRepresentation::kTagged);
  GotoIf(WordEqual(end, UndefinedConstant()), &end_undefined);
  relative_end.Bind(ToInteger_Inline(context, end));
  Goto(&end_done);
  BIND(&end_undefined);
  relative_end.Bind(len.value());
  Goto(&end_done);
  BIND(&end_done);

  // 10. If relativeEnd < 0, let final be max((len + relativeEnd),0);
  // else let final be min(relativeEnd, len).
  VARIABLE(final, MachineRepresentation::kTagged);
  Label relative_end_positive(this), relative_end_done(this);
  GotoIfNumberGreaterThanOrEqual(relative_end.value(), NumberConstant(0),
                                 &relative_end_positive);
  final.Bind(NumberMax(NumberAdd(len.value(), relative_end.value()),
                       NumberConstant(0)));
  Goto(&relative_end_done);
  BIND(&relative_end_positive);
  final.Bind(NumberMin(relative_end.value(), len.value()));
  Goto(&relative_end_done);
  BIND(&relative_end_done);

  // 11. Let count be max(final - k, 0).
  Node* count =
      NumberMax(NumberSub(final.value(), k.value()), NumberConstant(0));

  // Handle FAST_ELEMENTS
  Label non_fast(this);
  Node* fast_result =
      HandleFastSlice(context, o.value(), k.value(), count, &non_fast);
  args.PopAndReturn(fast_result);

  // 12. Let A be ArraySpeciesCreate(O, count).
  // 13. ReturnIfAbrupt(A).
  BIND(&non_fast);

  Node* constructor =
      CallRuntime(Runtime::kArraySpeciesConstructor, context, o.value());
  Node* a = ConstructJS(CodeFactory::Construct(isolate()), context, constructor,
                        count);

  // 14. Let n be 0.
  VARIABLE(n, MachineRepresentation::kTagged);
  n.Bind(SmiConstant(0));

  Label loop(this, {&k, &n});
  Label after_loop(this);
  Goto(&loop);
  BIND(&loop);
  {
    // 15. Repeat, while k < final
    GotoIfNumberGreaterThanOrEqual(k.value(), final.value(), &after_loop);

    Node* p_k = k.value();  // ToString(context, k.value()) is no-op

    CopyOneElement(context, o.value(), a, p_k, n);

    // e. Increase k by 1.
    k.Bind(NumberInc(k.value()));

    // f. Increase n by 1.
    n.Bind(NumberInc(n.value()));

    Goto(&loop);
  }

  BIND(&after_loop);

  // 16. Let setStatus be Set(A, "length", n, true).
  // 17. ReturnIfAbrupt(setStatus).
  CallRuntime(Runtime::kSetProperty, context, a,
              HeapConstant(isolate()->factory()->length_string()), n.value(),
              SmiConstant(static_cast<int>(LanguageMode::kStrict)));

  args.PopAndReturn(a);
}

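// Illustrative example (editorial comment, not part of the upstream source):
//   var a = [1, 2, 3];
//   a.slice();      // argument-free clone of a fast array -> CloneFastJSArray
//   a.slice(1);     // Smi bounds on a fast array -> HandleFastSlice /
//                   // ExtractFastJSArray
// Receivers that are not fast JSArrays (arguments objects, generic
// array-likes) fall through to the spec-step loop above instead.
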
TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
  TNode<Int32T> argc =
      UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));

  CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
  TNode<Object> receiver = args.GetReceiver();

  Label runtime(this, Label::kDeferred);
  Label fast(this);

  // Only shift in this stub if
  // 1) the array has fast elements
  // 2) the length is writable,
  // 3) the elements backing store isn't copy-on-write,
  // 4) we aren't supposed to shrink the backing store,
  // 5) we aren't supposed to left-trim the backing store.

  // 1) Check that the array has fast elements.
  BranchIfFastJSArray(receiver, context, &fast, &runtime);

  BIND(&fast);
  {
    TNode<JSArray> array_receiver = CAST(receiver);
    CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
    Node* length =
        LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
    Label return_undefined(this), fast_elements_tagged(this),
        fast_elements_smi(this);
    GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);

    // 2) Ensure that the length is writable.
    EnsureArrayLengthWritable(LoadMap(array_receiver), &runtime);

    // 3) Check that the elements backing store isn't copy-on-write.
    Node* elements = LoadElements(array_receiver);
    GotoIf(WordEqual(LoadMap(elements),
                     LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
           &runtime);

    Node* new_length = IntPtrSub(length, IntPtrConstant(1));

    // 4) Check that we're not supposed to right-trim the backing store, as
    // implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
    Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
    GotoIf(IntPtrLessThan(
               IntPtrAdd(IntPtrAdd(new_length, new_length),
                         IntPtrConstant(JSObject::kMinAddedElementsCapacity)),
               capacity),
           &runtime);

    // 5) Check that we're not supposed to left-trim the backing store, as
    // implemented in elements.cc:FastElementsAccessor::MoveElements.
    GotoIf(IntPtrGreaterThan(new_length,
                             IntPtrConstant(JSArray::kMaxCopyElements)),
           &runtime);

    StoreObjectFieldNoWriteBarrier(array_receiver, JSArray::kLengthOffset,
                                   SmiTag(new_length));

    Node* elements_kind = LoadMapElementsKind(LoadMap(array_receiver));
    GotoIf(
        Int32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_SMI_ELEMENTS)),
        &fast_elements_smi);
    GotoIf(Int32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_ELEMENTS)),
           &fast_elements_tagged);

    // Fast double elements kind:
    {
      CSA_ASSERT(this,
                 Int32LessThanOrEqual(elements_kind,
                                      Int32Constant(HOLEY_DOUBLE_ELEMENTS)));

      VARIABLE(result, MachineRepresentation::kTagged, UndefinedConstant());

      Label move_elements(this);
      result.Bind(AllocateHeapNumberWithValue(LoadFixedDoubleArrayElement(
          elements, IntPtrConstant(0), MachineType::Float64(), 0,
          INTPTR_PARAMETERS, &move_elements)));
      Goto(&move_elements);
      BIND(&move_elements);

      int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
      Node* memmove =
          ExternalConstant(ExternalReference::libc_memmove_function());
      Node* start = IntPtrAdd(
          BitcastTaggedToWord(elements),
          ElementOffsetFromIndex(IntPtrConstant(0), HOLEY_DOUBLE_ELEMENTS,
                                 INTPTR_PARAMETERS, header_size));
      CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
                     MachineType::Pointer(), MachineType::UintPtr(), memmove,
                     start, IntPtrAdd(start, IntPtrConstant(kDoubleSize)),
                     IntPtrMul(new_length, IntPtrConstant(kDoubleSize)));
      Node* offset = ElementOffsetFromIndex(new_length, HOLEY_DOUBLE_ELEMENTS,
                                            INTPTR_PARAMETERS, header_size);
      if (Is64()) {
        Node* double_hole = Int64Constant(kHoleNanInt64);
        StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
                            double_hole);
      } else {
        STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
        Node* double_hole = Int32Constant(kHoleNanLower32);
        StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
                            double_hole);
        StoreNoWriteBarrier(MachineRepresentation::kWord32, elements,
                            IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
                            double_hole);
      }
      args.PopAndReturn(result.value());
    }

    BIND(&fast_elements_tagged);
    {
      Node* value = LoadFixedArrayElement(elements, 0);
      BuildFastLoop(IntPtrConstant(0), new_length,
                    [&](Node* index) {
                      StoreFixedArrayElement(
                          elements, index,
                          LoadFixedArrayElement(
                              elements, IntPtrAdd(index, IntPtrConstant(1))));
                    },
                    1, ParameterMode::INTPTR_PARAMETERS,
                    IndexAdvanceMode::kPost);
      StoreFixedArrayElement(elements, new_length, TheHoleConstant());
      GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
      args.PopAndReturn(value);
    }

    BIND(&fast_elements_smi);
    {
      Node* value = LoadFixedArrayElement(elements, 0);
      BuildFastLoop(IntPtrConstant(0), new_length,
                    [&](Node* index) {
                      StoreFixedArrayElement(
                          elements, index,
                          LoadFixedArrayElement(
                              elements, IntPtrAdd(index, IntPtrConstant(1))),
                          SKIP_WRITE_BARRIER);
                    },
                    1, ParameterMode::INTPTR_PARAMETERS,
                    IndexAdvanceMode::kPost);
      StoreFixedArrayElement(elements, new_length, TheHoleConstant());
      GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
      args.PopAndReturn(value);
    }

    BIND(&return_undefined);
    { args.PopAndReturn(UndefinedConstant()); }
  }

  BIND(&runtime);
  {
    Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
                                 MachineType::TaggedPointer());
    TailCallStub(CodeFactory::ArrayShift(isolate()), context, target,
                 UndefinedConstant(), argc);
  }
}

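// Editorial note (not part of the upstream source): in the double-elements
// case above the first element is removed by memmove-ing the remaining
// unboxed doubles one slot to the left and then writing the hole NaN pattern
// into the now-unused last slot, so no per-element tagged stores or write
// barriers are needed; the Smi and tagged cases instead shift element by
// element in a BuildFastLoop.
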
TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) {
  ParameterMode mode = OptimalParameterMode();
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  Node* array = Parameter(Descriptor::kSource);
  Node* begin = TaggedToParameter(Parameter(Descriptor::kBegin), mode);
  Node* count = TaggedToParameter(Parameter(Descriptor::kCount), mode);

  CSA_ASSERT(this, IsJSArray(array));
  CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));

  Return(ExtractFastJSArray(context, array, begin, count, mode));
}

TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  Node* array = Parameter(Descriptor::kSource);

  CSA_ASSERT(this, IsJSArray(array));
  CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));

  ParameterMode mode = OptimalParameterMode();
  Return(CloneFastJSArray(context, array, mode));
}

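// Editorial note (not part of the upstream source): these two builtins are
// thin wrappers reached via CodeFactory::ExtractFastJSArray and
// CodeFactory::CloneFastJSArray from the slice fast paths above; both assert
// that the source is a JSArray and that the no-elements protector is still
// valid before delegating to the CodeStubAssembler helpers of the same name.
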
TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* array = Parameter(Descriptor::kArray);
  TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, array, object, initial_k,
                                            len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinsAssembler::FindProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction,
      MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}

// Continuation that is called after an eager deoptimization from TF (ex. the
// array changes during iteration).
TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));

  Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
                     callbackfn, this_arg, UndefinedConstant(), receiver,
                     initial_k, len, UndefinedConstant()));
}

// Continuation that is called after a lazy deoptimization from TF (ex. the
// callback function is no longer callable).
TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));

  Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
                     callbackfn, this_arg, UndefinedConstant(), receiver,
                     initial_k, len, UndefinedConstant()));
}

// Continuation that is called after a lazy deoptimization from TF that happens
// right after the callback, whose return value must be handled before
// iteration continues.
TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation,
           ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* found_value = Parameter(Descriptor::kFoundValue);
  Node* is_found = Parameter(Descriptor::kIsFound);

  // This custom lazy deopt point is right after the callback. find() needs
  // to pick up at the next step, which is returning the element if the
  // callback value is truthy. Otherwise, continue the search by calling the
  // continuation.
  Label if_true(this), if_false(this);
  BranchIfToBooleanIsTrue(is_found, &if_true, &if_false);
  BIND(&if_true);
  Return(found_value);
  BIND(&if_false);
  Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
                     callbackfn, this_arg, UndefinedConstant(), receiver,
                     initial_k, len, UndefinedConstant()));
}

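// Editorial note (not part of the upstream source): TurboFan inlines the
// find()/findIndex() loops; the Eager, Lazy, and AfterCallback continuations
// above re-enter the generic loop continuation with the current index
// (initial_k), so a deoptimization resumes iteration exactly where the
// optimized code stopped instead of restarting from index 0.
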
// ES #sec-array.prototype.find
TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* this_arg = args.GetOptionalArgumentValue(1);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.find", &ArrayBuiltinsAssembler::FindResultGenerator,
      &ArrayBuiltinsAssembler::FindProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction,
      Builtins::CallableFor(isolate(), Builtins::kArrayFindLoopContinuation),
      MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}

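// Illustrative example (editorial comment, not part of the upstream source):
//   [1, 2, 3].find(x => x > 1)       // 2
//   [1, 2, 3].findIndex(x => x > 1)  // 1
// Both builtins share the iterating-builtin machinery; only the result
// generator and processor differ (undefined vs. -1 as the default result,
// and element vs. index as the value returned on a truthy callback).
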
TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* array = Parameter(Descriptor::kArray);
  TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, array, object, initial_k,
                                            len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinsAssembler::FindIndexProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction,
      MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}

TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));

  Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
                     receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
                     initial_k, len, UndefinedConstant()));
}

TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));

  Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
                     receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
                     initial_k, len, UndefinedConstant()));
}

TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation,
           ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* found_value = Parameter(Descriptor::kFoundValue);
  Node* is_found = Parameter(Descriptor::kIsFound);

  // This custom lazy deopt point is right after the callback. findIndex()
  // needs to pick up at the next step, which is returning the element's index
  // if the callback value is truthy. Otherwise, continue the search by
  // calling the continuation.
  Label if_true(this), if_false(this);
  BranchIfToBooleanIsTrue(is_found, &if_true, &if_false);
  BIND(&if_true);
  Return(found_value);
  BIND(&if_false);
  Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
                     receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
                     initial_k, len, UndefinedConstant()));
}

// ES #sec-array.prototype.findindex
TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* this_arg = args.GetOptionalArgumentValue(1);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.findIndex",
      &ArrayBuiltinsAssembler::FindIndexResultGenerator,
      &ArrayBuiltinsAssembler::FindIndexProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction,
      Builtins::CallableFor(isolate(),
                            Builtins::kArrayFindIndexLoopContinuation),
      MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}

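// Helper assembler shared by Array.from and Array.of below: it constructs the
// output object and sets its final length. If the receiver is a constructor
// (e.g. Array.from.call(MyArray, items) for a hypothetical Array subclass
// MyArray), the result is built with "new"; otherwise a plain packed-SMI
// JSArray is allocated.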
class ArrayPopulatorAssembler : public CodeStubAssembler {
 public:
  explicit ArrayPopulatorAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

  TNode<Object> ConstructArrayLike(TNode<Context> context,
                                   TNode<Object> receiver) {
    TVARIABLE(Object, array);
    Label is_constructor(this), is_not_constructor(this), done(this);
    GotoIf(TaggedIsSmi(receiver), &is_not_constructor);
    Branch(IsConstructor(CAST(receiver)), &is_constructor, &is_not_constructor);

    BIND(&is_constructor);
    {
      array = CAST(
          ConstructJS(CodeFactory::Construct(isolate()), context, receiver));
      Goto(&done);
    }

    BIND(&is_not_constructor);
    {
      Label allocate_js_array(this);

      TNode<Map> array_map = CAST(LoadContextElement(
          context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX));

      array = CAST(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map,
                                   SmiConstant(0), SmiConstant(0), nullptr,
                                   ParameterMode::SMI_PARAMETERS));
      Goto(&done);
    }

    BIND(&done);
    return array.value();
  }

  TNode<Object> ConstructArrayLike(TNode<Context> context,
                                   TNode<Object> receiver,
                                   TNode<Number> length) {
    TVARIABLE(Object, array);
    Label is_constructor(this), is_not_constructor(this), done(this);
    CSA_ASSERT(this, IsNumberNormalized(length));
    GotoIf(TaggedIsSmi(receiver), &is_not_constructor);
    Branch(IsConstructor(CAST(receiver)), &is_constructor, &is_not_constructor);

    BIND(&is_constructor);
    {
      array = CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
                               receiver, length));
      Goto(&done);
    }

    BIND(&is_not_constructor);
    {
      Label allocate_js_array(this);

      Label next(this), runtime(this, Label::kDeferred);
      TNode<Smi> limit = SmiConstant(JSArray::kInitialMaxFastElementArray);
      CSA_ASSERT_BRANCH(this, [=](Label* ok, Label* not_ok) {
        BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual,
                                           length, SmiConstant(0), ok, not_ok);
      });
      // This check also transitively covers the case where length is too big
      // to be representable by a SMI and so is not usable with
      // AllocateJSArray.
      BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, length,
                                         limit, &runtime, &next);

      BIND(&runtime);
      {
        TNode<Context> native_context = LoadNativeContext(context);
        TNode<JSFunction> array_function = CAST(
            LoadContextElement(native_context, Context::ARRAY_FUNCTION_INDEX));
        array = CallRuntime(Runtime::kNewArray, context, array_function, length,
                            array_function, UndefinedConstant());
        Goto(&done);
      }

      BIND(&next);
      CSA_ASSERT(this, TaggedIsSmi(length));

      TNode<Map> array_map = CAST(LoadContextElement(
          context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX));

      // TODO(delphick): Consider using
      // AllocateUninitializedJSArrayWithElements to avoid initializing an
      // array and then writing over it.
      array = CAST(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, length,
                                   SmiConstant(0), nullptr,
                                   ParameterMode::SMI_PARAMETERS));
      Goto(&done);
    }

    BIND(&done);
    return array.value();
  }

  void GenerateSetLength(TNode<Context> context, TNode<Object> array,
                         TNode<Number> length) {
    Label fast(this), runtime(this), done(this);
    // There is no need to set the length if
    // 1) the array is a fast JS array, and
    // 2) the new length is equal to the old length,
    // as the set would not be observable. Otherwise fall back to the runtime.

    // 1) Check that the array has fast elements.
    // TODO(delphick): Consider changing this since it does an unnecessary
    // check for SMIs.
    // TODO(delphick): We could also hoist this to after the array construction
    // and copy the args into the array in the same way as the Array
    // constructor.
    BranchIfFastJSArray(array, context, &fast, &runtime);

    BIND(&fast);
    {
      TNode<JSArray> fast_array = CAST(array);

      TNode<Smi> length_smi = CAST(length);
      TNode<Smi> old_length = LoadFastJSArrayLength(fast_array);
      CSA_ASSERT(this, TaggedIsPositiveSmi(old_length));

      // 2) If the created array's length matches the required length, then
      //    there's nothing else to do. Otherwise use the runtime to set the
      //    property as that will insert holes into excess elements or shrink
      //    the backing store as appropriate.
      Branch(SmiNotEqual(length_smi, old_length), &runtime, &done);
    }

    BIND(&runtime);
    {
      CallRuntime(Runtime::kSetProperty, context, static_cast<Node*>(array),
                  CodeStubAssembler::LengthStringConstant(), length,
                  SmiConstant(LanguageMode::kStrict));
      Goto(&done);
    }

    BIND(&done);
  }
};

// ES #sec-array.from
TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Int32T> argc =
      UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));

  CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));

  TNode<Object> map_function = args.GetOptionalArgumentValue(1);

  // If map_function is not undefined, ensure that it is callable; otherwise
  // throw.
  {
    Label no_error(this), error(this);
    GotoIf(IsUndefined(map_function), &no_error);
    GotoIf(TaggedIsSmi(map_function), &error);
    Branch(IsCallable(CAST(map_function)), &no_error, &error);

    BIND(&error);
    ThrowTypeError(context, MessageTemplate::kCalledNonCallable, map_function);

    BIND(&no_error);
  }

  Label iterable(this), not_iterable(this), finished(this), if_exception(this);

  TNode<Object> this_arg = args.GetOptionalArgumentValue(2);
  TNode<Object> items = args.GetOptionalArgumentValue(0);
  // The spec doesn't require ToObject to be called directly on the iterable
  // branch, but it's part of GetMethod that is in the spec.
  TNode<JSReceiver> array_like = ToObject(context, items);

  TVARIABLE(Object, array);
  TVARIABLE(Number, length);

  // Determine whether items[Symbol.iterator] is defined:
  IteratorBuiltinsAssembler iterator_assembler(state());
  Node* iterator_method =
      iterator_assembler.GetIteratorMethod(context, array_like);
  Branch(IsNullOrUndefined(iterator_method), &not_iterable, &iterable);

  BIND(&iterable);
  {
    TVARIABLE(Number, index, SmiConstant(0));
    TVARIABLE(Object, var_exception);
    Label loop(this, &index), loop_done(this),
        on_exception(this, Label::kDeferred),
        index_overflow(this, Label::kDeferred);

    // Check that the method is callable.
    {
      Label get_method_not_callable(this, Label::kDeferred), next(this);
      GotoIf(TaggedIsSmi(iterator_method), &get_method_not_callable);
      GotoIfNot(IsCallable(CAST(iterator_method)), &get_method_not_callable);
      Goto(&next);

      BIND(&get_method_not_callable);
      ThrowTypeError(context, MessageTemplate::kCalledNonCallable,
                     iterator_method);

      BIND(&next);
    }

    // Construct the output array with empty length.
    array = ConstructArrayLike(context, args.GetReceiver());

    // Actually get the iterator and throw if the iterator method does not
    // yield one.
    IteratorRecord iterator_record =
        iterator_assembler.GetIterator(context, items, iterator_method);

    TNode<Context> native_context = LoadNativeContext(context);
    TNode<Object> fast_iterator_result_map =
        LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);

    Goto(&loop);

    BIND(&loop);
    {
      // Loop while iterator is not done.
      TNode<Object> next = CAST(iterator_assembler.IteratorStep(
          context, iterator_record, &loop_done, fast_iterator_result_map));
      TVARIABLE(Object, value,
                CAST(iterator_assembler.IteratorValue(
                    context, next, fast_iterator_result_map)));

      // If a map_function is supplied then call it (using this_arg as the
      // receiver) on the value returned from the iterator. Exceptions are
      // caught so the iterator can be closed.
      {
        Label next(this);
        GotoIf(IsUndefined(map_function), &next);

        CSA_ASSERT(this, IsCallable(CAST(map_function)));
        Node* v = CallJS(CodeFactory::Call(isolate()), context, map_function,
                         this_arg, value.value(), index.value());
        GotoIfException(v, &on_exception, &var_exception);
        value = CAST(v);
        Goto(&next);
        BIND(&next);
      }

      // Store the result in the output object (catching any exceptions so the
      // iterator can be closed).
      Node* define_status =
          CallRuntime(Runtime::kCreateDataProperty, context, array.value(),
                      index.value(), value.value());
      GotoIfException(define_status, &on_exception, &var_exception);

      index = NumberInc(index.value());

      // The spec requires that we throw an exception if index reaches 2^53-1,
      // but an empty loop would take >100 days to do this many iterations. To
      // actually run for that long would require an iterator that never set
      // done to true and a target array which somehow never ran out of memory,
      // e.g. a proxy that discarded the values. Ignoring this case just means
      // we would repeatedly call CreateDataProperty with index = 2^53.
      CSA_ASSERT_BRANCH(this, [&](Label* ok, Label* not_ok) {
        BranchIfNumberRelationalComparison(Operation::kLessThan, index.value(),
                                           NumberConstant(kMaxSafeInteger), ok,
                                           not_ok);
      });
      Goto(&loop);
    }

    BIND(&loop_done);
    {
      length = index;
      Goto(&finished);
    }

    BIND(&on_exception);
    {
      // Close the iterator, rethrowing either the passed exception or
      // exceptions thrown during the close.
      iterator_assembler.IteratorCloseOnException(context, iterator_record,
                                                  &var_exception);
    }
  }

BIND(¬_iterable);
|
|
|
|
|
{
|
|
|
|
|
// Treat array_like as an array and try to get its length.
|
2018-03-05 21:32:29 +00:00
|
|
|
|
length = ToLength_Inline(
|
|
|
|
|
context, GetProperty(context, array_like, factory()->length_string()));
|
2018-02-08 17:27:59 +00:00
|
|
|
|
|
|
|
|
|
// Construct an array using the receiver as constructor with the same length
|
|
|
|
|
// as the input array.
|
2018-02-09 15:24:14 +00:00
|
|
|
|
array = ConstructArrayLike(context, args.GetReceiver(), length.value());
|
2018-02-08 17:27:59 +00:00
|
|
|
|
|
|
|
|
|
TVARIABLE(Number, index, SmiConstant(0));
|
|
|
|
|
|
2018-05-18 10:21:52 +00:00
|
|
|
|
// TODO(ishell): remove <Object, Object>
|
|
|
|
|
GotoIf(WordEqual<Object, Object>(length.value(), SmiConstant(0)),
|
|
|
|
|
&finished);
|
2018-02-08 17:27:59 +00:00
|
|
|
|
|
|
|
|
|
// Loop from 0 to length-1.
|
|
|
|
|
{
|
|
|
|
|
Label loop(this, &index);
|
|
|
|
|
Goto(&loop);
|
|
|
|
|
BIND(&loop);
|
|
|
|
|
TVARIABLE(Object, value);
|
|
|
|
|
|
2018-02-23 09:45:34 +00:00
|
|
|
|
value = GetProperty(context, array_like, index.value());
|
2018-02-08 17:27:59 +00:00
|
|
|
|
|
|
|
|
|
// If a map_function is supplied then call it (using this_arg as
|
|
|
|
|
// receiver), on the value retrieved from the array.
|
|
|
|
|
{
|
|
|
|
|
Label next(this);
|
|
|
|
|
GotoIf(IsUndefined(map_function), &next);
|
|
|
|
|
|
2018-04-27 11:27:17 +00:00
|
|
|
|
CSA_ASSERT(this, IsCallable(CAST(map_function)));
|
2018-02-08 17:27:59 +00:00
|
|
|
|
value = CAST(CallJS(CodeFactory::Call(isolate()), context, map_function,
|
2018-02-09 15:24:14 +00:00
|
|
|
|
this_arg, value.value(), index.value()));
|
2018-02-08 17:27:59 +00:00
|
|
|
|
Goto(&next);
|
|
|
|
|
BIND(&next);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Store the result in the output object.
|
2018-02-09 15:24:14 +00:00
|
|
|
|
CallRuntime(Runtime::kCreateDataProperty, context, array.value(),
|
|
|
|
|
index.value(), value.value());
|
2018-02-23 11:44:01 +00:00
|
|
|
|
index = NumberInc(index.value());
|
2018-02-09 15:24:14 +00:00
|
|
|
|
BranchIfNumberRelationalComparison(Operation::kLessThan, index.value(),
|
|
|
|
|
length.value(), &loop, &finished);
|
2018-02-08 17:27:59 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
  BIND(&finished);

  // Finally set the length on the output and return it.
  GenerateSetLength(context, array.value(), length.value());
  args.PopAndReturn(array.value());
}

// ES #sec-array.of
TF_BUILTIN(ArrayOf, ArrayPopulatorAssembler) {
  TNode<Int32T> argc =
      UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
  TNode<Smi> length = SmiFromInt32(argc);

  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));

  CodeStubArguments args(this, length, nullptr, ParameterMode::SMI_PARAMETERS);

  TNode<Object> array = ConstructArrayLike(context, args.GetReceiver(), length);

  // TODO(delphick): Avoid using CreateDataProperty on the fast path.
  BuildFastLoop(SmiConstant(0), length,
                [=](Node* index) {
                  CallRuntime(
                      Runtime::kCreateDataProperty, context,
                      static_cast<Node*>(array), index,
                      args.AtIndex(index, ParameterMode::SMI_PARAMETERS));
                },
                1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);

  GenerateSetLength(context, array, length);
  args.PopAndReturn(array);
}

// ES #sec-%typedarray%.prototype.find
TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* this_arg = args.GetOptionalArgumentValue(1);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.find",
      &ArrayBuiltinsAssembler::FindResultGenerator,
      &ArrayBuiltinsAssembler::FindProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction);
}

// ES #sec-%typedarray%.prototype.findindex
TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* this_arg = args.GetOptionalArgumentValue(1);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.findIndex",
      &ArrayBuiltinsAssembler::FindIndexResultGenerator,
      &ArrayBuiltinsAssembler::FindIndexProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction);
}

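// ES #sec-%typedarray%.prototype.foreach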
TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* this_arg = args.GetOptionalArgumentValue(1);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.forEach",
      &ArrayBuiltinsAssembler::ForEachResultGenerator,
      &ArrayBuiltinsAssembler::ForEachProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction);
}

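// Deopt continuations for Array.prototype.some. These re-enter the
// spec-compliant ArraySomeLoopContinuation below when the optimized, inlined
// version of some() deoptimizes inside its loop.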
TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* result = Parameter(Descriptor::kResult);

  // This custom lazy deopt point is right after the callback. some() needs
  // to pick up at the next step, which is either returning true if {result}
  // is true or continuing to the next array element.
  Label true_continue(this), false_continue(this);

  // If the callback result is true, return true.
  BranchIfToBooleanIsTrue(result, &true_continue, &false_continue);
  BIND(&true_continue);
  { Return(TrueConstant()); }
  BIND(&false_continue);
  {
    // Increment k.
    initial_k = NumberInc(initial_k);

    Return(CallBuiltin(Builtins::kArraySomeLoopContinuation, context, receiver,
                       callbackfn, this_arg, FalseConstant(), receiver,
                       initial_k, len, UndefinedConstant()));
  }
}

TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));

  Return(CallBuiltin(Builtins::kArraySomeLoopContinuation, context, receiver,
                     callbackfn, this_arg, FalseConstant(), receiver, initial_k,
                     len, UndefinedConstant()));
}

TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* array = Parameter(Descriptor::kArray);
  TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, array, object, initial_k,
                                            len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinsAssembler::SomeProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}

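// ES #sec-array.prototype.some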
TF_BUILTIN(ArraySome, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* this_arg = args.GetOptionalArgumentValue(1);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.some", &ArrayBuiltinsAssembler::SomeResultGenerator,
      &ArrayBuiltinsAssembler::SomeProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction,
      Builtins::CallableFor(isolate(), Builtins::kArraySomeLoopContinuation),
      MissingPropertyMode::kSkip);
}

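// ES #sec-%typedarray%.prototype.some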
TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* this_arg = args.GetOptionalArgumentValue(1);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.some",
      &ArrayBuiltinsAssembler::SomeResultGenerator,
      &ArrayBuiltinsAssembler::SomeProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction);
}

TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* result = Parameter(Descriptor::kResult);

  // This custom lazy deopt point is right after the callback. every() needs
  // to pick up at the next step, which is either continuing to the next
  // array element or returning false if {result} is false.
  Label true_continue(this), false_continue(this);

  // If the callback result is true, continue with the next element; otherwise
  // return false.
  BranchIfToBooleanIsTrue(result, &true_continue, &false_continue);
  BIND(&true_continue);
  {
    // Increment k.
    initial_k = NumberInc(initial_k);

    Return(CallBuiltin(Builtins::kArrayEveryLoopContinuation, context, receiver,
                       callbackfn, this_arg, TrueConstant(), receiver,
                       initial_k, len, UndefinedConstant()));
  }
  BIND(&false_continue);
  { Return(FalseConstant()); }
}

TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));

  Return(CallBuiltin(Builtins::kArrayEveryLoopContinuation, context, receiver,
                     callbackfn, this_arg, TrueConstant(), receiver, initial_k,
                     len, UndefinedConstant()));
}

TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* array = Parameter(Descriptor::kArray);
  TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, array, object, initial_k,
                                            len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinsAssembler::EveryProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}

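// ES #sec-array.prototype.every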
TF_BUILTIN(ArrayEvery, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* this_arg = args.GetOptionalArgumentValue(1);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.every", &ArrayBuiltinsAssembler::EveryResultGenerator,
      &ArrayBuiltinsAssembler::EveryProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction,
      Builtins::CallableFor(isolate(), Builtins::kArrayEveryLoopContinuation),
      MissingPropertyMode::kSkip);
}

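// ES #sec-%typedarray%.prototype.every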
TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* this_arg = args.GetOptionalArgumentValue(1);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.every",
      &ArrayBuiltinsAssembler::EveryResultGenerator,
      &ArrayBuiltinsAssembler::EveryProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction);
}

TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* accumulator = Parameter(Descriptor::kAccumulator);
  TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, accumulator, object,
                                            initial_k, len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinsAssembler::ReduceProcessor,
      &ArrayBuiltinsAssembler::ReducePostLoopAction,
      MissingPropertyMode::kSkip);
}

TF_BUILTIN(ArrayReducePreLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));

  // Simulate starting the loop at 0, but ensuring that the accumulator is
  // the hole. The continuation stub will search for the initial non-hole
  // element, rightly throwing an exception if not found.
  Return(CallBuiltin(Builtins::kArrayReduceLoopContinuation, context, receiver,
                     callbackfn, UndefinedConstant(), TheHoleConstant(),
                     receiver, SmiConstant(0), len, UndefinedConstant()));
}

TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* accumulator = Parameter(Descriptor::kAccumulator);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));

  Return(CallBuiltin(Builtins::kArrayReduceLoopContinuation, context, receiver,
                     callbackfn, UndefinedConstant(), accumulator, receiver,
                     initial_k, len, UndefinedConstant()));
}

TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* result = Parameter(Descriptor::kResult);

  Return(CallBuiltin(Builtins::kArrayReduceLoopContinuation, context, receiver,
                     callbackfn, UndefinedConstant(), result, receiver,
                     initial_k, len, UndefinedConstant()));
}

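// ES #sec-array.prototype.reduce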
TF_BUILTIN(ArrayReduce, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
                                argc);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.reduce", &ArrayBuiltinsAssembler::ReduceResultGenerator,
      &ArrayBuiltinsAssembler::ReduceProcessor,
      &ArrayBuiltinsAssembler::ReducePostLoopAction,
      Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation),
      MissingPropertyMode::kSkip);
}

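// ES #sec-%typedarray%.prototype.reduce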
TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
                                argc);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.reduce",
      &ArrayBuiltinsAssembler::ReduceResultGenerator,
      &ArrayBuiltinsAssembler::ReduceProcessor,
      &ArrayBuiltinsAssembler::ReducePostLoopAction);
}

TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* accumulator = Parameter(Descriptor::kAccumulator);
  TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, accumulator, object,
                                            initial_k, len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinsAssembler::ReduceProcessor,
      &ArrayBuiltinsAssembler::ReducePostLoopAction, MissingPropertyMode::kSkip,
      ForEachDirection::kReverse);
}

TF_BUILTIN(ArrayReduceRightPreLoopEagerDeoptContinuation,
           ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  TNode<Smi> len = CAST(Parameter(Descriptor::kLength));

  // Simulate starting the loop at len - 1, but ensuring that the accumulator
  // is the hole. The continuation stub will search for the initial non-hole
  // element, rightly throwing an exception if not found.
  Return(CallBuiltin(Builtins::kArrayReduceRightLoopContinuation, context,
                     receiver, callbackfn, UndefinedConstant(),
                     TheHoleConstant(), receiver, SmiSub(len, SmiConstant(1)),
                     len, UndefinedConstant()));
}

TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* accumulator = Parameter(Descriptor::kAccumulator);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));

  Return(CallBuiltin(Builtins::kArrayReduceRightLoopContinuation, context,
                     receiver, callbackfn, UndefinedConstant(), accumulator,
                     receiver, initial_k, len, UndefinedConstant()));
}

TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* result = Parameter(Descriptor::kResult);

  Return(CallBuiltin(Builtins::kArrayReduceRightLoopContinuation, context,
                     receiver, callbackfn, UndefinedConstant(), result,
                     receiver, initial_k, len, UndefinedConstant()));
}

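// ES #sec-array.prototype.reduceright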
TF_BUILTIN(ArrayReduceRight, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
                                argc);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.reduceRight",
      &ArrayBuiltinsAssembler::ReduceResultGenerator,
      &ArrayBuiltinsAssembler::ReduceProcessor,
      &ArrayBuiltinsAssembler::ReducePostLoopAction,
      Builtins::CallableFor(isolate(),
                            Builtins::kArrayReduceRightLoopContinuation),
      MissingPropertyMode::kSkip, ForEachDirection::kReverse);
}

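// ES #sec-%typedarray%.prototype.reduceright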
TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
                                argc);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.reduceRight",
      &ArrayBuiltinsAssembler::ReduceResultGenerator,
      &ArrayBuiltinsAssembler::ReduceProcessor,
      &ArrayBuiltinsAssembler::ReducePostLoopAction,
      ForEachDirection::kReverse);
}

TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinsAssembler) {
|
2018-02-23 13:08:34 +00:00
|
|
|
|
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
|
|
|
|
|
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
|
2017-03-21 17:25:35 +00:00
|
|
|
|
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
|
|
|
|
|
Node* this_arg = Parameter(Descriptor::kThisArg);
|
|
|
|
|
Node* array = Parameter(Descriptor::kArray);
|
2018-03-05 21:32:29 +00:00
|
|
|
|
TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
|
2017-03-21 17:25:35 +00:00
|
|
|
|
Node* initial_k = Parameter(Descriptor::kInitialK);
|
2018-03-05 21:32:29 +00:00
|
|
|
|
TNode<Number> len = CAST(Parameter(Descriptor::kLength));
|
2017-03-22 13:18:26 +00:00
|
|
|
|
Node* to = Parameter(Descriptor::kTo);
|
2017-03-21 17:25:35 +00:00
|
|
|
|
|
2017-03-22 13:18:26 +00:00
|
|
|
|
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
|
|
|
|
|
this_arg, array, object, initial_k,
|
|
|
|
|
len, to);
|
2017-03-21 17:25:35 +00:00
|
|
|
|
|
2017-03-21 15:57:38 +00:00
|
|
|
|
GenerateIteratingArrayBuiltinLoopContinuation(
|
2018-01-28 17:02:11 +00:00
|
|
|
|
&ArrayBuiltinsAssembler::FilterProcessor,
|
|
|
|
|
&ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
|
2017-03-21 15:57:38 +00:00
|
|
|
|
}

TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* array = Parameter(Descriptor::kArray);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* to = Parameter(Descriptor::kTo);

  Return(CallBuiltin(Builtins::kArrayFilterLoopContinuation, context, receiver,
                     callbackfn, this_arg, array, receiver, initial_k, len,
                     to));
}

TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* array = Parameter(Descriptor::kArray);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* value_k = Parameter(Descriptor::kValueK);
  Node* result = Parameter(Descriptor::kResult);

  VARIABLE(to, MachineRepresentation::kTagged, Parameter(Descriptor::kTo));

  // This custom lazy deopt point is right after the callback. filter() needs
  // to pick up at the next step, which is setting the callback result in
  // the output array. After incrementing k and to, we can glide into the loop
  // continuation builtin.

  Label true_continue(this, &to), false_continue(this);

  // iii. If selected is true, then...
  BranchIfToBooleanIsTrue(result, &true_continue, &false_continue);
  BIND(&true_continue);
  {
    // 1. Perform ? CreateDataPropertyOrThrow(A, ToString(to), kValue).
    CallRuntime(Runtime::kCreateDataProperty, context, array, to.value(),
                value_k);
    // 2. Increase to by 1.
    to.Bind(NumberInc(to.value()));
    Goto(&false_continue);
  }
  BIND(&false_continue);

  // Increment k.
  initial_k = NumberInc(initial_k);

  Return(CallBuiltin(Builtins::kArrayFilterLoopContinuation, context, receiver,
                     callbackfn, this_arg, array, receiver, initial_k, len,
                     to.value()));
}
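
// Roughly, the two deopt continuations above differ in where they resume the
// spec loop of Array.prototype.filter: the eager variant re-enters the loop
// continuation before the callback has run for index k, while the lazy variant
// resumes after the callback already produced `result` and only has to store
// the selected value and advance k/to. Illustrative behaviour:
//   [1, 2, 3, 4].filter(x => x % 2 === 0)  // => [2, 4]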

TF_BUILTIN(ArrayFilter, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* this_arg = args.GetOptionalArgumentValue(1);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.filter", &ArrayBuiltinsAssembler::FilterResultGenerator,
      &ArrayBuiltinsAssembler::FilterProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction,
      Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation),
      MissingPropertyMode::kSkip);
}

TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* array = Parameter(Descriptor::kArray);
  TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, array, object, initial_k,
                                            len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinsAssembler::SpecCompliantMapProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}

TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* array = Parameter(Descriptor::kArray);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));

  Return(CallBuiltin(Builtins::kArrayMapLoopContinuation, context, receiver,
                     callbackfn, this_arg, array, receiver, initial_k, len,
                     UndefinedConstant()));
}

TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* array = Parameter(Descriptor::kArray);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  TNode<Number> len = CAST(Parameter(Descriptor::kLength));
  Node* result = Parameter(Descriptor::kResult);

  // This custom lazy deopt point is right after the callback. map() needs
  // to pick up at the next step, which is setting the callback result in
  // the output array. After incrementing k, we can glide into the loop
  // continuation builtin.

  // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
  CallRuntime(Runtime::kCreateDataProperty, context, array, initial_k, result);
  // Then we have to increment k before going on.
  initial_k = NumberInc(initial_k);

  Return(CallBuiltin(Builtins::kArrayMapLoopContinuation, context, receiver,
                     callbackfn, this_arg, array, receiver, initial_k, len,
                     UndefinedConstant()));
}

TF_BUILTIN(ArrayMap, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* this_arg = args.GetOptionalArgumentValue(1);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.map", &ArrayBuiltinsAssembler::MapResultGenerator,
      &ArrayBuiltinsAssembler::FastMapProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction,
      Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation),
      MissingPropertyMode::kSkip);
}
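
// Roughly, ArrayMap runs the FastMapProcessor on fast-elements receivers and
// falls back to the spec-compliant loop continuation otherwise; either way the
// observable result matches the spec, e.g.
//   [1, 2, 3].map(x => x * 2)  // => [2, 4, 6]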

TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) {
  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
  TNode<Object> receiver = args.GetReceiver();
  Node* callbackfn = args.GetOptionalArgumentValue(0);
  Node* this_arg = args.GetOptionalArgumentValue(1);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, argc);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.map",
      &ArrayBuiltinsAssembler::TypedArrayMapResultGenerator,
      &ArrayBuiltinsAssembler::TypedArrayMapProcessor,
      &ArrayBuiltinsAssembler::NullPostLoopAction);
}

TF_BUILTIN(ArrayIsArray, CodeStubAssembler) {
  TNode<Object> object = CAST(Parameter(Descriptor::kArg));
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));

  Label call_runtime(this), return_true(this), return_false(this);

  GotoIf(TaggedIsSmi(object), &return_false);
  TNode<Int32T> instance_type = LoadInstanceType(CAST(object));

  GotoIf(InstanceTypeEqual(instance_type, JS_ARRAY_TYPE), &return_true);

  // TODO(verwaest): Handle proxies in-place.
  Branch(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), &call_runtime,
         &return_false);

  BIND(&return_true);
  Return(TrueConstant());

  BIND(&return_false);
  Return(FalseConstant());

  BIND(&call_runtime);
  Return(CallRuntime(Runtime::kArrayIsArray, context, object));
}
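
// The runtime fall-back above is what makes proxies transparent to the spec
// IsArray operation, e.g.
//   Array.isArray(new Proxy([], {}))  // => true
// while non-array, non-proxy receivers return false without leaving the stub.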

class ArrayIncludesIndexofAssembler : public CodeStubAssembler {
 public:
  explicit ArrayIncludesIndexofAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

  enum SearchVariant { kIncludes, kIndexOf };

  void Generate(SearchVariant variant);
  void GenerateSmiOrObject(SearchVariant variant, Node* context, Node* elements,
                           Node* search_element, Node* array_length,
                           Node* from_index);
  void GeneratePackedDoubles(SearchVariant variant, Node* elements,
                             Node* search_element, Node* array_length,
                             Node* from_index);
  void GenerateHoleyDoubles(SearchVariant variant, Node* elements,
                            Node* search_element, Node* array_length,
                            Node* from_index);
};
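
// The two SearchVariant values share almost all of the code below; the
// observable differences mirror the spec: kIncludes uses SameValueZero and
// treats holes like undefined, while kIndexOf uses Strict Equality and skips
// holes. Illustrative:
//   [NaN].includes(NaN)         // => true
//   [NaN].indexOf(NaN)          // => -1
//   [ , 1].includes(undefined)  // => true
//   [ , 1].indexOf(undefined)   // => -1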

void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
  const int kSearchElementArg = 0;
  const int kFromIndexArg = 1;

  TNode<IntPtrT> argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);

  TNode<Object> receiver = args.GetReceiver();
  TNode<Object> search_element =
      args.GetOptionalArgumentValue(kSearchElementArg);
  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));

  Node* intptr_zero = IntPtrConstant(0);

  Label init_index(this), return_not_found(this), call_runtime(this);

  // Take slow path if not a JSArray, if retrieving elements requires
  // traversing prototype, or if access checks are required.
  BranchIfFastJSArray(receiver, context, &init_index, &call_runtime);

  BIND(&init_index);
  VARIABLE(index_var, MachineType::PointerRepresentation(), intptr_zero);
  TNode<JSArray> array = CAST(receiver);

  // JSArray length is always a positive Smi for fast arrays.
  CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array)));
  Node* array_length = LoadFastJSArrayLength(array);
  Node* array_length_untagged = SmiUntag(array_length);

  {
    // Initialize fromIndex.
    Label is_smi(this), is_nonsmi(this), done(this);

    // If no fromIndex was passed, default to 0.
    GotoIf(IntPtrLessThanOrEqual(argc, IntPtrConstant(kFromIndexArg)), &done);

    Node* start_from = args.AtIndex(kFromIndexArg);
    // Handle Smis and undefined here and everything else in runtime.
    // We must be very careful with side effects from the ToInteger conversion,
    // as the side effects might render previously checked assumptions about
    // the receiver being a fast JSArray and its length invalid.
    Branch(TaggedIsSmi(start_from), &is_smi, &is_nonsmi);

    BIND(&is_nonsmi);
    {
      GotoIfNot(IsUndefined(start_from), &call_runtime);
      Goto(&done);
    }
    BIND(&is_smi);
    {
      Node* intptr_start_from = SmiUntag(start_from);
      index_var.Bind(intptr_start_from);

      GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done);
      // The fromIndex is negative: add it to the array's length.
      index_var.Bind(IntPtrAdd(array_length_untagged, index_var.value()));
      // Clamp negative results at zero.
      GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done);
      index_var.Bind(intptr_zero);
      Goto(&done);
    }
    BIND(&done);
  }
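
  // Negative fromIndex values count back from the end and clamp at zero, e.g.
  //   [1, 2, 3].includes(1, -2)  // => false (search starts at index 1)
  //   [1, 2, 3].indexOf(3, -1)   // => 2
  //   [1, 2, 3].includes(1, -5)  // => true (clamped to index 0)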

  // Fail early if startIndex >= array.length.
  GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), array_length_untagged),
         &return_not_found);

  Label if_smiorobjects(this), if_packed_doubles(this), if_holey_doubles(this);

  Node* elements_kind = LoadMapElementsKind(LoadMap(array));
  Node* elements = LoadElements(array);
  STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
  STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(PACKED_ELEMENTS == 2);
  STATIC_ASSERT(HOLEY_ELEMENTS == 3);
  GotoIf(Uint32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_ELEMENTS)),
         &if_smiorobjects);
  GotoIf(Word32Equal(elements_kind, Int32Constant(PACKED_DOUBLE_ELEMENTS)),
         &if_packed_doubles);
  GotoIf(Word32Equal(elements_kind, Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
         &if_holey_doubles);
  Goto(&return_not_found);

  BIND(&if_smiorobjects);
  {
    Callable callable =
        (variant == kIncludes)
            ? Builtins::CallableFor(isolate(),
                                    Builtins::kArrayIncludesSmiOrObject)
            : Builtins::CallableFor(isolate(),
                                    Builtins::kArrayIndexOfSmiOrObject);
    Node* result = CallStub(callable, context, elements, search_element,
                            array_length, SmiTag(index_var.value()));
    args.PopAndReturn(result);
  }

  BIND(&if_packed_doubles);
  {
    Callable callable =
        (variant == kIncludes)
            ? Builtins::CallableFor(isolate(),
                                    Builtins::kArrayIncludesPackedDoubles)
            : Builtins::CallableFor(isolate(),
                                    Builtins::kArrayIndexOfPackedDoubles);
    Node* result = CallStub(callable, context, elements, search_element,
                            array_length, SmiTag(index_var.value()));
    args.PopAndReturn(result);
  }

  BIND(&if_holey_doubles);
  {
    Callable callable =
        (variant == kIncludes)
            ? Builtins::CallableFor(isolate(),
                                    Builtins::kArrayIncludesHoleyDoubles)
            : Builtins::CallableFor(isolate(),
                                    Builtins::kArrayIndexOfHoleyDoubles);
    Node* result = CallStub(callable, context, elements, search_element,
                            array_length, SmiTag(index_var.value()));
    args.PopAndReturn(result);
  }

  BIND(&return_not_found);
  if (variant == kIncludes) {
    args.PopAndReturn(FalseConstant());
  } else {
    args.PopAndReturn(NumberConstant(-1));
  }

  BIND(&call_runtime);
  {
    Node* start_from =
        args.GetOptionalArgumentValue(kFromIndexArg, UndefinedConstant());
    Runtime::FunctionId function = variant == kIncludes
                                       ? Runtime::kArrayIncludes_Slow
                                       : Runtime::kArrayIndexOf;
    args.PopAndReturn(
        CallRuntime(function, context, array, search_element, start_from));
  }
}

void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
    SearchVariant variant, Node* context, Node* elements, Node* search_element,
    Node* array_length, Node* from_index) {
  VARIABLE(index_var, MachineType::PointerRepresentation(),
           SmiUntag(from_index));
  VARIABLE(search_num, MachineRepresentation::kFloat64);
  Node* array_length_untagged = SmiUntag(array_length);

  Label ident_loop(this, &index_var), heap_num_loop(this, &search_num),
      string_loop(this), bigint_loop(this, &index_var),
      undef_loop(this, &index_var), not_smi(this), not_heap_num(this),
      return_found(this), return_not_found(this);

  GotoIfNot(TaggedIsSmi(search_element), &not_smi);
  search_num.Bind(SmiToFloat64(search_element));
  Goto(&heap_num_loop);

  BIND(&not_smi);
  if (variant == kIncludes) {
    GotoIf(IsUndefined(search_element), &undef_loop);
  }
  Node* map = LoadMap(search_element);
  GotoIfNot(IsHeapNumberMap(map), &not_heap_num);
  search_num.Bind(LoadHeapNumberValue(search_element));
  Goto(&heap_num_loop);

  BIND(&not_heap_num);
  Node* search_type = LoadMapInstanceType(map);
  GotoIf(IsStringInstanceType(search_type), &string_loop);
  GotoIf(IsBigIntInstanceType(search_type), &bigint_loop);
  Goto(&ident_loop);

  BIND(&ident_loop);
  {
    GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
              &return_not_found);
    Node* element_k = LoadFixedArrayElement(elements, index_var.value());
    GotoIf(WordEqual(element_k, search_element), &return_found);

    Increment(&index_var);
    Goto(&ident_loop);
  }

  if (variant == kIncludes) {
    BIND(&undef_loop);

    GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
              &return_not_found);
    Node* element_k = LoadFixedArrayElement(elements, index_var.value());
    GotoIf(IsUndefined(element_k), &return_found);
    GotoIf(IsTheHole(element_k), &return_found);

    Increment(&index_var);
    Goto(&undef_loop);
  }
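
  // The undef_loop above is what makes kIncludes report holes as undefined in
  // Smi/object-elements arrays, e.g. [1, , 3].includes(undefined)  // => true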

  BIND(&heap_num_loop);
  {
    Label nan_loop(this, &index_var), not_nan_loop(this, &index_var);
    Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
    BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);

    BIND(&not_nan_loop);
    {
      Label continue_loop(this), not_smi(this);
      GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
                &return_not_found);
      Node* element_k = LoadFixedArrayElement(elements, index_var.value());
      GotoIfNot(TaggedIsSmi(element_k), &not_smi);
      Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
             &return_found, &continue_loop);

      BIND(&not_smi);
      GotoIfNot(IsHeapNumber(element_k), &continue_loop);
      Branch(Float64Equal(search_num.value(), LoadHeapNumberValue(element_k)),
             &return_found, &continue_loop);

      BIND(&continue_loop);
      Increment(&index_var);
      Goto(&not_nan_loop);
    }

    // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
    if (variant == kIncludes) {
      BIND(&nan_loop);
      Label continue_loop(this);
      GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
                &return_not_found);
      Node* element_k = LoadFixedArrayElement(elements, index_var.value());
      GotoIf(TaggedIsSmi(element_k), &continue_loop);
      GotoIfNot(IsHeapNumber(CAST(element_k)), &continue_loop);
      BranchIfFloat64IsNaN(LoadHeapNumberValue(element_k), &return_found,
                           &continue_loop);

      BIND(&continue_loop);
      Increment(&index_var);
      Goto(&nan_loop);
    }
  }
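
  // The Float64Equal comparison above also means +0 and -0 match under both
  // variants, e.g. [-0].indexOf(0) // => 0 and [0].includes(-0) // => true.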

  BIND(&string_loop);
  {
    TNode<String> search_element_string = CAST(search_element);
    Label continue_loop(this), next_iteration(this, &index_var),
        slow_compare(this), runtime(this, Label::kDeferred);
    TNode<IntPtrT> search_length =
        LoadStringLengthAsWord(search_element_string);
    Goto(&next_iteration);
    BIND(&next_iteration);
    GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
              &return_not_found);
    Node* element_k = LoadFixedArrayElement(elements, index_var.value());
    GotoIf(TaggedIsSmi(element_k), &continue_loop);
    GotoIf(WordEqual(search_element_string, element_k), &return_found);
    Node* element_k_type = LoadInstanceType(element_k);
    GotoIfNot(IsStringInstanceType(element_k_type), &continue_loop);
    Branch(WordEqual(search_length, LoadStringLengthAsWord(element_k)),
           &slow_compare, &continue_loop);

    BIND(&slow_compare);
    StringBuiltinsAssembler string_asm(state());
    string_asm.StringEqual_Core(context, search_element_string, search_type,
                                element_k, element_k_type, search_length,
                                &return_found, &continue_loop, &runtime);
    BIND(&runtime);
    TNode<Object> result = CallRuntime(Runtime::kStringEqual, context,
                                       search_element_string, element_k);
    Branch(WordEqual(result, TrueConstant()), &return_found, &continue_loop);

    BIND(&continue_loop);
    Increment(&index_var);
    Goto(&next_iteration);
  }
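
  // String elements are compared cheaply first (pointer identity, then
  // length) before StringEqual_Core does the character-wise comparison or
  // defers to the runtime, e.g.
  //   ['foo', 'bar'].indexOf('ba' + 'r')  // => 1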

  BIND(&bigint_loop);
  {
    GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
              &return_not_found);

    Node* element_k = LoadFixedArrayElement(elements, index_var.value());
    Label continue_loop(this);
    GotoIf(TaggedIsSmi(element_k), &continue_loop);
    GotoIfNot(IsBigInt(CAST(element_k)), &continue_loop);
    TNode<Object> result = CallRuntime(Runtime::kBigIntEqualToBigInt, context,
                                       search_element, element_k);
    Branch(WordEqual(result, TrueConstant()), &return_found, &continue_loop);

    BIND(&continue_loop);
    Increment(&index_var);
    Goto(&bigint_loop);
  }

  BIND(&return_found);
  if (variant == kIncludes) {
    Return(TrueConstant());
  } else {
    Return(SmiTag(index_var.value()));
  }

  BIND(&return_not_found);
  if (variant == kIncludes) {
    Return(FalseConstant());
  } else {
    Return(NumberConstant(-1));
  }
}

void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant,
                                                          Node* elements,
                                                          Node* search_element,
                                                          Node* array_length,
                                                          Node* from_index) {
  VARIABLE(index_var, MachineType::PointerRepresentation(),
           SmiUntag(from_index));
  Node* array_length_untagged = SmiUntag(array_length);

  Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
      hole_loop(this, &index_var), search_notnan(this), return_found(this),
      return_not_found(this);
  VARIABLE(search_num, MachineRepresentation::kFloat64);
  search_num.Bind(Float64Constant(0));

  GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
  search_num.Bind(SmiToFloat64(search_element));
  Goto(&not_nan_loop);

  BIND(&search_notnan);
  GotoIfNot(IsHeapNumber(search_element), &return_not_found);

  search_num.Bind(LoadHeapNumberValue(search_element));

  Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
  BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);

  BIND(&not_nan_loop);
  {
    Label continue_loop(this);
    GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
              &return_not_found);
    Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
                                                  MachineType::Float64());
    Branch(Float64Equal(element_k, search_num.value()), &return_found,
           &continue_loop);
    BIND(&continue_loop);
    Increment(&index_var);
    Goto(&not_nan_loop);
  }

  // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
  if (variant == kIncludes) {
    BIND(&nan_loop);
    Label continue_loop(this);
    GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
              &return_not_found);
    Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
                                                  MachineType::Float64());
    BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
    BIND(&continue_loop);
    Increment(&index_var);
    Goto(&nan_loop);
  }

  BIND(&return_found);
  if (variant == kIncludes) {
    Return(TrueConstant());
  } else {
    Return(SmiTag(index_var.value()));
  }

  BIND(&return_not_found);
  if (variant == kIncludes) {
    Return(FalseConstant());
  } else {
    Return(NumberConstant(-1));
  }
}

void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant,
                                                         Node* elements,
                                                         Node* search_element,
                                                         Node* array_length,
                                                         Node* from_index) {
  VARIABLE(index_var, MachineType::PointerRepresentation(),
           SmiUntag(from_index));
  Node* array_length_untagged = SmiUntag(array_length);

  Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
      hole_loop(this, &index_var), search_notnan(this), return_found(this),
      return_not_found(this);
  VARIABLE(search_num, MachineRepresentation::kFloat64);
  search_num.Bind(Float64Constant(0));

  GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
  search_num.Bind(SmiToFloat64(search_element));
  Goto(&not_nan_loop);

  BIND(&search_notnan);
  if (variant == kIncludes) {
    GotoIf(IsUndefined(search_element), &hole_loop);
  }
  GotoIfNot(IsHeapNumber(search_element), &return_not_found);

  search_num.Bind(LoadHeapNumberValue(search_element));

  Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
  BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);

  BIND(&not_nan_loop);
  {
    Label continue_loop(this);
    GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
              &return_not_found);

    // No need for hole checking here; the following Float64Equal will
    // return 'not equal' for holes anyway.
    Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
                                                  MachineType::Float64());

    Branch(Float64Equal(element_k, search_num.value()), &return_found,
           &continue_loop);
    BIND(&continue_loop);
    Increment(&index_var);
    Goto(&not_nan_loop);
  }

  // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
  if (variant == kIncludes) {
    BIND(&nan_loop);
    Label continue_loop(this);
    GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
              &return_not_found);

    // Load double value or continue if it's the hole NaN.
    Node* element_k = LoadFixedDoubleArrayElement(
        elements, index_var.value(), MachineType::Float64(), 0,
        INTPTR_PARAMETERS, &continue_loop);

    BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
    BIND(&continue_loop);
    Increment(&index_var);
    Goto(&nan_loop);
  }

  // Array.p.includes treats the hole as undefined.
  if (variant == kIncludes) {
    BIND(&hole_loop);
    GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
              &return_not_found);

    // Check if the element is a double hole, but don't load it.
    LoadFixedDoubleArrayElement(elements, index_var.value(),
                                MachineType::None(), 0, INTPTR_PARAMETERS,
                                &return_found);

    Increment(&index_var);
    Goto(&hole_loop);
  }

  BIND(&return_found);
  if (variant == kIncludes) {
    Return(TrueConstant());
  } else {
    Return(SmiTag(index_var.value()));
  }

  BIND(&return_not_found);
  if (variant == kIncludes) {
    Return(FalseConstant());
  } else {
    Return(NumberConstant(-1));
  }
}
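
// In a holey double array the hole is encoded as a special NaN bit pattern,
// which is why kIncludes needs the dedicated hole_loop above to report it as
// undefined:
//   [1.5, , 2.5].includes(undefined)  // => true
//   [1.5, , 2.5].indexOf(undefined)   // => -1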

TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) {
  Generate(kIncludes);
}

TF_BUILTIN(ArrayIncludesSmiOrObject, ArrayIncludesIndexofAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* elements = Parameter(Descriptor::kElements);
  Node* search_element = Parameter(Descriptor::kSearchElement);
  Node* array_length = Parameter(Descriptor::kLength);
  Node* from_index = Parameter(Descriptor::kFromIndex);

  GenerateSmiOrObject(kIncludes, context, elements, search_element,
                      array_length, from_index);
}

TF_BUILTIN(ArrayIncludesPackedDoubles, ArrayIncludesIndexofAssembler) {
  Node* elements = Parameter(Descriptor::kElements);
  Node* search_element = Parameter(Descriptor::kSearchElement);
  Node* array_length = Parameter(Descriptor::kLength);
  Node* from_index = Parameter(Descriptor::kFromIndex);

  GeneratePackedDoubles(kIncludes, elements, search_element, array_length,
                        from_index);
}

TF_BUILTIN(ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) {
  Node* elements = Parameter(Descriptor::kElements);
  Node* search_element = Parameter(Descriptor::kSearchElement);
  Node* array_length = Parameter(Descriptor::kLength);
  Node* from_index = Parameter(Descriptor::kFromIndex);

  GenerateHoleyDoubles(kIncludes, elements, search_element, array_length,
                       from_index);
}

TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) { Generate(kIndexOf); }

TF_BUILTIN(ArrayIndexOfSmiOrObject, ArrayIncludesIndexofAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* elements = Parameter(Descriptor::kElements);
  Node* search_element = Parameter(Descriptor::kSearchElement);
  Node* array_length = Parameter(Descriptor::kLength);
  Node* from_index = Parameter(Descriptor::kFromIndex);

  GenerateSmiOrObject(kIndexOf, context, elements, search_element, array_length,
                      from_index);
}

TF_BUILTIN(ArrayIndexOfPackedDoubles, ArrayIncludesIndexofAssembler) {
  Node* elements = Parameter(Descriptor::kElements);
  Node* search_element = Parameter(Descriptor::kSearchElement);
  Node* array_length = Parameter(Descriptor::kLength);
  Node* from_index = Parameter(Descriptor::kFromIndex);

  GeneratePackedDoubles(kIndexOf, elements, search_element, array_length,
                        from_index);
}

TF_BUILTIN(ArrayIndexOfHoleyDoubles, ArrayIncludesIndexofAssembler) {
  Node* elements = Parameter(Descriptor::kElements);
  Node* search_element = Parameter(Descriptor::kSearchElement);
  Node* array_length = Parameter(Descriptor::kLength);
  Node* from_index = Parameter(Descriptor::kFromIndex);

  GenerateHoleyDoubles(kIndexOf, elements, search_element, array_length,
                       from_index);
}

// ES #sec-array.prototype.values
TF_BUILTIN(ArrayPrototypeValues, CodeStubAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Return(CreateArrayIterator(context, ToObject(context, receiver),
                             IterationKind::kValues));
}

// ES #sec-array.prototype.entries
TF_BUILTIN(ArrayPrototypeEntries, CodeStubAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Return(CreateArrayIterator(context, ToObject(context, receiver),
                             IterationKind::kEntries));
}

// ES #sec-array.prototype.keys
TF_BUILTIN(ArrayPrototypeKeys, CodeStubAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  Return(CreateArrayIterator(context, ToObject(context, receiver),
                             IterationKind::kKeys));
}
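
// The three builtins above only allocate the iterator; the iteration state is
// consumed lazily by %ArrayIteratorPrototype%.next below, e.g.
//   [...['a', 'b'].keys()]     // => [0, 1]
//   [...['a', 'b'].entries()]  // => [[0, 'a'], [1, 'b']]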
|
|
|
|
|
|
[es2015] Refactor the JSArrayIterator.
This changes the JSArrayIterator to always have only a single instance
type, instead of the zoo of instance types that we had before, and
which became less useful with the specification update to when "next"
is loaded from the iterator now. This greatly simplifies the baseline
implementation of the array iterator, which now only looks at the
iterated object during %ArrayIteratorPrototype%.next invocations.
In TurboFan we introduce a new JSCreateArrayIterator operator, that
holds the IterationKind and get's the iterated object as input. When
optimizing %ArrayIteratorPrototype%.next in the JSCallReducer, we
check whether the receiver is a JSCreateArrayIterator, and if so,
we try to infer maps for the iterated object from there. If we find
any, we speculatively assume that these won't have changed during
iteration (as we did before with the previous approach), and generate
fast code for both JSArray and JSTypedArray iteration.
Drive-by-fix: Drop the fast_array_iteration protector, it's not
necessary anymore since we have the deoptimization guard bit in
the JSCallReducer now.
This addresses the performance cliff noticed in webpack 4. The minimal
repro on the tracking bug goes from
console.timeEnd: mono, 124.773000
console.timeEnd: poly, 670.353000
to
console.timeEnd: mono, 118.709000
console.timeEnd: poly, 141.393000
so that's a 4.7x improvement.
Also make presubmit happy by adding the missing #undef's.
Bug: v8:7510, v7:7514
Change-Id: I79a46bfa2cd0f0710e09365ef72519b1bbb667b5
Reviewed-on: https://chromium-review.googlesource.com/946098
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Commit-Queue: Benedikt Meurer <bmeurer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51725}
2018-03-02 19:31:01 +00:00
|
|
|
|
// ES #sec-%arrayiteratorprototype%.next
|
2017-03-16 11:32:01 +00:00
|
|
|
|
TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
  const char* method_name = "Array Iterator.prototype.next";

  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  Node* iterator = Parameter(Descriptor::kReceiver);

  VARIABLE(var_value, MachineRepresentation::kTagged);
  VARIABLE(var_done, MachineRepresentation::kTagged);

  // Required, or else `throw_bad_receiver` fails a DCHECK due to these
  // variables not being bound along all paths, despite not being used.
  var_done.Bind(TrueConstant());
  var_value.Bind(UndefinedConstant());

  Label throw_bad_receiver(this, Label::kDeferred);
  Label set_done(this);
  Label allocate_entry_if_needed(this);
  Label allocate_iterator_result(this);

  // If O does not have all of the internal slots of an Array Iterator Instance
  // (22.1.5.3), throw a TypeError exception
  GotoIf(TaggedIsSmi(iterator), &throw_bad_receiver);
  GotoIfNot(IsJSArrayIterator(CAST(iterator)), &throw_bad_receiver);

  // Let a be O.[[IteratedObject]].
  Node* array =
      LoadObjectField(iterator, JSArrayIterator::kIteratedObjectOffset);

  // Let index be O.[[ArrayIteratorNextIndex]].
  Node* index = LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset);
  Node* array_map = LoadMap(array);

  Label if_detached(this, Label::kDeferred);

  Label if_typedarray(this), if_other(this, Label::kDeferred), if_array(this),
      if_generic(this, Label::kDeferred);
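
  // Dispatch on the instance type of the iterated object: JSArray and
  // JSTypedArray get dedicated fast paths, everything else falls through to
  // the spec-compliant generic handling under if_other.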
  Node* array_type = LoadInstanceType(array);
  GotoIf(InstanceTypeEqual(array_type, JS_ARRAY_TYPE), &if_array);
  Branch(InstanceTypeEqual(array_type, JS_TYPED_ARRAY_TYPE), &if_typedarray,
         &if_other);

  BIND(&if_array);
  {
    // We can only handle fast elements here.
    Node* elements_kind = LoadMapElementsKind(array_map);
    GotoIfNot(IsFastElementsKind(elements_kind), &if_other);

    TNode<Smi> length = CAST(LoadJSArrayLength(array));
    GotoIfNot(SmiBelow(CAST(index), length), &set_done);
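
    // In bounds: the key is the current index. Advance
    // [[ArrayIteratorNextIndex]] eagerly; for a keys-only iterator the
    // element load below is skipped entirely.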
    var_value.Bind(index);
    TNode<Smi> one = SmiConstant(1);
    StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
                                   SmiAdd(CAST(index), one));
    var_done.Bind(FalseConstant());

    GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
                           iterator, JSArrayIterator::kKindOffset),
                       Int32Constant(static_cast<int>(IterationKind::kKeys))),
           &allocate_iterator_result);
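
    // Load the element for value/entry iteration, dispatching on the elements
    // kind so packed, holey, and double backing stores each get a specialized
    // load.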
    Node* elements = LoadElements(array);
    Label if_packed(this), if_holey(this), if_packed_double(this),
        if_holey_double(this), if_unknown_kind(this, Label::kDeferred);
    int32_t kinds[] = {// Handled by if_packed.
                       PACKED_SMI_ELEMENTS, PACKED_ELEMENTS,
                       // Handled by if_holey.
                       HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS,
                       // Handled by if_packed_double.
                       PACKED_DOUBLE_ELEMENTS,
                       // Handled by if_holey_double.
                       HOLEY_DOUBLE_ELEMENTS};
    Label* labels[] = {// PACKED_{SMI,}_ELEMENTS
                       &if_packed, &if_packed,
                       // HOLEY_{SMI,}_ELEMENTS
                       &if_holey, &if_holey,
                       // PACKED_DOUBLE_ELEMENTS
                       &if_packed_double,
                       // HOLEY_DOUBLE_ELEMENTS
                       &if_holey_double};
    Switch(elements_kind, &if_unknown_kind, kinds, labels, arraysize(kinds));

    BIND(&if_packed);
    {
      var_value.Bind(LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS));
      Goto(&allocate_entry_if_needed);
    }

    BIND(&if_holey);
    {
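      // A hole means the element is absent on the array itself. While the
      // no-elements protector is intact nothing on the prototype chain can
      // supply it either, so the hole maps to undefined; if the protector has
      // been invalidated, fall back to the generic property load.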
      Node* element = LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS);
      var_value.Bind(element);
      GotoIfNot(WordEqual(element, TheHoleConstant()),
                &allocate_entry_if_needed);
      GotoIf(IsNoElementsProtectorCellInvalid(), &if_generic);
      var_value.Bind(UndefinedConstant());
      Goto(&allocate_entry_if_needed);
    }

    BIND(&if_packed_double);
    {
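      // Double elements are stored unboxed, so the raw float64 is boxed into
      // a fresh HeapNumber before being handed out as the value.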
      Node* value = LoadFixedDoubleArrayElement(
          elements, index, MachineType::Float64(), 0, SMI_PARAMETERS);
      var_value.Bind(AllocateHeapNumberWithValue(value));
      Goto(&allocate_entry_if_needed);
    }

    BIND(&if_holey_double);
    {
      Label if_hole(this, Label::kDeferred);
      Node* value = LoadFixedDoubleArrayElement(
          elements, index, MachineType::Float64(), 0, SMI_PARAMETERS, &if_hole);
      var_value.Bind(AllocateHeapNumberWithValue(value));
      Goto(&allocate_entry_if_needed);
      BIND(&if_hole);
      GotoIf(IsNoElementsProtectorCellInvalid(), &if_generic);
      var_value.Bind(UndefinedConstant());
      Goto(&allocate_entry_if_needed);
    }

    BIND(&if_unknown_kind);
    Unreachable();
  }

  BIND(&if_other);
  {
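    // Generic iterated objects (neither JSArray nor JSTypedArray): follow the
    // spec steps directly, reading "length" via ToLength and bumping the
    // index with generic number arithmetic.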
    // If a is undefined, return CreateIterResultObject(undefined, true)
    GotoIf(IsUndefined(array), &allocate_iterator_result);

    Node* length =
        CallBuiltin(Builtins::kToLength, context,
                    GetProperty(context, array, factory()->length_string()));

    GotoIfNumberGreaterThanOrEqual(index, length, &set_done);

    var_value.Bind(index);
    StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
                     NumberInc(index));
    var_done.Bind(FalseConstant());

    GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
                           iterator, JSArrayIterator::kKindOffset),
                       Int32Constant(static_cast<int>(IterationKind::kKeys))),
           &allocate_iterator_result);
    Goto(&if_generic);
  }

  BIND(&if_generic);
  {
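    // Shared slow path: load the element with a generic keyed property
    // lookup. Reached from the holey fast paths when the no-elements
    // protector is invalid and from the if_other path above.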
    var_value.Bind(GetProperty(context, array, index));
    Goto(&allocate_entry_if_needed);
  }

  BIND(&if_typedarray);
  {
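    // The backing ArrayBuffer may have been detached since the iterator was
    // created, so check that first and bail out to the deferred if_detached
    // path before touching the length or the element data.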
    Node* buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
    GotoIf(IsDetachedBuffer(buffer), &if_detached);

    TNode<Smi> length =
        CAST(LoadObjectField(array, JSTypedArray::kLengthOffset));
    GotoIfNot(SmiBelow(CAST(index), length), &set_done);

    var_value.Bind(index);
    TNode<Smi> one = SmiConstant(1);
    StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
                                   SmiAdd(CAST(index), one));
    var_done.Bind(FalseConstant());

    GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
                           iterator, JSArrayIterator::kKindOffset),
                       Int32Constant(static_cast<int>(IterationKind::kKeys))),
           &allocate_iterator_result);
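
    // Compute the raw data pointer of the typed array's backing store
    // (on-heap base pointer plus external pointer) so the element can be
    // loaded with a type-specialized access.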
    Node* elements_kind = LoadMapElementsKind(array_map);
    Node* elements = LoadElements(array);
    Node* base_ptr =
        LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
    Node* external_ptr =
        LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
                        MachineType::Pointer());
    Node* data_ptr = IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);

    Label if_unknown_type(this, Label::kDeferred);
    int32_t elements_kinds[] = {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) TYPE##_ELEMENTS,
        TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
    };
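
    // TYPED_ARRAYS expands once per typed-array type, so the elements-kind
    // table above and the per-type labels and label table below stay in the
    // same order, entry for entry.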
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
  Label if_##type##array(this);
    TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
    Label* elements_kind_labels[] = {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) &if_##type##array,
        TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
    };
    STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels));
    Switch(elements_kind, &if_unknown_type, elements_kinds,
           elements_kind_labels, arraysize(elements_kinds));
    BIND(&if_unknown_type);
    Unreachable();
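    // The deferred if_unknown_type label only exists to give Switch() a
    // default target; a live JSTypedArray always has one of the typed-array
    // elements kinds listed above, so this path is never expected to be hit.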
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size)     \
  BIND(&if_##type##array);                                   \
  {                                                          \
    var_value.Bind(LoadFixedTypedArrayElementAsTagged(       \
        data_ptr, index, TYPE##_ELEMENTS, SMI_PARAMETERS));  \
    Goto(&allocate_entry_if_needed);                         \
  }
    TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
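    // Each expanded case loads the element at {index} from the typed array's
    // backing store ({data_ptr}) as a tagged value appropriate for that
    // elements kind (e.g. a Smi when the value fits, otherwise a HeapNumber)
    // and then continues at allocate_entry_if_needed.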
    BIND(&if_detached);
    ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
  }
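  // At the JS level the if_detached path above is what makes a typed-array
  // iterator's next() throw a TypeError once the underlying ArrayBuffer has
  // been detached (for example after the buffer was transferred via
  // postMessage).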
  BIND(&set_done);
  {
    StoreObjectFieldNoWriteBarrier(
        iterator, JSArrayIterator::kIteratedObjectOffset, UndefinedConstant());
    Goto(&allocate_iterator_result);
  }
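  // Clearing the iterated-object field is how later next() calls recognize an
  // exhausted iterator and keep returning the terminal result, e.g.:
  //   const it = [1].values();
  //   it.next();  // {value: 1, done: false}
  //   it.next();  // {value: undefined, done: true}
  //   it.next();  // {value: undefined, done: true} (stays done)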
  BIND(&allocate_entry_if_needed);
  {
    GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
                           iterator, JSArrayIterator::kKindOffset),
                       Int32Constant(static_cast<int>(IterationKind::kValues))),
           &allocate_iterator_result);
    Node* elements = AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
    StoreFixedArrayElement(elements, 0, index, SKIP_WRITE_BARRIER);
    StoreFixedArrayElement(elements, 1, var_value.value(), SKIP_WRITE_BARRIER);

    Node* entry = Allocate(JSArray::kSize);
    Node* map = LoadContextElement(LoadNativeContext(context),
                                   Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);

    StoreMapNoWriteBarrier(entry, map);
    StoreObjectFieldRoot(entry, JSArray::kPropertiesOrHashOffset,
                         Heap::kEmptyFixedArrayRootIndex);
    StoreObjectFieldNoWriteBarrier(entry, JSArray::kElementsOffset, elements);
    StoreObjectFieldNoWriteBarrier(entry, JSArray::kLengthOffset,
                                   SmiConstant(2));

    var_value.Bind(entry);
    Goto(&allocate_iterator_result);
  }
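  // For an entries() iterator the value handed to allocate_iterator_result is
  // the freshly built two-element [key, value] array, e.g.:
  //   const it = ['a', 'b'].entries();
  //   it.next().value;  // [0, 'a']
  //   it.next().value;  // [1, 'b']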
  BIND(&allocate_iterator_result);
  {
    Node* result = Allocate(JSIteratorResult::kSize);
    Node* map = LoadContextElement(LoadNativeContext(context),
                                   Context::ITERATOR_RESULT_MAP_INDEX);
    StoreMapNoWriteBarrier(result, map);
    StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset,
                         Heap::kEmptyFixedArrayRootIndex);
    StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
                         Heap::kEmptyFixedArrayRootIndex);
    StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset,
                                   var_value.value());
    StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset,
                                   var_done.value());
    Return(result);
  }
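  // The returned iterator result is a plain {value, done} object using the
  // native context's iterator-result map. The stores can typically skip the
  // write barrier because {result} itself was just allocated in the young
  // generation, so filling it in does not create old-to-new references that
  // the barrier would have to record.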
  BIND(&throw_bad_receiver);
  {
    // The {receiver} is not a valid JSArrayIterator.
    ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
                   StringConstant(method_name), iterator);
  }
}
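// Observable from JS: extracting the next method and calling it on something
// that is not an array iterator hits throw_bad_receiver, e.g.
//   [].values().next.call({});  // throws a TypeError (incompatible receiver)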
namespace {

class ArrayFlattenAssembler : public CodeStubAssembler {
 public:
  explicit ArrayFlattenAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

  // https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
  Node* FlattenIntoArray(Node* context, Node* target, Node* source,
                         Node* source_length, Node* start, Node* depth,
                         Node* mapper_function = nullptr,
                         Node* this_arg = nullptr) {
    CSA_ASSERT(this, IsJSReceiver(target));
    CSA_ASSERT(this, IsJSReceiver(source));
    CSA_ASSERT(this, IsNumberPositive(source_length));
    CSA_ASSERT(this, IsNumberPositive(start));
    CSA_ASSERT(this, IsNumber(depth));

    // 1. Let targetIndex be start.
    VARIABLE(var_target_index, MachineRepresentation::kTagged, start);

    // 2. Let sourceIndex be 0.
    VARIABLE(var_source_index, MachineRepresentation::kTagged, SmiConstant(0));

    // 3. Repeat...
    Label loop(this, {&var_target_index, &var_source_index}), done_loop(this);
    Goto(&loop);
    BIND(&loop);
    {
      Node* const source_index = var_source_index.value();
      Node* const target_index = var_target_index.value();

      // ...while sourceIndex < sourceLen
      GotoIfNumberGreaterThanOrEqual(source_index, source_length, &done_loop);

      // a. Let P be ! ToString(sourceIndex).
      // b. Let exists be ? HasProperty(source, P).
      CSA_ASSERT(this,
                 SmiGreaterThanOrEqual(CAST(source_index), SmiConstant(0)));
      Node* const exists =
          HasProperty(source, source_index, context, kHasProperty);

      // c. If exists is true, then
      Label next(this);
      GotoIfNot(IsTrue(exists), &next);
      {
        // i. Let element be ? Get(source, P).
        Node* element = GetProperty(context, source, source_index);

        // ii. If mapperFunction is present, then
        if (mapper_function != nullptr) {
          CSA_ASSERT(this, Word32Or(IsUndefined(mapper_function),
                                    IsCallable(mapper_function)));
          DCHECK_NOT_NULL(this_arg);

          // 1. Set element to ? Call(mapperFunction, thisArg, « element,
          //                          sourceIndex, source »).
          element =
              CallJS(CodeFactory::Call(isolate()), context, mapper_function,
                     this_arg, element, source_index, source);
        }
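        // When a mapper is supplied (the Array.prototype.flatMap path), it is
        // called like a map() callback with (element, sourceIndex, source) and
        // its return value is what gets flattened, e.g.
        //   ['it', 'is'].flatMap(s => s.split(''));  // ['i', 't', 'i', 's']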
        // iii. Let shouldFlatten be false.
        Label if_flatten_array(this), if_flatten_proxy(this, Label::kDeferred),
            if_noflatten(this);
        // iv. If depth > 0, then
        GotoIfNumberGreaterThanOrEqual(SmiConstant(0), depth, &if_noflatten);
        // 1. Set shouldFlatten to ? IsArray(element).
        GotoIf(TaggedIsSmi(element), &if_noflatten);
        GotoIf(IsJSArray(element), &if_flatten_array);
        GotoIfNot(IsJSProxy(element), &if_noflatten);
        Branch(IsTrue(CallRuntime(Runtime::kArrayIsArray, context, element)),
               &if_flatten_proxy, &if_noflatten);

        BIND(&if_flatten_array);
        {
          CSA_ASSERT(this, IsJSArray(element));

          // 1. Let elementLen be ? ToLength(? Get(element, "length")).
          Node* const element_length =
              LoadObjectField(element, JSArray::kLengthOffset);

          // 2. Set targetIndex to ? FlattenIntoArray(target, element,
          //                                          elementLen, targetIndex,
          //                                          depth - 1).
          var_target_index.Bind(
              CallBuiltin(Builtins::kFlattenIntoArray, context, target, element,
                          element_length, target_index, NumberDec(depth)));
          Goto(&next);
        }

        BIND(&if_flatten_proxy);
        {
          CSA_ASSERT(this, IsJSProxy(element));

          // 1. Let elementLen be ? ToLength(? Get(element, "length")).
          Node* const element_length = ToLength_Inline(
              context, GetProperty(context, element, LengthStringConstant()));

          // 2. Set targetIndex to ? FlattenIntoArray(target, element,
          //                                          elementLen, targetIndex,
          //                                          depth - 1).
          var_target_index.Bind(
              CallBuiltin(Builtins::kFlattenIntoArray, context, target, element,
                          element_length, target_index, NumberDec(depth)));
          Goto(&next);
        }

        BIND(&if_noflatten);
        {
          // 1. If targetIndex >= 2^53-1, throw a TypeError exception.
          Label throw_error(this, Label::kDeferred);
          GotoIfNumberGreaterThanOrEqual(
              target_index, NumberConstant(kMaxSafeInteger), &throw_error);

          // 2. Perform ? CreateDataPropertyOrThrow(target,
          //                                        ! ToString(targetIndex),
          //                                        element).
          CallRuntime(Runtime::kCreateDataProperty, context, target,
                      target_index, element);

          // 3. Increase targetIndex by 1.
          var_target_index.Bind(NumberInc(target_index));
          Goto(&next);

          BIND(&throw_error);
          ThrowTypeError(context, MessageTemplate::kFlattenPastSafeLength,
                         source_length, target_index);
        }
      }
      BIND(&next);

      // d. Increase sourceIndex by 1.
      var_source_index.Bind(NumberInc(source_index));
      Goto(&loop);
    }

    BIND(&done_loop);
    return var_target_index.value();
  }
};

}  // namespace
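// FlattenIntoArray walks source indices 0..sourceLen-1; for every index that
// exists on the source it either recurses with depth - 1 (when depth > 0 and
// the element is a JSArray, or a proxy for which IsArray is true) or copies
// the element to target[targetIndex++]. At the JS level:
//   [1, [2, [3]]].flat(1);         // [1, 2, [3]]
//   [1, [2, [3]]].flat(2);         // [1, 2, 3]
//   [1, [2, [3]]].flat(Infinity);  // [1, 2, 3]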
// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) {
  Node* const context = Parameter(Descriptor::kContext);
  Node* const target = Parameter(Descriptor::kTarget);
  Node* const source = Parameter(Descriptor::kSource);
  Node* const source_length = Parameter(Descriptor::kSourceLength);
  Node* const start = Parameter(Descriptor::kStart);
  Node* const depth = Parameter(Descriptor::kDepth);

  Return(
      FlattenIntoArray(context, target, source, source_length, start, depth));
}
// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) {
  Node* const context = Parameter(Descriptor::kContext);
  Node* const target = Parameter(Descriptor::kTarget);
  Node* const source = Parameter(Descriptor::kSource);
  Node* const source_length = Parameter(Descriptor::kSourceLength);
  Node* const start = Parameter(Descriptor::kStart);
  Node* const depth = Parameter(Descriptor::kDepth);
  Node* const mapper_function = Parameter(Descriptor::kMapperFunction);
  Node* const this_arg = Parameter(Descriptor::kThisArg);

  Return(FlattenIntoArray(context, target, source, source_length, start, depth,
                          mapper_function, this_arg));
}
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flat
TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
  Node* const argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  Node* const context = Parameter(BuiltinDescriptor::kContext);
  Node* const receiver = args.GetReceiver();
  Node* const depth = args.GetOptionalArgumentValue(0);

  // 1. Let O be ? ToObject(this value).
  Node* const o = ToObject(context, receiver);

  // 2. Let sourceLen be ? ToLength(? Get(O, "length")).
  Node* const source_length =
      ToLength_Inline(context, GetProperty(context, o, LengthStringConstant()));

  // 3. Let depthNum be 1.
  VARIABLE(var_depth_num, MachineRepresentation::kTagged, SmiConstant(1));

  // 4. If depth is not undefined, then
  Label done(this);
  GotoIf(IsUndefined(depth), &done);
  {
    // a. Set depthNum to ? ToInteger(depth).
    var_depth_num.Bind(ToInteger_Inline(context, depth));
    Goto(&done);
  }
  BIND(&done);

  // 5. Let A be ? ArraySpeciesCreate(O, 0).
  Node* const constructor =
      CallRuntime(Runtime::kArraySpeciesConstructor, context, o);
  Node* const a = ConstructJS(CodeFactory::Construct(isolate()), context,
                              constructor, SmiConstant(0));

  // 6. Perform ? FlattenIntoArray(A, O, sourceLen, 0, depthNum).
  CallBuiltin(Builtins::kFlattenIntoArray, context, a, o, source_length,
              SmiConstant(0), var_depth_num.value());

  // 7. Return A.
  args.PopAndReturn(a);
}
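// Array.prototype.flat defaults its depth to 1 and runs any explicit depth
// argument through ToInteger, so:
//   [1, [2, [3]]].flat();          // [1, 2, [3]]
//   [1, [2, [3]]].flat('2');       // [1, 2, 3]  (ToInteger('2') is 2)
//   [1, [2, [3]]].flat(Infinity);  // [1, 2, 3]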
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap
TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
  Node* const argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);
  Node* const context = Parameter(BuiltinDescriptor::kContext);
  Node* const receiver = args.GetReceiver();
  Node* const mapper_function = args.GetOptionalArgumentValue(0);

  // 1. Let O be ? ToObject(this value).
  Node* const o = ToObject(context, receiver);

  // 2. Let sourceLen be ? ToLength(? Get(O, "length")).
  Node* const source_length =
      ToLength_Inline(context, GetProperty(context, o, LengthStringConstant()));

  // 3. If IsCallable(mapperFunction) is false, throw a TypeError exception.
  Label if_not_callable(this, Label::kDeferred);
  GotoIf(TaggedIsSmi(mapper_function), &if_not_callable);
  GotoIfNot(IsCallable(mapper_function), &if_not_callable);

  // 4. If thisArg is present, let T be thisArg; else let T be undefined.
  Node* const t = args.GetOptionalArgumentValue(1);

  // 5. Let A be ? ArraySpeciesCreate(O, 0).
  Node* const constructor =
      CallRuntime(Runtime::kArraySpeciesConstructor, context, o);
  Node* const a = ConstructJS(CodeFactory::Construct(isolate()), context,
                              constructor, SmiConstant(0));

  // 6. Perform ? FlattenIntoArray(A, O, sourceLen, 0, 1, mapperFunction, T).
  CallBuiltin(Builtins::kFlatMapIntoArray, context, a, o, source_length,
              SmiConstant(0), SmiConstant(1), mapper_function, t);

  // 7. Return A.
  args.PopAndReturn(a);

  BIND(&if_not_callable);
  { ThrowTypeError(context, MessageTemplate::kMapperFunctionNonCallable); }
}
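// Array.prototype.flatMap maps with the callback and then flattens exactly
// one level:
//   [1, 2, 3].flatMap(x => [x, x * 2]);  // [1, 2, 2, 4, 3, 6]
//   [1, 2, 3].flatMap(x => [[x * 2]]);   // [[2], [4], [6]]
// Passing a non-callable mapper throws a TypeError before any elements are
// visited.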
}  // namespace internal
}  // namespace v8