2017-03-16 11:32:01 +00:00
|
|
|
// Copyright 2017 the V8 project authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file.
|
|
|
|
|
2017-05-15 19:42:35 +00:00
|
|
|
#include "src/builtins/builtins-string-gen.h"
|
2017-03-16 11:32:01 +00:00
|
|
|
#include "src/builtins/builtins-utils-gen.h"
|
|
|
|
#include "src/builtins/builtins.h"
|
|
|
|
#include "src/code-stub-assembler.h"
|
|
|
|
|
|
|
|
namespace v8 {
|
|
|
|
namespace internal {
|
|
|
|
|
|
|
|
class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
|
|
|
|
public:
|
|
|
|
// Sets up the loop state shared by all iterating Array builtins:
// k_ is the current index, a_ is the accumulator/result value, and to_ is
// the next target index used by filter. fully_spec_compliant_ is the label
// jumped to when a fast path must fall back to the generic, spec-compliant
// continuation; it merges all three loop variables.
explicit ArrayBuiltinCodeStubAssembler(compiler::CodeAssemblerState* state)
    : CodeStubAssembler(state),
      k_(this, MachineRepresentation::kTagged),
      a_(this, MachineRepresentation::kTagged),
      to_(this, MachineRepresentation::kTagged, SmiConstant(0)),
      fully_spec_compliant_(this, {&k_, &a_, &to_}) {}
|
2017-03-21 15:57:38 +00:00
|
|
|
|
|
|
|
// Produces the initial value of the result accumulator a_ for a builtin
// (e.g. undefined for forEach, false for some, true for every).
typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
    BuiltinResultGenerator;

// Processes one element (k_value at index k) inside the builtin's loop and
// returns the (possibly updated) accumulator value.
typedef std::function<Node*(ArrayBuiltinCodeStubAssembler* masm,
                            Node* k_value, Node* k)>
    CallResultProcessor;

// Runs once after the loop completes (e.g. reduce throws here if no initial
// value was ever established).
typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
    PostLoopAction;
|
|
|
|
|
2017-05-03 14:11:44 +00:00
|
|
|
// Array.prototype.forEach always returns undefined.
void ForEachResultGenerator() { a_.Bind(UndefinedConstant()); }
|
2017-03-21 15:57:38 +00:00
|
|
|
|
|
|
|
// Element processor for Array.prototype.forEach: invokes
// callbackfn(this_arg, k_value, k, O) and discards its result.
Node* ForEachProcessor(Node* k_value, Node* k) {
  CallJS(CodeFactory::Call(isolate()), context(), callbackfn(), this_arg(),
         k_value, k, o());
  // a() stays undefined throughout (see ForEachResultGenerator).
  return a();
}
|
|
|
|
|
2017-05-03 14:11:44 +00:00
|
|
|
// Array.prototype.some returns false unless a match short-circuits the loop.
void SomeResultGenerator() { a_.Bind(FalseConstant()); }
|
2017-03-21 15:57:38 +00:00
|
|
|
|
|
|
|
// Element processor for Array.prototype.some: calls the predicate and
// returns true from the whole builtin as soon as it holds for any element.
Node* SomeProcessor(Node* k_value, Node* k) {
  Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
                       this_arg(), k_value, k, o());
  Label false_continue(this), return_true(this);
  BranchIfToBooleanIsTrue(value, &return_true, &false_continue);
  BIND(&return_true);
  // Short-circuit: a truthy predicate result ends the builtin immediately.
  ReturnFromBuiltin(TrueConstant());
  BIND(&false_continue);
  // Predicate was falsy: keep iterating; the accumulator stays false.
  return a();
}
|
|
|
|
|
2017-05-03 14:11:44 +00:00
|
|
|
// Array.prototype.every returns true unless a mismatch short-circuits the
// loop.
void EveryResultGenerator() { a_.Bind(TrueConstant()); }
|
2017-03-21 15:57:38 +00:00
|
|
|
|
|
|
|
// Element processor for Array.prototype.every: calls the predicate and
// returns false from the whole builtin as soon as it fails for any element.
Node* EveryProcessor(Node* k_value, Node* k) {
  Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
                       this_arg(), k_value, k, o());
  Label true_continue(this), return_false(this);
  BranchIfToBooleanIsTrue(value, &true_continue, &return_false);
  BIND(&return_false);
  // Short-circuit: a falsy predicate result ends the builtin immediately.
  ReturnFromBuiltin(FalseConstant());
  BIND(&true_continue);
  // Predicate held: keep iterating; the accumulator stays true.
  return a();
}
|
|
|
|
|
2017-05-03 14:11:44 +00:00
|
|
|
// Array.prototype.reduce: the accumulator starts as this_arg, which holds
// either the caller-supplied initial value or the hole; in the latter case
// ReduceProcessor adopts the first element as the initial value.
// Fix: dropped the redundant `return` applied to a void expression
// (a_.Bind returns void, and this function returns void).
void ReduceResultGenerator() { a_.Bind(this_arg()); }
|
2017-03-21 15:57:38 +00:00
|
|
|
|
|
|
|
// Element processor for Array.prototype.reduce. The accumulator a() holds
// the hole until an initial value has been established (see
// ReduceResultGenerator and ReducePostLoopAction).
Node* ReduceProcessor(Node* k_value, Node* k) {
  VARIABLE(result, MachineRepresentation::kTagged);
  Label done(this, {&result}), initial(this);
  // No accumulator yet: this element becomes the initial value.
  GotoIf(WordEqual(a(), TheHoleConstant()), &initial);
  // Normal step: result = callbackfn(undefined, acc, k_value, k, O).
  result.Bind(CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
                     UndefinedConstant(), a(), k_value, k, o()));
  Goto(&done);

  BIND(&initial);
  result.Bind(k_value);
  Goto(&done);

  BIND(&done);
  return result.value();
}
|
|
|
|
|
|
|
|
// After the reduce loop: if the accumulator is still the hole, the array was
// empty and no initial value was supplied, so throw a TypeError per the
// spec.
void ReducePostLoopAction() {
  Label ok(this);
  GotoIf(WordNotEqual(a(), TheHoleConstant()), &ok);
  CallRuntime(Runtime::kThrowTypeError, context(),
              SmiConstant(MessageTemplate::kReduceNoInitial));
  Unreachable();
  BIND(&ok);
}
|
|
|
|
|
2017-05-03 14:11:44 +00:00
|
|
|
// Array.prototype.filter: the result array starts empty and grows as
// elements are selected (to_ tracks the next target index).
void FilterResultGenerator() {
  // 7. Let A be ArraySpeciesCreate(O, 0).
  Node* len = SmiConstant(0);
  ArraySpeciesCreate(len);
}
|
|
|
|
|
|
|
|
// Element processor for Array.prototype.filter. Selected elements are
// appended to a(): via an in-place fast path when a() is a fast JSArray
// whose elements kind permits the store, otherwise via the
// CreateDataProperty runtime call.
Node* FilterProcessor(Node* k_value, Node* k) {
  // ii. Let selected be ToBoolean(? Call(callbackfn, T, kValue, k, O)).
  Node* selected = CallJS(CodeFactory::Call(isolate()), context(),
                          callbackfn(), this_arg(), k_value, k, o());
  Label true_continue(this, &to_), false_continue(this);
  BranchIfToBooleanIsTrue(selected, &true_continue, &false_continue);
  BIND(&true_continue);
  // iii. If selected is true, then...
  {
    Label after_work(this, &to_);
    Node* kind = nullptr;

    // If a() is a JSArray, we can have a fast path.
    Label fast(this);
    Label runtime(this);
    Label object_push_pre(this), object_push(this), double_push(this);
    BranchIfFastJSArray(a(), context(), FastJSArrayAccessMode::ANY_ACCESS,
                        &fast, &runtime);

    BIND(&fast);
    {
      // Bail out to &runtime if a() can't be pushed to in place.
      kind = EnsureArrayPushable(a(), &runtime);
      GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
             &object_push_pre);

      BuildAppendJSArray(FAST_SMI_ELEMENTS, a(), k_value, &runtime);
      Goto(&after_work);
    }

    BIND(&object_push_pre);
    {
      // Distinguish object-elements arrays from double-elements arrays.
      Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS),
             &double_push, &object_push);
    }

    BIND(&object_push);
    {
      BuildAppendJSArray(FAST_ELEMENTS, a(), k_value, &runtime);
      Goto(&after_work);
    }

    BIND(&double_push);
    {
      BuildAppendJSArray(FAST_DOUBLE_ELEMENTS, a(), k_value, &runtime);
      Goto(&after_work);
    }

    BIND(&runtime);
    {
      // 1. Perform ? CreateDataPropertyOrThrow(A, ToString(to), kValue).
      CallRuntime(Runtime::kCreateDataProperty, context(), a(), to_.value(),
                  k_value);
      Goto(&after_work);
    }

    BIND(&after_work);
    {
      // 2. Increase to by 1.
      to_.Bind(NumberInc(to_.value()));
      Goto(&false_continue);
    }
  }
  BIND(&false_continue);
  return a();
}
|
|
|
|
|
2017-05-12 11:37:21 +00:00
|
|
|
// Array.prototype.map: create the output array A with the source length
// (ArraySpeciesCreate; presumably binds a_ to the new array — see its
// definition elsewhere in this file).
void MapResultGenerator() { ArraySpeciesCreate(len_); }
|
2017-05-03 14:11:44 +00:00
|
|
|
|
|
|
|
// Fully spec-compliant element processor for Array.prototype.map: stores
// each mapped value with the CreateDataProperty runtime call (no fast-path
// element store).
Node* SpecCompliantMapProcessor(Node* k_value, Node* k) {
  // i. Let kValue be ? Get(O, Pk). Performed by the caller of
  // SpecCompliantMapProcessor.
  // ii. Let mappedValue be ? Call(callbackfn, T, kValue, k, O).
  Node* mappedValue = CallJS(CodeFactory::Call(isolate()), context(),
                             callbackfn(), this_arg(), k_value, k, o());

  // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
  CallRuntime(Runtime::kCreateDataProperty, context(), a(), k, mappedValue);
  return a();
}
|
|
|
|
|
2017-05-03 14:11:44 +00:00
|
|
|
// Fast-path element processor for Array.prototype.map: when a() is a fast
// JSArray, stores the mapped value directly into its elements backing
// store; otherwise falls back to the CreateDataProperty runtime call.
Node* FastMapProcessor(Node* k_value, Node* k) {
  // i. Let kValue be ? Get(O, Pk). Performed by the caller of
  // FastMapProcessor.
  // ii. Let mappedValue be ? Call(callbackfn, T, kValue, k, O).
  Node* mappedValue = CallJS(CodeFactory::Call(isolate()), context(),
                             callbackfn(), this_arg(), k_value, k, o());

  Label finished(this);
  Node* kind = nullptr;
  Node* elements = nullptr;

  // If a() is a JSArray, we can have a fast path.
  // mode is SMI_PARAMETERS because k has tagged representation.
  ParameterMode mode = SMI_PARAMETERS;
  Label fast(this);
  Label runtime(this);
  Label object_push_pre(this), object_push(this), double_push(this);
  BranchIfFastJSArray(a(), context(), FastJSArrayAccessMode::ANY_ACCESS,
                      &fast, &runtime);

  BIND(&fast);
  {
    // Bail out to &runtime if a() can't take a fast element store.
    kind = EnsureArrayPushable(a(), &runtime);
    elements = LoadElements(a());
    GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
           &object_push_pre);
    TryStoreArrayElement(FAST_SMI_ELEMENTS, mode, &runtime, elements, k,
                         mappedValue);
    Goto(&finished);
  }

  BIND(&object_push_pre);
  {
    // Distinguish object-elements arrays from double-elements arrays.
    Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS), &double_push,
           &object_push);
  }

  BIND(&object_push);
  {
    TryStoreArrayElement(FAST_ELEMENTS, mode, &runtime, elements, k,
                         mappedValue);
    Goto(&finished);
  }

  BIND(&double_push);
  {
    TryStoreArrayElement(FAST_DOUBLE_ELEMENTS, mode, &runtime, elements, k,
                         mappedValue);
    Goto(&finished);
  }

  BIND(&runtime);
  {
    // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
    CallRuntime(Runtime::kCreateDataProperty, context(), a(), k, mappedValue);
    Goto(&finished);
  }

  BIND(&finished);
  return a();
}
|
|
|
|
|
2017-03-21 15:57:38 +00:00
|
|
|
// No-op post-loop action for builtins that need no finalization step.
void NullPostLoopAction() {}
|
|
|
|
|
|
|
|
protected:
|
|
|
|
// Accessors for the builtin's captured arguments and loop state, set up by
// InitIteratingArrayBuiltinBody / InitIteratingArrayBuiltinLoopContinuation.
Node* context() { return context_; }
Node* receiver() { return receiver_; }
Node* new_target() { return new_target_; }
Node* argc() { return argc_; }  // nullptr for loop continuations.
Node* o() { return o_; }        // The receiver after ToObject / validation.
Node* len() { return len_; }
Node* callbackfn() { return callbackfn_; }
Node* this_arg() { return this_arg_; }
Node* k() { return k_.value(); }  // Current loop index.
Node* a() { return a_.value(); }  // Accumulator / result value.
|
|
|
|
|
2017-04-29 11:40:48 +00:00
|
|
|
// Returns `value` from the builtin. Builtins entered with a variable
// argument count (argc_ set) must pop their arguments off the stack before
// returning; loop-continuation builtins (argc_ == nullptr) use a plain
// return.
void ReturnFromBuiltin(Node* value) {
  if (argc_ != nullptr) {
    // argc_ doesn't include the receiver, so it has to be added back in
    // manually.
    PopAndReturn(IntPtrAdd(argc_, IntPtrConstant(1)), value);
  } else {
    Return(value);
  }
}
|
|
|
|
|
2017-03-21 17:25:35 +00:00
|
|
|
// Stashes the builtin's incoming arguments so the generator/processor
// callbacks can reach them through the protected accessors.
void InitIteratingArrayBuiltinBody(Node* context, Node* receiver,
                                   Node* callbackfn, Node* this_arg,
                                   Node* new_target, Node* argc) {
  context_ = context;
  receiver_ = receiver;
  new_target_ = new_target;
  callbackfn_ = callbackfn;
  this_arg_ = this_arg;
  argc_ = argc;
}
|
2017-03-21 15:57:38 +00:00
|
|
|
|
2017-03-21 17:25:35 +00:00
|
|
|
// Emits the main body of an iterating Array builtin (forEach, some, every,
// reduce, filter, map, ...): validates the receiver and callback per the
// spec, computes len, runs the fast-elements path, and tail-calls the
// spec-compliant continuation builtin if the fast path bails out.
void GenerateIteratingArrayBuiltinBody(
    const char* name, const BuiltinResultGenerator& generator,
    const CallResultProcessor& processor, const PostLoopAction& action,
    const Callable& slow_case_continuation,
    ForEachDirection direction = ForEachDirection::kForward) {
  Label non_array(this), array_changes(this, {&k_, &a_, &to_});

  // TODO(danno): Seriously? Do we really need to throw the exact error
  // message on null and undefined so that the webkit tests pass?
  Label throw_null_undefined_exception(this, Label::kDeferred);
  GotoIf(WordEqual(receiver(), NullConstant()),
         &throw_null_undefined_exception);
  GotoIf(WordEqual(receiver(), UndefinedConstant()),
         &throw_null_undefined_exception);

  // By the book: taken directly from the ECMAScript 2015 specification

  // 1. Let O be ToObject(this value).
  // 2. ReturnIfAbrupt(O)
  o_ = CallStub(CodeFactory::ToObject(isolate()), context(), receiver());

  // 3. Let len be ToLength(Get(O, "length")).
  // 4. ReturnIfAbrupt(len).
  // JSArrays carry their length directly; everything else goes through a
  // generic "length" property load plus ToLength.
  VARIABLE(merged_length, MachineRepresentation::kTagged);
  Label has_length(this, &merged_length), not_js_array(this);
  GotoIf(DoesntHaveInstanceType(o(), JS_ARRAY_TYPE), &not_js_array);
  merged_length.Bind(LoadJSArrayLength(o()));
  Goto(&has_length);
  BIND(&not_js_array);
  Node* len_property =
      GetProperty(context(), o(), isolate()->factory()->length_string());
  merged_length.Bind(ToLength_Inline(context(), len_property));
  Goto(&has_length);
  BIND(&has_length);
  len_ = merged_length.value();

  // 5. If IsCallable(callbackfn) is false, throw a TypeError exception.
  Label type_exception(this, Label::kDeferred);
  Label done(this);
  GotoIf(TaggedIsSmi(callbackfn()), &type_exception);
  Branch(IsCallableMap(LoadMap(callbackfn())), &done, &type_exception);

  BIND(&throw_null_undefined_exception);
  {
    CallRuntime(
        Runtime::kThrowTypeError, context(),
        SmiConstant(MessageTemplate::kCalledOnNullOrUndefined),
        HeapConstant(isolate()->factory()->NewStringFromAsciiChecked(name)));
    Unreachable();
  }

  BIND(&type_exception);
  {
    CallRuntime(Runtime::kThrowTypeError, context(),
                SmiConstant(MessageTemplate::kCalledNonCallable),
                callbackfn());
    Unreachable();
  }

  BIND(&done);

  // 6. If thisArg was supplied, let T be thisArg; else let T be undefined.
  // [Already done by the arguments adapter]

  if (direction == ForEachDirection::kForward) {
    // 7. Let k be 0.
    k_.Bind(SmiConstant(0));
  } else {
    // Backward iteration (e.g. reduceRight) starts at len - 1.
    k_.Bind(NumberDec(len()));
  }

  generator(this);

  // Fast path over the elements backing store; jumps to
  // fully_spec_compliant_ with k_/a_/to_ holding the resume state on
  // bailout.
  HandleFastElements(processor, action, &fully_spec_compliant_, direction);

  BIND(&fully_spec_compliant_);

  // Tail off into the slow, spec-compliant continuation builtin, passing
  // the current loop state so it resumes where the fast path stopped.
  Node* result =
      CallStub(slow_case_continuation, context(), receiver(), callbackfn(),
               this_arg(), a_.value(), o(), k_.value(), len(), to_.value());
  ReturnFromBuiltin(result);
}
|
|
|
|
|
|
|
|
// Initializes state for a loop-continuation builtin, which resumes the
// spec-compliant loop from the state captured by the fast-path builtin.
// argc_ is nullptr so ReturnFromBuiltin uses a plain Return.
// NOTE(review): the `receiver` parameter is not stored into receiver_ here —
// presumably continuations never read receiver(); confirm against callers.
void InitIteratingArrayBuiltinLoopContinuation(Node* context, Node* receiver,
                                               Node* callbackfn,
                                               Node* this_arg, Node* a,
                                               Node* o, Node* initial_k,
                                               Node* len, Node* to) {
  context_ = context;
  this_arg_ = this_arg;
  callbackfn_ = callbackfn;
  argc_ = nullptr;
  a_.Bind(a);
  k_.Bind(initial_k);
  o_ = o;
  len_ = len;
  to_.Bind(to);
}
|
|
|
|
|
2017-03-30 16:36:53 +00:00
|
|
|
// Emits the body of an iterating %TypedArray% builtin: validates the typed
// array receiver and callback, then dispatches on the elements instance
// type and iterates the backing store directly.
void GenerateIteratingTypedArrayBuiltinBody(
    const char* name, const BuiltinResultGenerator& generator,
    const CallResultProcessor& processor, const PostLoopAction& action,
    ForEachDirection direction = ForEachDirection::kForward) {
  Node* name_string =
      HeapConstant(isolate()->factory()->NewStringFromAsciiChecked(name));

  // ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray

  Label throw_not_typed_array(this, Label::kDeferred),
      throw_detached(this, Label::kDeferred);

  GotoIf(TaggedIsSmi(receiver_), &throw_not_typed_array);
  GotoIfNot(HasInstanceType(receiver_, JS_TYPED_ARRAY_TYPE),
            &throw_not_typed_array);

  // Typed arrays skip ToObject: the validated receiver is O directly.
  o_ = receiver_;
  Node* array_buffer = LoadObjectField(o_, JSTypedArray::kBufferOffset);
  GotoIf(IsDetachedBuffer(array_buffer), &throw_detached);

  len_ = LoadObjectField(o_, JSTypedArray::kLengthOffset);

  Label throw_not_callable(this, Label::kDeferred);
  Label distinguish_types(this);
  GotoIf(TaggedIsSmi(callbackfn_), &throw_not_callable);
  Branch(IsCallableMap(LoadMap(callbackfn_)), &distinguish_types,
         &throw_not_callable);

  BIND(&throw_not_typed_array);
  {
    CallRuntime(Runtime::kThrowTypeError, context_,
                SmiConstant(MessageTemplate::kNotTypedArray));
    Unreachable();
  }

  BIND(&throw_detached);
  {
    CallRuntime(Runtime::kThrowTypeError, context_,
                SmiConstant(MessageTemplate::kDetachedOperation),
                name_string);
    Unreachable();
  }

  BIND(&throw_not_callable);
  {
    CallRuntime(Runtime::kThrowTypeError, context_,
                SmiConstant(MessageTemplate::kCalledNonCallable),
                callbackfn_);
    Unreachable();
  }

  Label unexpected_instance_type(this);
  BIND(&unexpected_instance_type);
  Unreachable();

  // One switch case (and label) per typed-array elements instance type.
  std::vector<int32_t> instance_types = {
#define INSTANCE_TYPE(Type, type, TYPE, ctype, size) FIXED_##TYPE##_ARRAY_TYPE,
      TYPED_ARRAYS(INSTANCE_TYPE)
#undef INSTANCE_TYPE
  };
  std::vector<Label> labels;
  for (size_t i = 0; i < instance_types.size(); ++i) {
    labels.push_back(Label(this));
  }
  std::vector<Label*> label_ptrs;
  for (Label& label : labels) {
    label_ptrs.push_back(&label);
  }

  BIND(&distinguish_types);

  if (direction == ForEachDirection::kForward) {
    k_.Bind(SmiConstant(0));
  } else {
    k_.Bind(NumberDec(len()));
  }
  generator(this);
  Node* elements_type = LoadInstanceType(LoadElements(o_));
  Switch(elements_type, &unexpected_instance_type, instance_types.data(),
         label_ptrs.data(), labels.size());

  // Emit one specialized loop per elements kind.
  for (size_t i = 0; i < labels.size(); ++i) {
    BIND(&labels[i]);
    Label done(this);
    // TODO(tebbi): Silently cancelling the loop on buffer detachment is a
    // spec violation. Should go to &detached and throw a TypeError instead.
    VisitAllTypedArrayElements(
        ElementsKindForInstanceType(
            static_cast<InstanceType>(instance_types[i])),
        array_buffer, processor, &done, direction);
    Goto(&done);
    // No exception, return success
    BIND(&done);
    action(this);
    ReturnFromBuiltin(a_.value());
  }
}
|
|
|
|
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doens't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
void GenerateIteratingArrayBuiltinLoopContinuation(
|
2017-03-24 13:35:56 +00:00
|
|
|
const CallResultProcessor& processor, const PostLoopAction& action,
|
|
|
|
ForEachDirection direction = ForEachDirection::kForward) {
|
2017-03-21 15:57:38 +00:00
|
|
|
Label loop(this, {&k_, &a_, &to_});
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doens't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
Label after_loop(this);
|
|
|
|
Goto(&loop);
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&loop);
|
2017-03-16 11:32:01 +00:00
|
|
|
{
|
2017-03-24 13:35:56 +00:00
|
|
|
if (direction == ForEachDirection::kForward) {
|
|
|
|
// 8. Repeat, while k < len
|
|
|
|
GotoUnlessNumberLessThan(k(), len_, &after_loop);
|
|
|
|
} else {
|
|
|
|
// OR
|
|
|
|
// 10. Repeat, while k >= 0
|
|
|
|
GotoUnlessNumberLessThan(SmiConstant(-1), k(), &after_loop);
|
|
|
|
}
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doens't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
|
2017-03-21 08:56:56 +00:00
|
|
|
Label done_element(this, &to_);
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doens't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
// a. Let Pk be ToString(k).
|
2017-03-21 15:57:38 +00:00
|
|
|
Node* p_k = ToString(context(), k());
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doens't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
|
|
|
|
// b. Let kPresent be HasProperty(O, Pk).
|
|
|
|
// c. ReturnIfAbrupt(kPresent).
|
2017-03-21 15:57:38 +00:00
|
|
|
Node* k_present = HasProperty(o(), p_k, context());
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doesn't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
|
|
|
|
// d. If kPresent is true, then
|
|
|
|
GotoIf(WordNotEqual(k_present, TrueConstant()), &done_element);
|
|
|
|
|
|
|
|
// i. Let kValue be Get(O, Pk).
|
|
|
|
// ii. ReturnIfAbrupt(kValue).
|
2017-03-21 15:57:38 +00:00
|
|
|
Node* k_value = GetProperty(context(), o(), k());
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doesn't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
|
|
|
|
// iii. Let funcResult be Call(callbackfn, T, «kValue, k, O»).
|
|
|
|
// iv. ReturnIfAbrupt(funcResult).
|
2017-03-21 15:57:38 +00:00
|
|
|
a_.Bind(processor(this, k_value, k()));
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doesn't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
Goto(&done_element);
|
2017-03-21 15:57:38 +00:00
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&done_element);
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doesn't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
|
2017-03-24 13:35:56 +00:00
|
|
|
if (direction == ForEachDirection::kForward) {
|
|
|
|
// e. Increase k by 1.
|
|
|
|
k_.Bind(NumberInc(k()));
|
|
|
|
} else {
|
|
|
|
// e. Decrease k by 1.
|
|
|
|
k_.Bind(NumberDec(k()));
|
|
|
|
}
|
2017-03-16 11:32:01 +00:00
|
|
|
Goto(&loop);
|
|
|
|
}
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&after_loop);
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doesn't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
|
2017-03-21 15:57:38 +00:00
|
|
|
action(this);
|
|
|
|
Return(a_.value());
|
2017-03-21 08:56:56 +00:00
|
|
|
}
|
|
|
|
|
2017-03-16 11:32:01 +00:00
|
|
|
private:
|
2017-03-30 16:36:53 +00:00
|
|
|
  // Maps a fixed typed-array instance type (e.g. FIXED_INT32_ARRAY_TYPE) to
  // its corresponding ElementsKind (e.g. INT32_ELEMENTS). The mapping is
  // generated from the TYPED_ARRAYS macro list; any other instance type is a
  // programmer error.
  static ElementsKind ElementsKindForInstanceType(InstanceType type) {
    switch (type) {
#define INSTANCE_TYPE_TO_ELEMENTS_KIND(Type, type, TYPE, ctype, size) \
  case FIXED_##TYPE##_ARRAY_TYPE:                                     \
    return TYPE##_ELEMENTS;

      TYPED_ARRAYS(INSTANCE_TYPE_TO_ELEMENTS_KIND)
#undef INSTANCE_TYPE_TO_ELEMENTS_KIND

      default:
        UNREACHABLE();
        // Unreachable; return value only satisfies the compiler's
        // all-paths-return requirement.
        return static_cast<ElementsKind>(-1);
    }
  }
|
|
|
|
|
|
|
|
  // Iterates over all elements of a typed array with elements kind |kind|,
  // invoking |processor| for each (value, index) pair and binding its result
  // into a_. Jumps to |detached| if |array_buffer| is found detached (the
  // processor's callback may detach it), so detachment is re-checked on every
  // iteration. |direction| selects forward (0..len-1) or reverse iteration.
  void VisitAllTypedArrayElements(ElementsKind kind, Node* array_buffer,
                                  const CallResultProcessor& processor,
                                  Label* detached, ForEachDirection direction) {
    // Loop-carried CSA variables that the processor may rebind.
    VariableList list({&a_, &k_, &to_}, zone());

    FastLoopBody body = [&](Node* index) {
      // The callback may have detached the buffer since the last iteration.
      GotoIf(IsDetachedBuffer(array_buffer), detached);
      Node* elements = LoadElements(o_);
      Node* base_ptr =
          LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
      Node* external_ptr =
          LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
                          MachineType::Pointer());
      // base + external pointer yields the backing-store address for both
      // on-heap and off-heap typed array data.
      Node* data_ptr = IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);
      Node* value = LoadFixedTypedArrayElementAsTagged(data_ptr, index, kind,
                                                       SMI_PARAMETERS);
      k_.Bind(index);
      a_.Bind(processor(this, value, index));
    };
    Node* start = SmiConstant(0);
    Node* end = len_;
    IndexAdvanceMode advance_mode = IndexAdvanceMode::kPost;
    int incr = 1;
    if (direction == ForEachDirection::kReverse) {
      // Walk from len-1 down to 0: swap the bounds and pre-decrement.
      std::swap(start, end);
      advance_mode = IndexAdvanceMode::kPre;
      incr = -1;
    }
    BuildFastLoop(list, start, end, body, incr, ParameterMode::SMI_PARAMETERS,
                  advance_mode);
  }
|
|
|
|
|
2017-03-21 15:57:38 +00:00
|
|
|
  // Iterates over a fast-elements JS array of exactly one elements kind
  // (FAST_ELEMENTS or FAST_DOUBLE_ELEMENTS), invoking |processor| per
  // non-hole element. On every iteration it re-validates the fast-path
  // assumptions (map unchanged, index still in bounds) and jumps to
  // |array_changed| if the callback invalidated them, so the caller can fall
  // back to the spec-compliant slow loop.
  void VisitAllFastElementsOneKind(ElementsKind kind,
                                   const CallResultProcessor& processor,
                                   Label* array_changed, ParameterMode mode,
                                   ForEachDirection direction) {
    Comment("begin VisitAllFastElementsOneKind");
    VARIABLE(original_map, MachineRepresentation::kTagged);
    original_map.Bind(LoadMap(o()));
    VariableList list({&original_map, &a_, &k_, &to_}, zone());
    Node* start = IntPtrOrSmiConstant(0, mode);
    Node* end = TaggedToParameter(len(), mode);
    IndexAdvanceMode advance_mode = direction == ForEachDirection::kReverse
                                        ? IndexAdvanceMode::kPre
                                        : IndexAdvanceMode::kPost;
    if (direction == ForEachDirection::kReverse) std::swap(start, end);
    BuildFastLoop(
        list, start, end,
        [=, &original_map](Node* index) {
          k_.Bind(ParameterToTagged(index, mode));
          Label one_element_done(this), hole_element(this);

          // Check if o's map has changed during the callback. If so, we have to
          // fall back to the slower spec implementation for the rest of the
          // iteration.
          Node* o_map = LoadMap(o());
          GotoIf(WordNotEqual(o_map, original_map.value()), array_changed);

          // Check if o's length has changed during the callback and if the
          // index is now out of range of the new length.
          GotoIf(SmiGreaterThanOrEqual(k_.value(), LoadJSArrayLength(o())),
                 array_changed);

          // Re-load the elements array. It may have been resized.
          Node* elements = LoadElements(o());

          // Fast case: load the element directly from the elements FixedArray
          // and call the callback if the element is not the hole.
          DCHECK(kind == FAST_ELEMENTS || kind == FAST_DOUBLE_ELEMENTS);
          int base_size = kind == FAST_ELEMENTS
                              ? FixedArray::kHeaderSize
                              : (FixedArray::kHeaderSize - kHeapObjectTag);
          Node* offset = ElementOffsetFromIndex(index, kind, mode, base_size);
          Node* value = nullptr;
          if (kind == FAST_ELEMENTS) {
            value = LoadObjectField(elements, offset);
            GotoIf(WordEqual(value, TheHoleConstant()), &hole_element);
          } else {
            // Doubles are stored unboxed; box the value before handing it to
            // the (JS-visible) processor.
            Node* double_value =
                LoadDoubleWithHoleCheck(elements, offset, &hole_element);
            value = AllocateHeapNumberWithValue(double_value);
          }
          a_.Bind(processor(this, value, k()));
          Goto(&one_element_done);

          BIND(&hole_element);
          // Check if o's prototype change unexpectedly has elements after the
          // callback in the case of a hole.
          BranchIfPrototypesHaveNoElements(o_map, &one_element_done,
                                           array_changed);

          BIND(&one_element_done);
        },
        1, mode, advance_mode);
    Comment("end VisitAllFastElementsOneKind");
  }
|
|
|
|
|
2017-03-21 15:57:38 +00:00
|
|
|
  // Fast path for iterating array builtins: dispatches on the receiver's
  // elements kind and runs the per-element |processor| over FAST_ELEMENTS or
  // FAST_DOUBLE_ELEMENTS arrays, returning a_ on success. Any condition that
  // invalidates the fast path (non-smi length, non-fast array, unsupported
  // elements kind, or mid-loop mutation) jumps to |slow|.
  void HandleFastElements(const CallResultProcessor& processor,
                          const PostLoopAction& action, Label* slow,
                          ForEachDirection direction) {
    Label switch_on_elements_kind(this), fast_elements(this),
        maybe_double_elements(this), fast_double_elements(this);

    Comment("begin HandleFastElements");
    // Non-smi lengths must use the slow path.
    GotoIf(TaggedIsNotSmi(len()), slow);

    BranchIfFastJSArray(o(), context(),
                        CodeStubAssembler::FastJSArrayAccessMode::INBOUNDS_READ,
                        &switch_on_elements_kind, slow);

    BIND(&switch_on_elements_kind);
    // Select by ElementsKind
    Node* o_map = LoadMap(o());
    Node* bit_field2 = LoadMapBitField2(o_map);
    Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
    Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS),
           &maybe_double_elements, &fast_elements);

    ParameterMode mode = OptimalParameterMode();
    BIND(&fast_elements);
    {
      VisitAllFastElementsOneKind(FAST_ELEMENTS, processor, slow, mode,
                                  direction);

      action(this);

      // No exception, return success
      ReturnFromBuiltin(a_.value());
    }

    BIND(&maybe_double_elements);
    // Anything beyond FAST_HOLEY_DOUBLE_ELEMENTS (e.g. dictionary) is slow.
    Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_DOUBLE_ELEMENTS), slow,
           &fast_double_elements);

    BIND(&fast_double_elements);
    {
      VisitAllFastElementsOneKind(FAST_DOUBLE_ELEMENTS, processor, slow, mode,
                                  direction);

      action(this);

      // No exception, return success
      ReturnFromBuiltin(a_.value());
    }
  }
|
2017-03-21 08:56:56 +00:00
|
|
|
|
2017-05-12 11:37:21 +00:00
|
|
|
  // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate): allocates the
  // result array A (bound into a_) for iterating builtins like map/filter.
  // Takes an inline fast path (plain AllocateJSArray) when the receiver is a
  // genuine JSArray with the initial Array.prototype, the species protector
  // is intact, and |len| is a reasonably small positive smi; otherwise defers
  // to the runtime/Construct path and continues in fully_spec_compliant_.
  void ArraySpeciesCreate(Node* len) {
    Label runtime(this, Label::kDeferred), done(this);

    // Fast path requires the receiver to be a JSArray...
    Node* const original_map = LoadMap(o());
    GotoIf(Word32NotEqual(LoadMapInstanceType(original_map),
                          Int32Constant(JS_ARRAY_TYPE)),
           &runtime);

    // ...whose prototype is the unmodified initial Array.prototype...
    Node* const native_context = LoadNativeContext(context());
    Node* const initial_array_prototype = LoadContextElement(
        native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
    Node* proto = LoadMapPrototype(original_map);
    GotoIf(WordNotEqual(proto, initial_array_prototype), &runtime);

    // ...with the @@species protector still valid (no Symbol.species hook)...
    Node* species_protector = SpeciesProtectorConstant();
    Node* value = LoadObjectField(species_protector, Cell::kValueOffset);
    Node* const protector_invalid = SmiConstant(Isolate::kProtectorInvalid);
    GotoIf(WordEqual(value, protector_invalid), &runtime);

    // ...and a length small enough for a fast-elements backing store.
    GotoIfNot(TaggedIsPositiveSmi(len), &runtime);
    GotoIf(SmiAbove(len, SmiConstant(JSArray::kInitialMaxFastElementArray)),
           &runtime);

    const ElementsKind elements_kind =
        GetHoleyElementsKind(GetInitialFastElementsKind());
    Node* array_map = LoadJSArrayElementsMap(elements_kind, native_context);
    a_.Bind(AllocateJSArray(FAST_SMI_ELEMENTS, array_map, len, len, nullptr,
                            CodeStubAssembler::SMI_PARAMETERS));

    Goto(&done);

    BIND(&runtime);
    {
      // 5. Let A be ? ArraySpeciesCreate(O, len).
      Node* constructor =
          CallRuntime(Runtime::kArraySpeciesConstructor, context(), o());
      a_.Bind(ConstructJS(CodeFactory::Construct(isolate()), context(),
                          constructor, len));
      // A species constructor ran arbitrary JS; continue on the
      // fully spec-compliant loop.
      Goto(&fully_spec_compliant_);
    }

    BIND(&done);
  }
|
|
|
|
|
2017-03-21 17:25:35 +00:00
|
|
|
  // Cached builtin arguments (see the accessors used above: callbackfn(),
  // o(), this_arg(), len(), context()); presumably initialized by a
  // generation entry point outside this view — TODO confirm.
  Node* callbackfn_ = nullptr;  // the user callback invoked per element
  Node* o_ = nullptr;           // the receiver object O being iterated
  Node* this_arg_ = nullptr;    // thisArg passed through to the callback
  Node* len_ = nullptr;         // cached length of O (tagged)
  Node* context_ = nullptr;
  Node* receiver_ = nullptr;
  Node* new_target_ = nullptr;
  Node* argc_ = nullptr;
  // Loop-carried CSA variables shared between fast and slow loops.
  Variable k_;   // current index k
  Variable a_;   // the accumulated/result value A
  Variable to_;  // auxiliary counter; exact role not visible here — verify
  // Target for falling back to the fully spec-compliant continuation loop.
  Label fully_spec_compliant_;
|
2017-03-16 11:32:01 +00:00
|
|
|
};
|
|
|
|
|
2017-05-05 12:11:36 +00:00
|
|
|
// Fast-path implementation of Array.prototype.pop. Pops the last element of
// a fast-elements JSArray in-place (writing the hole into the vacated slot)
// and tail-calls the generic ArrayPop stub whenever any fast-path
// precondition fails.
// Fix: use the BIND() macro for &fast_elements, consistent with every other
// label bind in this file (previously lowercase Bind()).
TF_BUILTIN(FastArrayPop, CodeStubAssembler) {
  Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
  Node* context = Parameter(BuiltinDescriptor::kContext);
  CSA_ASSERT(this, WordEqual(Parameter(BuiltinDescriptor::kNewTarget),
                             UndefinedConstant()));

  CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
  Node* receiver = args.GetReceiver();

  Label runtime(this, Label::kDeferred);
  Label fast(this);

  // Only pop in this stub if
  // 1) the array has fast elements
  // 2) the length is writable,
  // 3) the elements backing store isn't copy-on-write,
  // 4) we aren't supposed to shrink the backing store.

  // 1) Check that the array has fast elements.
  BranchIfFastJSArray(receiver, context, FastJSArrayAccessMode::INBOUNDS_READ,
                      &fast, &runtime);

  BIND(&fast);
  {
    CSA_ASSERT(this, TaggedIsPositiveSmi(
                         LoadObjectField(receiver, JSArray::kLengthOffset)));
    Node* length = LoadAndUntagObjectField(receiver, JSArray::kLengthOffset);
    Label return_undefined(this), fast_elements(this);
    // Popping an empty array yields undefined.
    GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);

    // 2) Ensure that the length is writable.
    EnsureArrayLengthWritable(LoadMap(receiver), &runtime);

    // 3) Check that the elements backing store isn't copy-on-write.
    Node* elements = LoadElements(receiver);
    GotoIf(WordEqual(LoadMap(elements),
                     LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
           &runtime);

    Node* new_length = IntPtrSub(length, IntPtrConstant(1));

    // 4) Check that we're not supposed to shrink the backing store, as
    // implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
    Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
    GotoIf(IntPtrLessThan(
               IntPtrAdd(IntPtrAdd(new_length, new_length),
                         IntPtrConstant(JSObject::kMinAddedElementsCapacity)),
               capacity),
           &runtime);

    StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset,
                                   SmiTag(new_length));

    // Tagged kinds take the fast_elements path below; anything past the
    // terminal fast kind falls through to the double-elements handling.
    Node* elements_kind = LoadMapElementsKind(LoadMap(receiver));
    GotoIf(Int32LessThanOrEqual(elements_kind,
                                Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)),
           &fast_elements);

    Node* value = LoadFixedDoubleArrayElement(
        elements, new_length, MachineType::Float64(), 0, INTPTR_PARAMETERS,
        &return_undefined);

    // Overwrite the popped double slot with the hole NaN pattern.
    int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
    Node* offset = ElementOffsetFromIndex(
        new_length, FAST_HOLEY_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, header_size);
    if (Is64()) {
      Node* double_hole = Int64Constant(kHoleNanInt64);
      StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
                          double_hole);
    } else {
      STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
      Node* double_hole = Int32Constant(kHoleNanLower32);
      StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
                          double_hole);
      StoreNoWriteBarrier(MachineRepresentation::kWord32, elements,
                          IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
                          double_hole);
    }
    args.PopAndReturn(AllocateHeapNumberWithValue(value));

    BIND(&fast_elements);
    {
      Node* value = LoadFixedArrayElement(elements, new_length);
      StoreFixedArrayElement(elements, new_length, TheHoleConstant());
      GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
      args.PopAndReturn(value);
    }

    BIND(&return_undefined);
    { args.PopAndReturn(UndefinedConstant()); }
  }

  BIND(&runtime);
  {
    // Fall back to the generic ArrayPop stub with the original arguments.
    Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
                                 MachineType::TaggedPointer());
    TailCallStub(CodeFactory::ArrayPop(isolate()), context, target,
                 UndefinedConstant(), argc);
  }
}
|
|
|
|
|
2017-03-16 11:32:01 +00:00
|
|
|
// Fast-path implementation of Array.prototype.push. Appends arguments
// in-place for smi / object / double fast-elements kinds, transitioning
// between the per-kind append loops when an argument doesn't fit the current
// kind, and falling back to a per-argument runtime SetProperty loop
// (&default_label) or the generic ArrayPush stub (&runtime) when fast-path
// preconditions fail.
TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
  // Index of the next still-unprocessed argument; carried across labels.
  VARIABLE(arg_index, MachineType::PointerRepresentation());
  Label default_label(this, &arg_index);
  Label smi_transition(this);
  Label object_push_pre(this);
  Label object_push(this, &arg_index);
  Label double_push(this, &arg_index);
  Label double_transition(this);
  Label runtime(this, Label::kDeferred);

  // TODO(ishell): use constants from Descriptor once the JSFunction linkage
  // arguments are reordered.
  Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
  Node* context = Parameter(BuiltinDescriptor::kContext);
  CSA_ASSERT(this, WordEqual(Parameter(BuiltinDescriptor::kNewTarget),
                             UndefinedConstant()));

  CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
  Node* receiver = args.GetReceiver();
  Node* kind = nullptr;

  Label fast(this);
  BranchIfFastJSArray(receiver, context, FastJSArrayAccessMode::ANY_ACCESS,
                      &fast, &runtime);

  BIND(&fast);
  {
    arg_index.Bind(IntPtrConstant(0));
    kind = EnsureArrayPushable(receiver, &runtime);
    // Non-smi elements kinds pick their append loop in object_push_pre.
    GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
           &object_push_pre);

    Node* new_length = BuildAppendJSArray(FAST_SMI_ELEMENTS, receiver, args,
                                          arg_index, &smi_transition);
    args.PopAndReturn(new_length);
  }

  // If the argument is not a smi, then use a heavyweight SetProperty to
  // transition the array for only the single next element. If the argument is
  // a smi, the failure is due to some other reason and we should fall back on
  // the most generic implementation for the rest of the array.
  BIND(&smi_transition);
  {
    Node* arg = args.AtIndex(arg_index.value());
    GotoIf(TaggedIsSmi(arg), &default_label);
    Node* length = LoadJSArrayLength(receiver);
    // TODO(danno): Use the KeyedStoreGeneric stub here when possible,
    // calling into the runtime to do the elements transition is overkill.
    CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
                SmiConstant(STRICT));
    Increment(arg_index);
    // The runtime SetProperty call could have converted the array to dictionary
    // mode, which must be detected to abort the fast-path.
    Node* map = LoadMap(receiver);
    Node* bit_field2 = LoadMapBitField2(map);
    Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
    GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
           &default_label);

    // Resume appending in the loop matching the transitioned kind.
    GotoIfNotNumber(arg, &object_push);
    Goto(&double_push);
  }

  BIND(&object_push_pre);
  {
    Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS), &double_push,
           &object_push);
  }

  BIND(&object_push);
  {
    Node* new_length = BuildAppendJSArray(FAST_ELEMENTS, receiver, args,
                                          arg_index, &default_label);
    args.PopAndReturn(new_length);
  }

  BIND(&double_push);
  {
    Node* new_length = BuildAppendJSArray(FAST_DOUBLE_ELEMENTS, receiver, args,
                                          arg_index, &double_transition);
    args.PopAndReturn(new_length);
  }

  // If the argument is not a double, then use a heavyweight SetProperty to
  // transition the array for only the single next element. If the argument is
  // a double, the failure is due to some other reason and we should fall back
  // on the most generic implementation for the rest of the array.
  BIND(&double_transition);
  {
    Node* arg = args.AtIndex(arg_index.value());
    GotoIfNumber(arg, &default_label);
    Node* length = LoadJSArrayLength(receiver);
    // TODO(danno): Use the KeyedStoreGeneric stub here when possible,
    // calling into the runtime to do the elements transition is overkill.
    CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
                SmiConstant(STRICT));
    Increment(arg_index);
    // The runtime SetProperty call could have converted the array to dictionary
    // mode, which must be detected to abort the fast-path.
    Node* map = LoadMap(receiver);
    Node* bit_field2 = LoadMapBitField2(map);
    Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
    GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
           &default_label);
    Goto(&object_push);
  }

  // Fallback that stores un-processed arguments using the full, heavyweight
  // SetProperty machinery.
  BIND(&default_label);
  {
    args.ForEach(
        [this, receiver, context](Node* arg) {
          Node* length = LoadJSArrayLength(receiver);
          CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
                      SmiConstant(STRICT));
        },
        arg_index.value());
    args.PopAndReturn(LoadJSArrayLength(receiver));
  }

  BIND(&runtime);
  {
    // Tail-call the generic ArrayPush stub with the original arguments.
    Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
                                 MachineType::TaggedPointer());
    TailCallStub(CodeFactory::ArrayPush(isolate()), context, target,
                 UndefinedConstant(), argc);
  }
}
|
|
|
|
|
2017-05-17 14:39:34 +00:00
|
|
|
TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
|
|
|
|
Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
|
|
|
|
Node* context = Parameter(BuiltinDescriptor::kContext);
|
|
|
|
CSA_ASSERT(this, WordEqual(Parameter(BuiltinDescriptor::kNewTarget),
|
|
|
|
UndefinedConstant()));
|
|
|
|
|
|
|
|
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
|
|
|
|
Node* receiver = args.GetReceiver();
|
|
|
|
|
|
|
|
Label runtime(this, Label::kDeferred);
|
|
|
|
Label fast(this);
|
|
|
|
|
|
|
|
// Only shift in this stub if
|
|
|
|
// 1) the array has fast elements
|
|
|
|
// 2) the length is writable,
|
|
|
|
// 3) the elements backing store isn't copy-on-write,
|
|
|
|
// 4) we aren't supposed to shrink the backing store,
|
|
|
|
// 5) we aren't supposed to left-trim the backing store.
|
|
|
|
|
|
|
|
// 1) Check that the array has fast elements.
|
|
|
|
BranchIfFastJSArray(receiver, context, FastJSArrayAccessMode::INBOUNDS_READ,
|
|
|
|
&fast, &runtime);
|
|
|
|
|
|
|
|
BIND(&fast);
|
|
|
|
{
|
|
|
|
CSA_ASSERT(this, TaggedIsPositiveSmi(
|
|
|
|
LoadObjectField(receiver, JSArray::kLengthOffset)));
|
|
|
|
Node* length = LoadAndUntagObjectField(receiver, JSArray::kLengthOffset);
|
|
|
|
Label return_undefined(this), fast_elements_tagged(this),
|
|
|
|
fast_elements_untagged(this);
|
|
|
|
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
|
|
|
|
|
|
|
|
// 2) Ensure that the length is writable.
|
|
|
|
EnsureArrayLengthWritable(LoadMap(receiver), &runtime);
|
|
|
|
|
|
|
|
// 3) Check that the elements backing store isn't copy-on-write.
|
|
|
|
Node* elements = LoadElements(receiver);
|
|
|
|
GotoIf(WordEqual(LoadMap(elements),
|
|
|
|
LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
|
|
|
|
&runtime);
|
|
|
|
|
|
|
|
Node* new_length = IntPtrSub(length, IntPtrConstant(1));
|
|
|
|
|
|
|
|
// 4) Check that we're not supposed to right-trim the backing store, as
|
|
|
|
// implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
|
|
|
|
Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
|
|
|
|
GotoIf(IntPtrLessThan(
|
|
|
|
IntPtrAdd(IntPtrAdd(new_length, new_length),
|
|
|
|
IntPtrConstant(JSObject::kMinAddedElementsCapacity)),
|
|
|
|
capacity),
|
|
|
|
&runtime);
|
|
|
|
|
|
|
|
// 5) Check that we're not supposed to left-trim the backing store, as
|
|
|
|
// implemented in elements.cc:FastElementsAccessor::MoveElements.
|
|
|
|
GotoIf(IntPtrGreaterThan(new_length,
|
|
|
|
IntPtrConstant(JSArray::kMaxCopyElements)),
|
|
|
|
&runtime);
|
|
|
|
|
|
|
|
StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset,
|
|
|
|
SmiTag(new_length));
|
|
|
|
|
|
|
|
Node* elements_kind = LoadMapElementsKind(LoadMap(receiver));
|
|
|
|
GotoIf(Int32LessThanOrEqual(elements_kind,
|
|
|
|
Int32Constant(FAST_HOLEY_SMI_ELEMENTS)),
|
|
|
|
&fast_elements_untagged);
|
|
|
|
GotoIf(Int32LessThanOrEqual(elements_kind,
|
|
|
|
Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)),
|
|
|
|
&fast_elements_tagged);
|
|
|
|
Node* value = LoadFixedDoubleArrayElement(
|
|
|
|
elements, IntPtrConstant(0), MachineType::Float64(), 0,
|
|
|
|
INTPTR_PARAMETERS, &return_undefined);
|
|
|
|
|
|
|
|
int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
|
|
|
|
Node* memmove =
|
|
|
|
ExternalConstant(ExternalReference::libc_memmove_function(isolate()));
|
|
|
|
Node* start = IntPtrAdd(
|
|
|
|
BitcastTaggedToWord(elements),
|
|
|
|
ElementOffsetFromIndex(IntPtrConstant(0), FAST_HOLEY_DOUBLE_ELEMENTS,
|
|
|
|
INTPTR_PARAMETERS, header_size));
|
|
|
|
CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
|
|
|
|
MachineType::Pointer(), MachineType::UintPtr(), memmove,
|
|
|
|
start, IntPtrAdd(start, IntPtrConstant(kDoubleSize)),
|
|
|
|
IntPtrMul(new_length, IntPtrConstant(kDoubleSize)));
|
|
|
|
Node* offset = ElementOffsetFromIndex(
|
|
|
|
new_length, FAST_HOLEY_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, header_size);
|
|
|
|
if (Is64()) {
|
|
|
|
Node* double_hole = Int64Constant(kHoleNanInt64);
|
|
|
|
StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
|
|
|
|
double_hole);
|
|
|
|
} else {
|
|
|
|
STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
|
|
|
|
Node* double_hole = Int32Constant(kHoleNanLower32);
|
|
|
|
StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
|
|
|
|
double_hole);
|
|
|
|
StoreNoWriteBarrier(MachineRepresentation::kWord32, elements,
|
|
|
|
IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
|
|
|
|
double_hole);
|
|
|
|
}
|
|
|
|
args.PopAndReturn(AllocateHeapNumberWithValue(value));
|
|
|
|
|
|
|
|
Bind(&fast_elements_tagged);
|
|
|
|
{
|
|
|
|
Node* value = LoadFixedArrayElement(elements, 0);
|
|
|
|
BuildFastLoop(IntPtrConstant(0), new_length,
|
|
|
|
[&](Node* index) {
|
|
|
|
StoreFixedArrayElement(
|
|
|
|
elements, index,
|
|
|
|
LoadFixedArrayElement(
|
|
|
|
elements, IntPtrAdd(index, IntPtrConstant(1))));
|
|
|
|
},
|
|
|
|
1, ParameterMode::INTPTR_PARAMETERS,
|
|
|
|
IndexAdvanceMode::kPost);
|
|
|
|
StoreFixedArrayElement(elements, new_length, TheHoleConstant());
|
|
|
|
GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
|
|
|
|
args.PopAndReturn(value);
|
|
|
|
}
|
|
|
|
|
|
|
|
Bind(&fast_elements_untagged);
|
|
|
|
{
|
|
|
|
Node* value = LoadFixedArrayElement(elements, 0);
|
|
|
|
Node* memmove =
|
|
|
|
ExternalConstant(ExternalReference::libc_memmove_function(isolate()));
|
|
|
|
Node* start = IntPtrAdd(
|
|
|
|
BitcastTaggedToWord(elements),
|
|
|
|
ElementOffsetFromIndex(IntPtrConstant(0), FAST_HOLEY_SMI_ELEMENTS,
|
|
|
|
INTPTR_PARAMETERS, header_size));
|
|
|
|
CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
|
|
|
|
MachineType::Pointer(), MachineType::UintPtr(), memmove,
|
|
|
|
start, IntPtrAdd(start, IntPtrConstant(kPointerSize)),
|
|
|
|
IntPtrMul(new_length, IntPtrConstant(kPointerSize)));
|
|
|
|
StoreFixedArrayElement(elements, new_length, TheHoleConstant());
|
|
|
|
GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
|
|
|
|
args.PopAndReturn(value);
|
|
|
|
}
|
|
|
|
|
|
|
|
BIND(&return_undefined);
|
|
|
|
{ args.PopAndReturn(UndefinedConstant()); }
|
|
|
|
}
|
|
|
|
|
|
|
|
BIND(&runtime);
|
|
|
|
{
|
|
|
|
Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
|
|
|
|
MachineType::TaggedPointer());
|
|
|
|
TailCallStub(CodeFactory::ArrayShift(isolate()), context, target,
|
|
|
|
UndefinedConstant(), argc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doesn't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) {
  // Spec-compliant (slow) loop continuation for Array.prototype.forEach.
  // The fast-path builtin (ArrayForEach) tail-calls into this continuation
  // when one of its fast-case assumptions is invalidated mid-iteration,
  // resuming the loop from the interrupted index.
  Node* const ctx = Parameter(Descriptor::kContext);
  Node* const recv = Parameter(Descriptor::kReceiver);
  Node* const callback = Parameter(Descriptor::kCallbackFn);
  Node* const this_argument = Parameter(Descriptor::kThisArg);
  Node* const result_array = Parameter(Descriptor::kArray);
  Node* const obj = Parameter(Descriptor::kObject);
  Node* const start_k = Parameter(Descriptor::kInitialK);
  Node* const length = Parameter(Descriptor::kLength);
  Node* const to_index = Parameter(Descriptor::kTo);

  // Restore the assembler's loop state from the continuation arguments.
  InitIteratingArrayBuiltinLoopContinuation(ctx, recv, callback, this_argument,
                                            result_array, obj, start_k, length,
                                            to_index);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinCodeStubAssembler::ForEachProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
|
2017-03-16 11:32:01 +00:00
|
|
|
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doesn't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
  // Fast-path main body of Array.prototype.forEach. Bails out to the
  // spec-compliant ArrayForEachLoopContinuation builtin when a fast-case
  // assumption fails during iteration.
  Node* const ctx = Parameter(BuiltinDescriptor::kContext);
  Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
  Node* const arg_count =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments arguments(this, arg_count);

  Node* const recv = arguments.GetReceiver();
  // Missing arguments default to undefined, matching the spec's handling of
  // absent callbackfn/thisArg parameters.
  Node* const callback =
      arguments.GetOptionalArgumentValue(0, UndefinedConstant());
  Node* const this_argument =
      arguments.GetOptionalArgumentValue(1, UndefinedConstant());

  InitIteratingArrayBuiltinBody(ctx, recv, callback, this_argument, new_target,
                                arg_count);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.forEach",
      &ArrayBuiltinCodeStubAssembler::ForEachResultGenerator,
      &ArrayBuiltinCodeStubAssembler::ForEachProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
      Builtins::CallableFor(isolate(),
                            Builtins::kArrayForEachLoopContinuation));
}
|
|
|
|
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doesn't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
  // Spec-compliant (slow) loop continuation for Array.prototype.some.
  // Entered via tail call from the fast-path ArraySome builtin when its
  // fast-case assumptions no longer hold; resumes at the interrupted index.
  Node* const ctx = Parameter(Descriptor::kContext);
  Node* const recv = Parameter(Descriptor::kReceiver);
  Node* const callback = Parameter(Descriptor::kCallbackFn);
  Node* const this_argument = Parameter(Descriptor::kThisArg);
  Node* const result_array = Parameter(Descriptor::kArray);
  Node* const obj = Parameter(Descriptor::kObject);
  Node* const start_k = Parameter(Descriptor::kInitialK);
  Node* const length = Parameter(Descriptor::kLength);
  Node* const to_index = Parameter(Descriptor::kTo);

  // Restore the assembler's loop state from the continuation arguments.
  InitIteratingArrayBuiltinLoopContinuation(ctx, recv, callback, this_argument,
                                            result_array, obj, start_k, length,
                                            to_index);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinCodeStubAssembler::SomeProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
|
|
|
|
|
|
|
|
TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
  // Fast-path main body of Array.prototype.some. Bails out to the
  // spec-compliant ArraySomeLoopContinuation builtin when a fast-case
  // assumption fails during iteration.
  Node* const ctx = Parameter(BuiltinDescriptor::kContext);
  Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
  Node* const arg_count =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments arguments(this, arg_count);

  Node* const recv = arguments.GetReceiver();
  // Missing arguments default to undefined, matching the spec's handling of
  // absent callbackfn/thisArg parameters.
  Node* const callback =
      arguments.GetOptionalArgumentValue(0, UndefinedConstant());
  Node* const this_argument =
      arguments.GetOptionalArgumentValue(1, UndefinedConstant());

  InitIteratingArrayBuiltinBody(ctx, recv, callback, this_argument, new_target,
                                arg_count);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.some",
      &ArrayBuiltinCodeStubAssembler::SomeResultGenerator,
      &ArrayBuiltinCodeStubAssembler::SomeProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
      Builtins::CallableFor(isolate(), Builtins::kArraySomeLoopContinuation));
}
|
|
|
|
|
2017-03-30 16:36:53 +00:00
|
|
|
TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
  // %TypedArray%.prototype.some. TypedArray iteration has no separate slow
  // continuation: the backing store cannot change shape mid-loop the way a
  // regular JSArray's elements can.
  Node* const ctx = Parameter(BuiltinDescriptor::kContext);
  Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
  Node* const arg_count =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments arguments(this, arg_count);

  Node* const recv = arguments.GetReceiver();
  // Missing arguments default to undefined, matching the spec's handling of
  // absent callbackfn/thisArg parameters.
  Node* const callback =
      arguments.GetOptionalArgumentValue(0, UndefinedConstant());
  Node* const this_argument =
      arguments.GetOptionalArgumentValue(1, UndefinedConstant());

  InitIteratingArrayBuiltinBody(ctx, recv, callback, this_argument, new_target,
                                arg_count);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.some",
      &ArrayBuiltinCodeStubAssembler::SomeResultGenerator,
      &ArrayBuiltinCodeStubAssembler::SomeProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
|
|
|
|
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doesn't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) {
  // Spec-compliant (slow) loop continuation for Array.prototype.every.
  // Entered via tail call from the fast-path ArrayEvery builtin when its
  // fast-case assumptions no longer hold; resumes at the interrupted index.
  Node* const ctx = Parameter(Descriptor::kContext);
  Node* const recv = Parameter(Descriptor::kReceiver);
  Node* const callback = Parameter(Descriptor::kCallbackFn);
  Node* const this_argument = Parameter(Descriptor::kThisArg);
  Node* const result_array = Parameter(Descriptor::kArray);
  Node* const obj = Parameter(Descriptor::kObject);
  Node* const start_k = Parameter(Descriptor::kInitialK);
  Node* const length = Parameter(Descriptor::kLength);
  Node* const to_index = Parameter(Descriptor::kTo);

  // Restore the assembler's loop state from the continuation arguments.
  InitIteratingArrayBuiltinLoopContinuation(ctx, recv, callback, this_argument,
                                            result_array, obj, start_k, length,
                                            to_index);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinCodeStubAssembler::EveryProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
|
|
|
|
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doesn't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
  // Fast-path main body of Array.prototype.every. Bails out to the
  // spec-compliant ArrayEveryLoopContinuation builtin when a fast-case
  // assumption fails during iteration.
  Node* const ctx = Parameter(BuiltinDescriptor::kContext);
  Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
  Node* const arg_count =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments arguments(this, arg_count);

  Node* const recv = arguments.GetReceiver();
  // Missing arguments default to undefined, matching the spec's handling of
  // absent callbackfn/thisArg parameters.
  Node* const callback =
      arguments.GetOptionalArgumentValue(0, UndefinedConstant());
  Node* const this_argument =
      arguments.GetOptionalArgumentValue(1, UndefinedConstant());

  InitIteratingArrayBuiltinBody(ctx, recv, callback, this_argument, new_target,
                                arg_count);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.every",
      &ArrayBuiltinCodeStubAssembler::EveryResultGenerator,
      &ArrayBuiltinCodeStubAssembler::EveryProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
      Builtins::CallableFor(isolate(), Builtins::kArrayEveryLoopContinuation));
}
|
|
|
|
|
2017-03-30 16:36:53 +00:00
|
|
|
TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinCodeStubAssembler) {
  // %TypedArray%.prototype.every. TypedArray iteration has no separate slow
  // continuation: the backing store cannot change shape mid-loop the way a
  // regular JSArray's elements can.
  Node* const ctx = Parameter(BuiltinDescriptor::kContext);
  Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
  Node* const arg_count =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments arguments(this, arg_count);

  Node* const recv = arguments.GetReceiver();
  // Missing arguments default to undefined, matching the spec's handling of
  // absent callbackfn/thisArg parameters.
  Node* const callback =
      arguments.GetOptionalArgumentValue(0, UndefinedConstant());
  Node* const this_argument =
      arguments.GetOptionalArgumentValue(1, UndefinedConstant());

  InitIteratingArrayBuiltinBody(ctx, recv, callback, this_argument, new_target,
                                arg_count);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.every",
      &ArrayBuiltinCodeStubAssembler::EveryResultGenerator,
      &ArrayBuiltinCodeStubAssembler::EveryProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
|
|
|
|
|
2017-03-21 15:57:38 +00:00
|
|
|
TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinCodeStubAssembler) {
  // Spec-compliant (slow) continuation of Array.prototype.reduce's loop;
  // entered when the fast-path builtin bails out mid-iteration.
  Node* const context = Parameter(Descriptor::kContext);
  Node* const receiver = Parameter(Descriptor::kReceiver);
  Node* const callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* const this_arg = Parameter(Descriptor::kThisArg);
  Node* const accumulator = Parameter(Descriptor::kAccumulator);
  Node* const object = Parameter(Descriptor::kObject);
  Node* const initial_k = Parameter(Descriptor::kInitialK);
  Node* const len = Parameter(Descriptor::kLength);
  Node* const to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, accumulator, object,
                                            initial_k, len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
      &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction);
}
|
|
|
|
|
|
|
|
TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
  // Array.prototype.reduce(callbackfn[, initialValue])
  Node* const argument_count =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments arguments(this, argument_count);

  Node* const context = Parameter(BuiltinDescriptor::kContext);
  Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
  Node* const receiver = arguments.GetReceiver();
  Node* const callback =
      arguments.GetOptionalArgumentValue(0, UndefinedConstant());
  // TheHole marks "no initialValue supplied" for the result generator.
  Node* const initial_value =
      arguments.GetOptionalArgumentValue(1, TheHoleConstant());

  InitIteratingArrayBuiltinBody(context, receiver, callback, initial_value,
                                new_target, argument_count);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.reduce",
      &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
      &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
      &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
      Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
}
|
|
|
|
|
2017-04-11 11:02:27 +00:00
|
|
|
TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinCodeStubAssembler) {
  // %TypedArray%.prototype.reduce(callbackfn[, initialValue])
  Node* const argument_count =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments arguments(this, argument_count);

  Node* const context = Parameter(BuiltinDescriptor::kContext);
  Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
  Node* const receiver = arguments.GetReceiver();
  Node* const callback =
      arguments.GetOptionalArgumentValue(0, UndefinedConstant());
  // TheHole marks "no initialValue supplied" for the result generator.
  Node* const initial_value =
      arguments.GetOptionalArgumentValue(1, TheHoleConstant());

  InitIteratingArrayBuiltinBody(context, receiver, callback, initial_value,
                                new_target, argument_count);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.reduce",
      &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
      &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
      &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction);
}
|
|
|
|
|
2017-03-24 13:35:56 +00:00
|
|
|
TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinCodeStubAssembler) {
  // Spec-compliant continuation of Array.prototype.reduceRight's loop;
  // identical to the reduce continuation except it walks the array backwards.
  Node* const context = Parameter(Descriptor::kContext);
  Node* const receiver = Parameter(Descriptor::kReceiver);
  Node* const callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* const this_arg = Parameter(Descriptor::kThisArg);
  Node* const accumulator = Parameter(Descriptor::kAccumulator);
  Node* const object = Parameter(Descriptor::kObject);
  Node* const initial_k = Parameter(Descriptor::kInitialK);
  Node* const len = Parameter(Descriptor::kLength);
  Node* const to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, accumulator, object,
                                            initial_k, len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
      &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
      ForEachDirection::kReverse);
}
|
|
|
|
|
|
|
|
TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
  // Array.prototype.reduceRight(callbackfn[, initialValue])
  Node* const argument_count =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments arguments(this, argument_count);

  Node* const context = Parameter(BuiltinDescriptor::kContext);
  Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
  Node* const receiver = arguments.GetReceiver();
  Node* const callback =
      arguments.GetOptionalArgumentValue(0, UndefinedConstant());
  // TheHole marks "no initialValue supplied" for the result generator.
  Node* const initial_value =
      arguments.GetOptionalArgumentValue(1, TheHoleConstant());

  InitIteratingArrayBuiltinBody(context, receiver, callback, initial_value,
                                new_target, argument_count);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.reduceRight",
      &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
      &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
      &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
      Builtins::CallableFor(isolate(),
                            Builtins::kArrayReduceRightLoopContinuation),
      ForEachDirection::kReverse);
}
|
|
|
|
|
2017-04-11 11:02:27 +00:00
|
|
|
TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinCodeStubAssembler) {
  // %TypedArray%.prototype.reduceRight(callbackfn[, initialValue])
  Node* const argument_count =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments arguments(this, argument_count);

  Node* const context = Parameter(BuiltinDescriptor::kContext);
  Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
  Node* const receiver = arguments.GetReceiver();
  Node* const callback =
      arguments.GetOptionalArgumentValue(0, UndefinedConstant());
  // TheHole marks "no initialValue supplied" for the result generator.
  Node* const initial_value =
      arguments.GetOptionalArgumentValue(1, TheHoleConstant());

  InitIteratingArrayBuiltinBody(context, receiver, callback, initial_value,
                                new_target, argument_count);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.reduceRight",
      &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
      &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
      &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
      ForEachDirection::kReverse);
}
|
|
|
|
|
2017-03-21 15:57:38 +00:00
|
|
|
TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinCodeStubAssembler) {
  // Spec-compliant continuation of Array.prototype.filter's loop; entered
  // when the fast-path builtin bails out mid-iteration.
  Node* const context = Parameter(Descriptor::kContext);
  Node* const receiver = Parameter(Descriptor::kReceiver);
  Node* const callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* const this_arg = Parameter(Descriptor::kThisArg);
  Node* const array = Parameter(Descriptor::kArray);
  Node* const object = Parameter(Descriptor::kObject);
  Node* const initial_k = Parameter(Descriptor::kInitialK);
  Node* const len = Parameter(Descriptor::kLength);
  Node* const to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, array, object, initial_k,
                                            len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinCodeStubAssembler::FilterProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
|
|
|
|
|
|
|
|
TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
  // Array.prototype.filter(callbackfn[, thisArg])
  Node* const argument_count =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments arguments(this, argument_count);

  Node* const context = Parameter(BuiltinDescriptor::kContext);
  Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
  Node* const receiver = arguments.GetReceiver();
  Node* const callback =
      arguments.GetOptionalArgumentValue(0, UndefinedConstant());
  Node* const this_argument =
      arguments.GetOptionalArgumentValue(1, UndefinedConstant());

  InitIteratingArrayBuiltinBody(context, receiver, callback, this_argument,
                                new_target, argument_count);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.filter",
      &ArrayBuiltinCodeStubAssembler::FilterResultGenerator,
      &ArrayBuiltinCodeStubAssembler::FilterProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
      Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation));
}
|
|
|
|
|
2017-03-24 11:01:53 +00:00
|
|
|
TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
  // Spec-compliant continuation of Array.prototype.map's loop; uses the
  // fully spec-compliant processor rather than the fast-path one.
  Node* const context = Parameter(Descriptor::kContext);
  Node* const receiver = Parameter(Descriptor::kReceiver);
  Node* const callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* const this_arg = Parameter(Descriptor::kThisArg);
  Node* const array = Parameter(Descriptor::kArray);
  Node* const object = Parameter(Descriptor::kObject);
  Node* const initial_k = Parameter(Descriptor::kInitialK);
  Node* const len = Parameter(Descriptor::kLength);
  Node* const to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, array, object, initial_k,
                                            len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinCodeStubAssembler::SpecCompliantMapProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
|
|
|
|
|
|
|
|
TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
  // Array.prototype.map(callbackfn[, thisArg])
  Node* const argument_count =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments arguments(this, argument_count);

  Node* const context = Parameter(BuiltinDescriptor::kContext);
  Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
  Node* const receiver = arguments.GetReceiver();
  Node* const callback =
      arguments.GetOptionalArgumentValue(0, UndefinedConstant());
  Node* const this_argument =
      arguments.GetOptionalArgumentValue(1, UndefinedConstant());

  InitIteratingArrayBuiltinBody(context, receiver, callback, this_argument,
                                new_target, argument_count);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.map",
      &ArrayBuiltinCodeStubAssembler::MapResultGenerator,
      &ArrayBuiltinCodeStubAssembler::FastMapProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
      Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation));
}
|
|
|
|
|
2017-03-16 11:32:01 +00:00
|
|
|
TF_BUILTIN(ArrayIsArray, CodeStubAssembler) {
  // Array.isArray(arg): true for JSArrays, runtime call for proxies
  // (a proxy may wrap an array), false for everything else.
  Node* const object = Parameter(Descriptor::kArg);
  Node* const context = Parameter(Descriptor::kContext);

  Label dispatch_to_runtime(this), if_array(this), if_not_array(this);

  // Smis are never arrays.
  GotoIf(TaggedIsSmi(object), &if_not_array);
  Node* const instance_type = LoadInstanceType(object);
  GotoIf(Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE)), &if_array);

  // TODO(verwaest): Handle proxies in-place.
  Branch(Word32Equal(instance_type, Int32Constant(JS_PROXY_TYPE)),
         &dispatch_to_runtime, &if_not_array);

  BIND(&if_array);
  Return(BooleanConstant(true));

  BIND(&if_not_array);
  Return(BooleanConstant(false));

  BIND(&dispatch_to_runtime);
  Return(CallRuntime(Runtime::kArrayIsArray, context, object));
}
|
|
|
|
|
2017-05-16 15:05:29 +00:00
|
|
|
// Assembler shared by Array.prototype.includes and Array.prototype.indexOf.
// The two builtins share one search loop (Generate) and differ only in the
// comparison/result details selected by SearchVariant.
class ArrayIncludesIndexofAssembler : public CodeStubAssembler {
 public:
  explicit ArrayIncludesIndexofAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

  // Selects which builtin the generated code implements.
  enum SearchVariant { kIncludes, kIndexOf };

  // Emits the complete body of the search builtin for the given variant.
  void Generate(SearchVariant variant);
};
|
2017-03-16 11:32:01 +00:00
|
|
|
|
2017-05-16 15:05:29 +00:00
|
|
|
// Shared body of Array.prototype.includes / Array.prototype.indexOf.
// Fast paths cover fast-mode JSArrays with Smi/object, packed-double and
// holey-double elements; everything else (plus non-Smi fromIndex values)
// falls through to the runtime.
void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
  const int kSearchElementArg = 0;
  const int kFromIndexArg = 1;

  Node* argc =
      ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
  CodeStubArguments args(this, argc);

  Node* array = args.GetReceiver();
  Node* search_element =
      args.GetOptionalArgumentValue(kSearchElementArg, UndefinedConstant());
  Node* context = Parameter(BuiltinDescriptor::kContext);

  Node* intptr_zero = IntPtrConstant(0);

  Label init_index(this), return_found(this), return_not_found(this),
      call_runtime(this);

  // Take slow path if not a JSArray, if retrieving elements requires
  // traversing prototype, or if access checks are required.
  BranchIfFastJSArray(array, context, FastJSArrayAccessMode::INBOUNDS_READ,
                      &init_index, &call_runtime);

  BIND(&init_index);
  VARIABLE(index_var, MachineType::PointerRepresentation(), intptr_zero);

  // JSArray length is always a positive Smi for fast arrays.
  CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array)));
  Node* array_length = SmiUntag(LoadJSArrayLength(array));

  {
    // Initialize fromIndex.
    Label is_smi(this), is_nonsmi(this), done(this);

    // If no fromIndex was passed, default to 0.
    GotoIf(IntPtrLessThanOrEqual(argc, IntPtrConstant(kFromIndexArg)), &done);

    Node* start_from = args.AtIndex(kFromIndexArg);
    // Handle Smis and undefined here and everything else in runtime.
    // We must be very careful with side effects from the ToInteger conversion,
    // as the side effects might render previously checked assumptions about
    // the receiver being a fast JSArray and its length invalid.
    Branch(TaggedIsSmi(start_from), &is_smi, &is_nonsmi);

    BIND(&is_nonsmi);
    {
      // Anything but undefined would need ToInteger (observable side
      // effects), so punt to the runtime.
      GotoIfNot(IsUndefined(start_from), &call_runtime);
      Goto(&done);
    }
    BIND(&is_smi);
    {
      Node* intptr_start_from = SmiUntag(start_from);
      index_var.Bind(intptr_start_from);

      GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done);
      // The fromIndex is negative: add it to the array's length.
      index_var.Bind(IntPtrAdd(array_length, index_var.value()));
      // Clamp negative results at zero.
      GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done);
      index_var.Bind(intptr_zero);
      Goto(&done);
    }
    BIND(&done);
  }

  // Fail early if startIndex >= array.length.
  GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), array_length),
         &return_not_found);

  // Dispatch on the receiver's elements kind.
  Label if_smiorobjects(this), if_packed_doubles(this), if_holey_doubles(this);

  Node* elements_kind = LoadMapElementsKind(LoadMap(array));
  Node* elements = LoadElements(array);
  // The unsigned <= FAST_HOLEY_ELEMENTS test below relies on this ordering.
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  GotoIf(
      Uint32LessThanOrEqual(elements_kind, Int32Constant(FAST_HOLEY_ELEMENTS)),
      &if_smiorobjects);
  GotoIf(Word32Equal(elements_kind, Int32Constant(FAST_DOUBLE_ELEMENTS)),
         &if_packed_doubles);
  GotoIf(Word32Equal(elements_kind, Int32Constant(FAST_HOLEY_DOUBLE_ELEMENTS)),
         &if_holey_doubles);
  Goto(&return_not_found);

  BIND(&if_smiorobjects);
  {
    // Smi/object elements: pick a specialized loop based on the type of
    // the search element (identity, numeric, string, or undefined).
    VARIABLE(search_num, MachineRepresentation::kFloat64);
    Label ident_loop(this, &index_var), heap_num_loop(this, &search_num),
        string_loop(this), undef_loop(this, &index_var), not_smi(this),
        not_heap_num(this);

    GotoIfNot(TaggedIsSmi(search_element), &not_smi);
    search_num.Bind(SmiToFloat64(search_element));
    Goto(&heap_num_loop);

    BIND(&not_smi);
    if (variant == kIncludes) {
      GotoIf(IsUndefined(search_element), &undef_loop);
    }
    Node* map = LoadMap(search_element);
    GotoIfNot(IsHeapNumberMap(map), &not_heap_num);
    search_num.Bind(LoadHeapNumberValue(search_element));
    Goto(&heap_num_loop);

    BIND(&not_heap_num);
    Node* search_type = LoadMapInstanceType(map);
    GotoIf(IsStringInstanceType(search_type), &string_loop);
    Goto(&ident_loop);

    // Pointer-identity comparison loop (non-number, non-string elements).
    BIND(&ident_loop);
    {
      GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
                &return_not_found);
      Node* element_k = LoadFixedArrayElement(elements, index_var.value());
      GotoIf(WordEqual(element_k, search_element), &return_found);

      Increment(index_var);
      Goto(&ident_loop);
    }

    // includes() treats holes as undefined; indexOf() does not search for
    // undefined, so this loop only exists in the kIncludes variant.
    if (variant == kIncludes) {
      BIND(&undef_loop);

      GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
                &return_not_found);
      Node* element_k = LoadFixedArrayElement(elements, index_var.value());
      GotoIf(IsUndefined(element_k), &return_found);
      GotoIf(IsTheHole(element_k), &return_found);

      Increment(index_var);
      Goto(&undef_loop);
    }

    BIND(&heap_num_loop);
    {
      Label nan_loop(this, &index_var), not_nan_loop(this, &index_var);
      Label* nan_handling =
          variant == kIncludes ? &nan_loop : &return_not_found;
      BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);

      BIND(&not_nan_loop);
      {
        Label continue_loop(this), not_smi(this);
        GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
                  &return_not_found);
        Node* element_k = LoadFixedArrayElement(elements, index_var.value());
        GotoIfNot(TaggedIsSmi(element_k), &not_smi);
        Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
               &return_found, &continue_loop);

        BIND(&not_smi);
        GotoIfNot(IsHeapNumber(element_k), &continue_loop);
        Branch(Float64Equal(search_num.value(), LoadHeapNumberValue(element_k)),
               &return_found, &continue_loop);

        BIND(&continue_loop);
        Increment(index_var);
        Goto(&not_nan_loop);
      }

      // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
      if (variant == kIncludes) {
        BIND(&nan_loop);
        Label continue_loop(this);
        GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
                  &return_not_found);
        Node* element_k = LoadFixedArrayElement(elements, index_var.value());
        GotoIf(TaggedIsSmi(element_k), &continue_loop);
        GotoIfNot(IsHeapNumber(element_k), &continue_loop);
        BranchIfFloat64IsNaN(LoadHeapNumberValue(element_k), &return_found,
                             &continue_loop);

        BIND(&continue_loop);
        Increment(index_var);
        Goto(&nan_loop);
      }
    }

    BIND(&string_loop);
    {
      CSA_ASSERT(this, IsString(search_element));
      Label continue_loop(this), next_iteration(this, &index_var),
          slow_compare(this), runtime(this, Label::kDeferred);
      Node* search_length = LoadStringLength(search_element);
      Goto(&next_iteration);
      BIND(&next_iteration);
      GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
                &return_not_found);
      Node* element_k = LoadFixedArrayElement(elements, index_var.value());
      GotoIf(TaggedIsSmi(element_k), &continue_loop);
      // Same-object fast path before comparing contents.
      GotoIf(WordEqual(search_element, element_k), &return_found);
      Node* element_k_type = LoadInstanceType(element_k);
      GotoIfNot(IsStringInstanceType(element_k_type), &continue_loop);
      // Differing lengths can never be equal; skip the content compare.
      Branch(WordEqual(search_length, LoadStringLength(element_k)),
             &slow_compare, &continue_loop);

      BIND(&slow_compare);
      StringBuiltinsAssembler string_asm(state());
      string_asm.StringEqual_Core(context, search_element, search_type,
                                  search_length, element_k, element_k_type,
                                  &return_found, &continue_loop, &runtime);
      BIND(&runtime);
      Node* result = CallRuntime(Runtime::kStringEqual, context, search_element,
                                 element_k);
      Branch(WordEqual(BooleanConstant(true), result), &return_found,
             &continue_loop);

      BIND(&continue_loop);
      Increment(index_var);
      Goto(&next_iteration);
    }
  }

  BIND(&if_packed_doubles);
  {
    // Packed-double elements: only numbers can match.
    Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
        hole_loop(this, &index_var), search_notnan(this);
    VARIABLE(search_num, MachineRepresentation::kFloat64);

    GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
    search_num.Bind(SmiToFloat64(search_element));
    Goto(&not_nan_loop);

    BIND(&search_notnan);
    GotoIfNot(IsHeapNumber(search_element), &return_not_found);

    search_num.Bind(LoadHeapNumberValue(search_element));

    Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
    BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);

    BIND(&not_nan_loop);
    {
      Label continue_loop(this);
      GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
                &return_not_found);
      Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
                                                    MachineType::Float64());
      Branch(Float64Equal(element_k, search_num.value()), &return_found,
             &continue_loop);
      BIND(&continue_loop);
      Increment(index_var);
      Goto(&not_nan_loop);
    }

    // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
    if (variant == kIncludes) {
      BIND(&nan_loop);
      Label continue_loop(this);
      GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
                &return_not_found);
      Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
                                                    MachineType::Float64());
      BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
      BIND(&continue_loop);
      Increment(index_var);
      Goto(&nan_loop);
    }
  }

  BIND(&if_holey_doubles);
  {
    // Holey-double elements: like packed doubles, but hole slots must be
    // handled (includes() treats them as undefined).
    Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
        hole_loop(this, &index_var), search_notnan(this);
    VARIABLE(search_num, MachineRepresentation::kFloat64);

    GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
    search_num.Bind(SmiToFloat64(search_element));
    Goto(&not_nan_loop);

    BIND(&search_notnan);
    if (variant == kIncludes) {
      GotoIf(IsUndefined(search_element), &hole_loop);
    }
    GotoIfNot(IsHeapNumber(search_element), &return_not_found);

    search_num.Bind(LoadHeapNumberValue(search_element));

    Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
    BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);

    BIND(&not_nan_loop);
    {
      Label continue_loop(this);
      GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
                &return_not_found);

      // No need for hole checking here; the following Float64Equal will
      // return 'not equal' for holes anyway.
      Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
                                                    MachineType::Float64());

      Branch(Float64Equal(element_k, search_num.value()), &return_found,
             &continue_loop);
      BIND(&continue_loop);
      Increment(index_var);
      Goto(&not_nan_loop);
    }

    // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
    if (variant == kIncludes) {
      BIND(&nan_loop);
      Label continue_loop(this);
      GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
                &return_not_found);

      // Load double value or continue if it's the hole NaN.
      Node* element_k = LoadFixedDoubleArrayElement(
          elements, index_var.value(), MachineType::Float64(), 0,
          INTPTR_PARAMETERS, &continue_loop);

      BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
      BIND(&continue_loop);
      Increment(index_var);
      Goto(&nan_loop);
    }

    // Array.p.includes treats the hole as undefined.
    if (variant == kIncludes) {
      BIND(&hole_loop);
      GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
                &return_not_found);

      // Check if the element is a double hole, but don't load it.
      LoadFixedDoubleArrayElement(elements, index_var.value(),
                                  MachineType::None(), 0, INTPTR_PARAMETERS,
                                  &return_found);

      Increment(index_var);
      Goto(&hole_loop);
    }
  }

  // includes() returns a boolean; indexOf() returns the index (or -1).
  BIND(&return_found);
  args.PopAndReturn(variant == kIncludes ? TrueConstant()
                                         : SmiTag(index_var.value()));

  BIND(&return_not_found);
  args.PopAndReturn(variant == kIncludes ? FalseConstant()
                                         : NumberConstant(-1));

  BIND(&call_runtime);
  {
    Node* start_from =
        args.GetOptionalArgumentValue(kFromIndexArg, UndefinedConstant());
    Runtime::FunctionId function = variant == kIncludes
                                       ? Runtime::kArrayIncludes_Slow
                                       : Runtime::kArrayIndexOf;
    args.PopAndReturn(
        CallRuntime(function, context, array, search_element, start_from));
  }
}
|
|
|
|
|
2017-05-16 15:05:29 +00:00
|
|
|
// Array.prototype.includes: shared search loop in SameValueZero mode.
TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) { Generate(kIncludes); }
|
|
|
|
|
|
|
|
// Array.prototype.indexOf: shared search loop in strict-equality mode.
TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) {
  Generate(kIndexOf);
}
|
|
|
|
|
2017-03-16 11:32:01 +00:00
|
|
|
// Assembler shared by the Array.prototype iteration methods (values/entries
// and friends); each builtin delegates to the common body below with its
// own IterationKind.
class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
 public:
  explicit ArrayPrototypeIterationAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

 protected:
  // Common body: coerces the receiver to an object if necessary, then
  // returns a freshly created array iterator of the requested kind.
  void Generate_ArrayPrototypeIterationMethod(Node* context, Node* receiver,
                                              IterationKind iteration_kind) {
    VARIABLE(var_array, MachineRepresentation::kTagged);
    VARIABLE(var_map, MachineRepresentation::kTagged);
    VARIABLE(var_type, MachineRepresentation::kWord32);

    Label if_isnotobject(this, Label::kDeferred);
    Label create_array_iterator(this);

    // Fast path: receiver is already a JSReceiver — use it directly.
    GotoIf(TaggedIsSmi(receiver), &if_isnotobject);
    var_array.Bind(receiver);
    var_map.Bind(LoadMap(receiver));
    var_type.Bind(LoadMapInstanceType(var_map.value()));
    Branch(IsJSReceiverInstanceType(var_type.value()), &create_array_iterator,
           &if_isnotobject);

    BIND(&if_isnotobject);
    {
      // Slow path: wrap the primitive receiver via ToObject first.
      Callable callable = CodeFactory::ToObject(isolate());
      Node* result = CallStub(callable, context, receiver);
      var_array.Bind(result);
      var_map.Bind(LoadMap(result));
      var_type.Bind(LoadMapInstanceType(var_map.value()));
      Goto(&create_array_iterator);
    }

    BIND(&create_array_iterator);
    Return(CreateArrayIterator(var_array.value(), var_map.value(),
                               var_type.value(), context, iteration_kind));
  }
};
|
|
|
|
|
|
|
|
// Array.prototype.values: iterate the receiver's element values.
TF_BUILTIN(ArrayPrototypeValues, ArrayPrototypeIterationAssembler) {
  Node* const native_context = Parameter(Descriptor::kContext);
  Node* const iterated_object = Parameter(Descriptor::kReceiver);
  Generate_ArrayPrototypeIterationMethod(native_context, iterated_object,
                                         IterationKind::kValues);
}
|
|
|
|
|
|
|
|
// Array.prototype.entries: iterate [index, value] pairs of the receiver.
TF_BUILTIN(ArrayPrototypeEntries, ArrayPrototypeIterationAssembler) {
  Node* const native_context = Parameter(Descriptor::kContext);
  Node* const iterated_object = Parameter(Descriptor::kReceiver);
  Generate_ArrayPrototypeIterationMethod(native_context, iterated_object,
                                         IterationKind::kEntries);
}
|
|
|
|
|
|
|
|
// Array.prototype.keys: iterate the receiver's element indices.
TF_BUILTIN(ArrayPrototypeKeys, ArrayPrototypeIterationAssembler) {
  Node* const native_context = Parameter(Descriptor::kContext);
  Node* const iterated_object = Parameter(Descriptor::kReceiver);
  Generate_ArrayPrototypeIterationMethod(native_context, iterated_object,
                                         IterationKind::kKeys);
}
|
|
|
|
|
|
|
|
// %ArrayIteratorPrototype%.next. Advances an array iterator one step and
// returns an iterator result object {value, done}. There are three element
// sources, chosen by comparing the iterator's cached map against the
// iterated object's current map and by the iterator's instance type:
//   1. fast JSArray path (cached map still matches),
//   2. generic path (slow arrays / arbitrary receivers, via GetProperty),
//   3. typed array path (direct loads from the backing store).
TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
  // Method name used in TypeError messages thrown below.
  Handle<String> operation = factory()->NewStringFromAsciiChecked(
      "Array Iterator.prototype.next", TENURED);

  Node* context = Parameter(Descriptor::kContext);
  Node* iterator = Parameter(Descriptor::kReceiver);

  VARIABLE(var_value, MachineRepresentation::kTagged);
  VARIABLE(var_done, MachineRepresentation::kTagged);

  // Required, or else `throw_bad_receiver` fails a DCHECK due to these
  // variables not being bound along all paths, despite not being used.
  var_done.Bind(TrueConstant());
  var_value.Bind(UndefinedConstant());

  Label throw_bad_receiver(this, Label::kDeferred);
  Label set_done(this);
  Label allocate_key_result(this);
  Label allocate_entry_if_needed(this);
  Label allocate_iterator_result(this);
  Label generic_values(this);

  // If O does not have all of the internal slots of an Array Iterator Instance
  // (22.1.5.3), throw a TypeError exception
  GotoIf(TaggedIsSmi(iterator), &throw_bad_receiver);
  Node* instance_type = LoadInstanceType(iterator);
  // Unsigned range check: instance_type must lie within
  // [FIRST_ARRAY_ITERATOR_TYPE, LAST_ARRAY_ITERATOR_TYPE].
  GotoIf(
      Uint32LessThan(
          Int32Constant(LAST_ARRAY_ITERATOR_TYPE - FIRST_ARRAY_ITERATOR_TYPE),
          Int32Sub(instance_type, Int32Constant(FIRST_ARRAY_ITERATOR_TYPE))),
      &throw_bad_receiver);

  // Let a be O.[[IteratedObject]].
  Node* array =
      LoadObjectField(iterator, JSArrayIterator::kIteratedObjectOffset);

  // Let index be O.[[ArrayIteratorNextIndex]].
  Node* index = LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset);
  // orig_map is the iterated object's map as cached when the iterator was
  // created; it is reset to undefined once the fast path is abandoned.
  Node* orig_map =
      LoadObjectField(iterator, JSArrayIterator::kIteratedObjectMapOffset);
  Node* array_map = LoadMap(array);

  Label if_isfastarray(this), if_isnotfastarray(this),
      if_isdetached(this, Label::kDeferred);

  // Map unchanged since iterator creation => fast path is still valid.
  Branch(WordEqual(orig_map, array_map), &if_isfastarray, &if_isnotfastarray);

  BIND(&if_isfastarray);
  {
    CSA_ASSERT(this, Word32Equal(LoadMapInstanceType(array_map),
                                 Int32Constant(JS_ARRAY_TYPE)));

    Node* length = LoadObjectField(array, JSArray::kLengthOffset);

    CSA_ASSERT(this, TaggedIsSmi(length));
    CSA_ASSERT(this, TaggedIsSmi(index));

    // Exhausted: detach the iterated object and return {done: true}.
    GotoIfNot(SmiBelow(index, length), &set_done);

    // Bump [[ArrayIteratorNextIndex]]; Smi stores need no write barrier.
    Node* one = SmiConstant(Smi::FromInt(1));
    StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
                                   SmiAdd(index, one));

    var_done.Bind(FalseConstant());
    Node* elements = LoadElements(array);

    // Dispatch on the concrete fast-array iterator type; the handler table
    // below is index-aligned with this list.
    static int32_t kInstanceType[] = {
        JS_FAST_ARRAY_KEY_ITERATOR_TYPE,
        JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
        JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
        JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE,
        JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE,
        JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
        JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
        JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE,
        JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE,
        JS_FAST_ARRAY_VALUE_ITERATOR_TYPE,
        JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE,
        JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
        JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
    };

    Label packed_object_values(this), holey_object_values(this),
        packed_double_values(this), holey_double_values(this);
    Label* kInstanceTypeHandlers[] = {
        &allocate_key_result, &packed_object_values, &holey_object_values,
        &packed_object_values, &holey_object_values, &packed_double_values,
        &holey_double_values, &packed_object_values, &holey_object_values,
        &packed_object_values, &holey_object_values, &packed_double_values,
        &holey_double_values};

    Switch(instance_type, &throw_bad_receiver, kInstanceType,
           kInstanceTypeHandlers, arraysize(kInstanceType));

    BIND(&packed_object_values);
    {
      var_value.Bind(LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS));
      Goto(&allocate_entry_if_needed);
    }

    BIND(&packed_double_values);
    {
      // Doubles are stored unboxed; box the loaded value in a HeapNumber.
      Node* value = LoadFixedDoubleArrayElement(
          elements, index, MachineType::Float64(), 0, SMI_PARAMETERS);
      var_value.Bind(AllocateHeapNumberWithValue(value));
      Goto(&allocate_entry_if_needed);
    }

    BIND(&holey_object_values);
    {
      // Check the array_protector cell, and take the slow path if it's invalid.
      Node* invalid = SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
      Node* cell = LoadRoot(Heap::kArrayProtectorRootIndex);
      Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
      GotoIf(WordEqual(cell_value, invalid), &generic_values);

      // A hole maps to undefined (pre-bound above); otherwise use the value.
      var_value.Bind(UndefinedConstant());
      Node* value = LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS);
      GotoIf(WordEqual(value, TheHoleConstant()), &allocate_entry_if_needed);
      var_value.Bind(value);
      Goto(&allocate_entry_if_needed);
    }

    BIND(&holey_double_values);
    {
      // Check the array_protector cell, and take the slow path if it's invalid.
      Node* invalid = SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
      Node* cell = LoadRoot(Heap::kArrayProtectorRootIndex);
      Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
      GotoIf(WordEqual(cell_value, invalid), &generic_values);

      // If the slot is a double hole, the load jumps to
      // allocate_entry_if_needed with var_value still undefined.
      var_value.Bind(UndefinedConstant());
      Node* value = LoadFixedDoubleArrayElement(
          elements, index, MachineType::Float64(), 0, SMI_PARAMETERS,
          &allocate_entry_if_needed);
      var_value.Bind(AllocateHeapNumberWithValue(value));
      Goto(&allocate_entry_if_needed);
    }
  }

  BIND(&if_isnotfastarray);
  {
    Label if_istypedarray(this), if_isgeneric(this);

    // If a is undefined, return CreateIterResultObject(undefined, true)
    GotoIf(WordEqual(array, UndefinedConstant()), &allocate_iterator_result);

    Node* array_type = LoadInstanceType(array);
    Branch(Word32Equal(array_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
           &if_istypedarray, &if_isgeneric);

    BIND(&if_isgeneric);
    {
      Label if_wasfastarray(this);

      // Determine the length: JSArray length field for arrays, otherwise
      // ToLength(Get(array, "length")).
      Node* length = nullptr;
      {
        VARIABLE(var_length, MachineRepresentation::kTagged);
        Label if_isarray(this), if_isnotarray(this), done(this);
        Branch(Word32Equal(array_type, Int32Constant(JS_ARRAY_TYPE)),
               &if_isarray, &if_isnotarray);

        BIND(&if_isarray);
        {
          var_length.Bind(LoadObjectField(array, JSArray::kLengthOffset));

          // Invalidate protector cell if needed
          Branch(WordNotEqual(orig_map, UndefinedConstant()), &if_wasfastarray,
                 &done);

          BIND(&if_wasfastarray);
          {
            Label if_invalid(this, Label::kDeferred);
            // A fast array iterator transitioned to a slow iterator during
            // iteration. Invalidate fast_array_iteration_protector cell to
            // prevent potential deopt loops.
            StoreObjectFieldNoWriteBarrier(
                iterator, JSArrayIterator::kIteratedObjectMapOffset,
                UndefinedConstant());
            // Key iterators never read element values, so they don't need
            // the protector invalidated.
            GotoIf(Uint32LessThanOrEqual(
                       instance_type,
                       Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
                   &done);

            Node* invalid =
                SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
            Node* cell = LoadRoot(Heap::kFastArrayIterationProtectorRootIndex);
            StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset, invalid);
            Goto(&done);
          }
        }

        BIND(&if_isnotarray);
        {
          Node* length =
              GetProperty(context, array, factory()->length_string());
          var_length.Bind(ToLength_Inline(context, length));
          Goto(&done);
        }

        BIND(&done);
        length = var_length.value();
      }

      // index/length may be arbitrary numbers here (not just Smis).
      GotoUnlessNumberLessThan(index, length, &set_done);

      StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
                       NumberInc(index));
      var_done.Bind(FalseConstant());

      // Key iterators can skip the property load entirely.
      Branch(
          Uint32LessThanOrEqual(
              instance_type, Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
          &allocate_key_result, &generic_values);

      BIND(&generic_values);
      {
        var_value.Bind(GetProperty(context, array, index));
        Goto(&allocate_entry_if_needed);
      }
    }

    BIND(&if_istypedarray);
    {
      // Detached buffers throw a TypeError (if_isdetached below).
      Node* buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
      GotoIf(IsDetachedBuffer(buffer), &if_isdetached);

      Node* length = LoadObjectField(array, JSTypedArray::kLengthOffset);

      CSA_ASSERT(this, TaggedIsSmi(length));
      CSA_ASSERT(this, TaggedIsSmi(index));

      GotoIfNot(SmiBelow(index, length), &set_done);

      Node* one = SmiConstant(1);
      StoreObjectFieldNoWriteBarrier(
          iterator, JSArrayIterator::kNextIndexOffset, SmiAdd(index, one));
      var_done.Bind(FalseConstant());

      // Compute the raw data pointer of the backing store (on-heap base
      // plus external offset).
      Node* elements = LoadElements(array);
      Node* base_ptr =
          LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
      Node* external_ptr =
          LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
                          MachineType::Pointer());
      Node* data_ptr = IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);

      // Dispatch on the concrete typed-array iterator type; the handler
      // table below is index-aligned with this list.
      static int32_t kInstanceType[] = {
          JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
          JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE,
          JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE,
          JS_INT8_ARRAY_VALUE_ITERATOR_TYPE,
          JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE,
          JS_INT16_ARRAY_VALUE_ITERATOR_TYPE,
          JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE,
          JS_INT32_ARRAY_VALUE_ITERATOR_TYPE,
          JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE,
          JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE,
      };

      Label uint8_values(this), int8_values(this), uint16_values(this),
          int16_values(this), uint32_values(this), int32_values(this),
          float32_values(this), float64_values(this);
      Label* kInstanceTypeHandlers[] = {
          &allocate_key_result, &uint8_values, &uint8_values,
          &int8_values, &uint16_values, &int16_values,
          &uint32_values, &int32_values, &float32_values,
          &float64_values, &uint8_values, &uint8_values,
          &int8_values, &uint16_values, &int16_values,
          &uint32_values, &int32_values, &float32_values,
          &float64_values,
      };

      var_done.Bind(FalseConstant());
      Switch(instance_type, &throw_bad_receiver, kInstanceType,
             kInstanceTypeHandlers, arraysize(kInstanceType));

      BIND(&uint8_values);
      {
        Node* value_uint8 = LoadFixedTypedArrayElement(
            data_ptr, index, UINT8_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(SmiFromWord32(value_uint8));
        Goto(&allocate_entry_if_needed);
      }
      BIND(&int8_values);
      {
        Node* value_int8 = LoadFixedTypedArrayElement(
            data_ptr, index, INT8_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(SmiFromWord32(value_int8));
        Goto(&allocate_entry_if_needed);
      }
      BIND(&uint16_values);
      {
        Node* value_uint16 = LoadFixedTypedArrayElement(
            data_ptr, index, UINT16_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(SmiFromWord32(value_uint16));
        Goto(&allocate_entry_if_needed);
      }
      BIND(&int16_values);
      {
        Node* value_int16 = LoadFixedTypedArrayElement(
            data_ptr, index, INT16_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(SmiFromWord32(value_int16));
        Goto(&allocate_entry_if_needed);
      }
      BIND(&uint32_values);
      {
        // uint32 may not fit in a Smi; ChangeUint32ToTagged boxes as needed.
        Node* value_uint32 = LoadFixedTypedArrayElement(
            data_ptr, index, UINT32_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(ChangeUint32ToTagged(value_uint32));
        Goto(&allocate_entry_if_needed);
      }
      BIND(&int32_values);
      {
        Node* value_int32 = LoadFixedTypedArrayElement(
            data_ptr, index, INT32_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(ChangeInt32ToTagged(value_int32));
        Goto(&allocate_entry_if_needed);
      }
      BIND(&float32_values);
      {
        Node* value_float32 = LoadFixedTypedArrayElement(
            data_ptr, index, FLOAT32_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(
            AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(value_float32)));
        Goto(&allocate_entry_if_needed);
      }
      BIND(&float64_values);
      {
        Node* value_float64 = LoadFixedTypedArrayElement(
            data_ptr, index, FLOAT64_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(AllocateHeapNumberWithValue(value_float64));
        Goto(&allocate_entry_if_needed);
      }
    }
  }

  BIND(&set_done);
  {
    // Iterator exhausted: clear [[IteratedObject]] so subsequent next()
    // calls keep returning {done: true} without touching the array.
    StoreObjectFieldNoWriteBarrier(
        iterator, JSArrayIterator::kIteratedObjectOffset, UndefinedConstant());
    Goto(&allocate_iterator_result);
  }

  BIND(&allocate_key_result);
  {
    // Key iterators produce the index itself as the value.
    var_value.Bind(index);
    var_done.Bind(FalseConstant());
    Goto(&allocate_iterator_result);
  }

  BIND(&allocate_entry_if_needed);
  {
    // Only key+value ("entries") iterators wrap the result in a [key, value]
    // JSArray; plain value iterators fall through directly.
    GotoIf(Int32GreaterThan(instance_type,
                            Int32Constant(LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE)),
           &allocate_iterator_result);

    Node* elements = AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
    StoreFixedArrayElement(elements, 0, index, SKIP_WRITE_BARRIER);
    StoreFixedArrayElement(elements, 1, var_value.value(), SKIP_WRITE_BARRIER);

    // Manually allocate a length-2 JSArray entry (avoids a runtime call).
    Node* entry = Allocate(JSArray::kSize);
    Node* map = LoadContextElement(LoadNativeContext(context),
                                   Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX);

    StoreMapNoWriteBarrier(entry, map);
    StoreObjectFieldRoot(entry, JSArray::kPropertiesOffset,
                         Heap::kEmptyFixedArrayRootIndex);
    StoreObjectFieldNoWriteBarrier(entry, JSArray::kElementsOffset, elements);
    StoreObjectFieldNoWriteBarrier(entry, JSArray::kLengthOffset,
                                   SmiConstant(Smi::FromInt(2)));

    var_value.Bind(entry);
    Goto(&allocate_iterator_result);
  }

  BIND(&allocate_iterator_result);
  {
    // Build the {value, done} JSIteratorResult in-line.
    Node* result = Allocate(JSIteratorResult::kSize);
    Node* map = LoadContextElement(LoadNativeContext(context),
                                   Context::ITERATOR_RESULT_MAP_INDEX);
    StoreMapNoWriteBarrier(result, map);
    StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
                         Heap::kEmptyFixedArrayRootIndex);
    StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
                         Heap::kEmptyFixedArrayRootIndex);
    StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset,
                                   var_value.value());
    StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset,
                                   var_done.value());
    Return(result);
  }

  BIND(&throw_bad_receiver);
  {
    // The {receiver} is not a valid JSArrayIterator.
    CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
                HeapConstant(operation), iterator);
    Unreachable();
  }

  BIND(&if_isdetached);
  {
    // Typed array whose ArrayBuffer was detached during iteration.
    Node* message = SmiConstant(MessageTemplate::kDetachedOperation);
    CallRuntime(Runtime::kThrowTypeError, context, message,
                HeapConstant(operation));
    Unreachable();
  }
}
|
|
|
|
|
|
|
|
} // namespace internal
|
|
|
|
} // namespace v8
|