// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"

namespace v8 {
namespace internal {

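// The Array.prototype iterating builtins below are assembled in two parts: a
// main body with fast-case optimizations and a separate "loop continuation"
// builtin that runs the slower, spec-compliant version of the iteration
// loop. When the fast body hits a condition that invalidates its
// assumptions, it tail-calls the continuation to finish the remaining
// iterations; the continuation is also meant to serve as the resume point
// for TurboFan-inlined builtins that deoptimize mid-loop.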
class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
 public:
  explicit ArrayBuiltinCodeStubAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state),
        k_(this, MachineRepresentation::kTagged),
        a_(this, MachineRepresentation::kTagged),
        to_(this, MachineRepresentation::kTagged, SmiConstant(0)) {}

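  // Each iterating builtin is assembled from three callbacks: a
  // BuiltinResultGenerator producing the initial value of the accumulator a_
  // (the spec's "A"), a CallResultProcessor invoked once per element that
  // returns the new accumulator, and a PostLoopAction run after the loop
  // (e.g. reduce throws here if it never saw an initial accumulator).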
  typedef std::function<Node*(ArrayBuiltinCodeStubAssembler* masm)>
      BuiltinResultGenerator;

  typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
      BuiltinResultIndexInitializer;

  typedef std::function<Node*(ArrayBuiltinCodeStubAssembler* masm,
                              Node* k_value, Node* k)>
      CallResultProcessor;

  typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
      PostLoopAction;

  Node* ForEachResultGenerator() { return UndefinedConstant(); }

  Node* ForEachProcessor(Node* k_value, Node* k) {
    CallJS(CodeFactory::Call(isolate()), context(), callbackfn(), this_arg(),
           k_value, k, o());
    return a();
  }

  Node* SomeResultGenerator() { return FalseConstant(); }

  Node* SomeProcessor(Node* k_value, Node* k) {
    Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
                         this_arg(), k_value, k, o());
    Label false_continue(this), return_true(this);
    BranchIfToBooleanIsTrue(value, &return_true, &false_continue);
    BIND(&return_true);
    Return(TrueConstant());
    BIND(&false_continue);
    return a();
  }

  Node* EveryResultGenerator() { return TrueConstant(); }

  Node* EveryProcessor(Node* k_value, Node* k) {
    Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
                         this_arg(), k_value, k, o());
    Label true_continue(this), return_false(this);
    BranchIfToBooleanIsTrue(value, &true_continue, &return_false);
    BIND(&return_false);
    Return(FalseConstant());
    BIND(&true_continue);
    return a();
  }

  Node* ReduceResultGenerator() {
    VARIABLE(a, MachineRepresentation::kTagged, UndefinedConstant());
    Label no_initial_value(this), has_initial_value(this), done(this, {&a});

    // 8. If initialValue is present, then
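    // Whether initialValue was passed cannot be seen from the (adapted)
    // argument list itself, since missing arguments are padded with
    // undefined. Instead the calling frame is inspected: without an
    // arguments adaptor frame every declared parameter was supplied, and
    // with one, the adaptor's recorded argument count tells us whether the
    // initialValue slot was actually provided by the caller.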
    Node* parent_frame_ptr = LoadParentFramePointer();
    Node* marker_or_function = LoadBufferObject(
        parent_frame_ptr, CommonFrameConstants::kContextOrFrameTypeOffset);
    GotoIf(
        MarkerIsNotFrameType(marker_or_function, StackFrame::ARGUMENTS_ADAPTOR),
        &has_initial_value);

    // There is an arguments adaptor frame; check the argument count.
    Node* adapted_parameter_count = LoadBufferObject(
        parent_frame_ptr, ArgumentsAdaptorFrameConstants::kLengthOffset);
    Branch(SmiLessThan(adapted_parameter_count,
                       SmiConstant(IteratingArrayBuiltinDescriptor::kThisArg)),
           &no_initial_value, &has_initial_value);

    // a. Set accumulator to initialValue.
    BIND(&has_initial_value);
    a.Bind(this_arg());
    Goto(&done);

    // 9. Else initialValue is not present,
    BIND(&no_initial_value);

    // a. Let kPresent be false.
    a.Bind(TheHoleConstant());
    Goto(&done);
    BIND(&done);
    return a.value();
  }

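  // a() holding the-hole means "no accumulator yet": the first element seen
  // becomes the accumulator directly instead of invoking the callback.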
  Node* ReduceProcessor(Node* k_value, Node* k) {
    VARIABLE(result, MachineRepresentation::kTagged);
    Label done(this, {&result}), initial(this);
    GotoIf(WordEqual(a(), TheHoleConstant()), &initial);
    result.Bind(CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
                       UndefinedConstant(), a(), k_value, k, o()));
    Goto(&done);

    BIND(&initial);
    result.Bind(k_value);
    Goto(&done);

    BIND(&done);
    return result.value();
  }

  void ReducePostLoopAction() {
    Label ok(this);
    GotoIf(WordNotEqual(a(), TheHoleConstant()), &ok);
    CallRuntime(Runtime::kThrowTypeError, context(),
                SmiConstant(MessageTemplate::kReduceNoInitial));
    Unreachable();
    BIND(&ok);
  }

  Node* FilterResultGenerator() {
    // 7. Let A be ArraySpeciesCreate(O, 0).
    return ArraySpeciesCreate(context(), o(), SmiConstant(0));
  }

  Node* FilterProcessor(Node* k_value, Node* k) {
    // ii. Let selected be ToBoolean(? Call(callbackfn, T, kValue, k, O)).
    Node* selected = CallJS(CodeFactory::Call(isolate()), context(),
                            callbackfn(), this_arg(), k_value, k, o());
    Label true_continue(this, &to_), false_continue(this);
    BranchIfToBooleanIsTrue(selected, &true_continue, &false_continue);
    BIND(&true_continue);
    // iii. If selected is true, then...
    {
      Label after_work(this, &to_);
      Node* kind = nullptr;

      // If a() is a JSArray, we can have a fast path.
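      // The fast path appends k_value via BuildAppendJSArray, choosing the
      // store variant by the output array's current elements kind (smi, then
      // object, then double). Anything that does not fit falls through to
      // &runtime, which performs the spec's CreateDataPropertyOrThrow via
      // %CreateDataProperty.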
      Label fast(this);
      Label runtime(this);
      Label object_push_pre(this), object_push(this), double_push(this);
      BranchIfFastJSArray(a(), context(), FastJSArrayAccessMode::ANY_ACCESS,
                          &fast, &runtime);

      BIND(&fast);
      {
        kind = EnsureArrayPushable(a(), &runtime);
        GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
               &object_push_pre);

        BuildAppendJSArray(FAST_SMI_ELEMENTS, a(), k_value, &runtime);
        Goto(&after_work);
      }

      BIND(&object_push_pre);
      {
        Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS),
               &double_push, &object_push);
      }

      BIND(&object_push);
      {
        BuildAppendJSArray(FAST_ELEMENTS, a(), k_value, &runtime);
        Goto(&after_work);
      }

      BIND(&double_push);
      {
        BuildAppendJSArray(FAST_DOUBLE_ELEMENTS, a(), k_value, &runtime);
        Goto(&after_work);
      }

      BIND(&runtime);
      {
        // 1. Perform ? CreateDataPropertyOrThrow(A, ToString(to), kValue).
        CallRuntime(Runtime::kCreateDataProperty, context(), a(), to_.value(),
                    k_value);
        Goto(&after_work);
      }

      BIND(&after_work);
      {
        // 2. Increase to by 1.
        to_.Bind(NumberInc(to_.value()));
        Goto(&false_continue);
      }
    }
    BIND(&false_continue);
    return a();
  }

  Node* MapResultGenerator() {
    // 5. Let A be ? ArraySpeciesCreate(O, len).
    return ArraySpeciesCreate(context(), o(), len_);
  }

  Node* MapProcessor(Node* k_value, Node* k) {
    // i. Let kValue be ? Get(O, Pk). Performed by the caller of MapProcessor.
    // ii. Let mappedValue be ? Call(callbackfn, T, kValue, k, O).
    Node* mappedValue = CallJS(CodeFactory::Call(isolate()), context(),
                               callbackfn(), this_arg(), k_value, k, o());

    Label finished(this);
    Node* kind = nullptr;
    Node* elements = nullptr;

    // If a() is a JSArray, we can have a fast path.
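    // Unlike the filter fast path, which appends, map stores mappedValue
    // directly at index k of A's elements with TryStoreArrayElement (A was
    // species-created with length len, so the slot should already exist).
    // Stores that the chosen elements kind cannot represent bail out to
    // &runtime and use %CreateDataProperty instead.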
    // mode is SMI_PARAMETERS because k has tagged representation.
    ParameterMode mode = SMI_PARAMETERS;
    Label fast(this);
    Label runtime(this);
    Label object_push_pre(this), object_push(this), double_push(this);
    BranchIfFastJSArray(a(), context(), FastJSArrayAccessMode::ANY_ACCESS,
                        &fast, &runtime);

    BIND(&fast);
    {
      kind = EnsureArrayPushable(a(), &runtime);
      elements = LoadElements(a());
      GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
             &object_push_pre);
      TryStoreArrayElement(FAST_SMI_ELEMENTS, mode, &runtime, elements, k,
                           mappedValue);
      Goto(&finished);
    }

    BIND(&object_push_pre);
    {
      Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS), &double_push,
             &object_push);
    }

    BIND(&object_push);
    {
      TryStoreArrayElement(FAST_ELEMENTS, mode, &runtime, elements, k,
                           mappedValue);
      Goto(&finished);
    }

    BIND(&double_push);
    {
      TryStoreArrayElement(FAST_DOUBLE_ELEMENTS, mode, &runtime, elements, k,
                           mappedValue);
      Goto(&finished);
    }

    BIND(&runtime);
    {
      // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
      CallRuntime(Runtime::kCreateDataProperty, context(), a(), k, mappedValue);
      Goto(&finished);
    }

    BIND(&finished);
    return a();
  }

  void NullPostLoopAction() {}

protected:
|
|
|
|
Node* context() { return context_; }
|
2017-03-21 17:25:35 +00:00
|
|
|
Node* receiver() { return receiver_; }
|
|
|
|
Node* new_target() { return new_target_; }
|
2017-03-21 15:57:38 +00:00
|
|
|
Node* o() { return o_; }
|
|
|
|
Node* len() { return len_; }
|
|
|
|
Node* callbackfn() { return callbackfn_; }
|
|
|
|
Node* this_arg() { return this_arg_; }
|
|
|
|
Node* k() { return k_.value(); }
|
|
|
|
Node* a() { return a_.value(); }
|
|
|
|
|
2017-03-21 17:25:35 +00:00
|
|
|
void InitIteratingArrayBuiltinBody(Node* context, Node* receiver,
|
|
|
|
Node* callbackfn, Node* this_arg,
|
2017-04-29 10:58:50 +00:00
|
|
|
Node* new_target) {
|
2017-03-21 17:25:35 +00:00
|
|
|
context_ = context;
|
|
|
|
receiver_ = receiver;
|
|
|
|
new_target_ = new_target;
|
|
|
|
callbackfn_ = callbackfn;
|
|
|
|
this_arg_ = this_arg;
|
|
|
|
}
|
2017-03-21 15:57:38 +00:00
|
|
|
|
2017-03-21 17:25:35 +00:00
|
|
|
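  // Fast-path driver shared by the Array.prototype builtins: validate the
  // receiver and callback, compute O and len, seed k and the accumulator,
  // then try HandleFastElements. If the fast loop bails out, the remaining
  // iterations are finished by tail-calling slow_case_continuation, the
  // spec-faithful loop generated below.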
  void GenerateIteratingArrayBuiltinBody(
      const char* name, const BuiltinResultGenerator& generator,
      const CallResultProcessor& processor, const PostLoopAction& action,
      const Callable& slow_case_continuation,
      ForEachDirection direction = ForEachDirection::kForward) {
    Label non_array(this), slow(this, {&k_, &a_, &to_}),
        array_changes(this, {&k_, &a_, &to_});

    // TODO(danno): Seriously? Do we really need to throw the exact error
    // message on null and undefined so that the webkit tests pass?
    Label throw_null_undefined_exception(this, Label::kDeferred);
    GotoIf(WordEqual(receiver(), NullConstant()),
           &throw_null_undefined_exception);
    GotoIf(WordEqual(receiver(), UndefinedConstant()),
           &throw_null_undefined_exception);

    // By the book: taken directly from the ECMAScript 2015 specification

    // 1. Let O be ToObject(this value).
    // 2. ReturnIfAbrupt(O)
    o_ = CallStub(CodeFactory::ToObject(isolate()), context(), receiver());

    // 3. Let len be ToLength(Get(O, "length")).
    // 4. ReturnIfAbrupt(len).
    VARIABLE(merged_length, MachineRepresentation::kTagged);
    Label has_length(this, &merged_length), not_js_array(this);
    GotoIf(DoesntHaveInstanceType(o(), JS_ARRAY_TYPE), &not_js_array);
    merged_length.Bind(LoadJSArrayLength(o()));
    Goto(&has_length);
    BIND(&not_js_array);
    Node* len_property =
        GetProperty(context(), o(), isolate()->factory()->length_string());
    merged_length.Bind(
        CallStub(CodeFactory::ToLength(isolate()), context(), len_property));
    Goto(&has_length);
    BIND(&has_length);
    len_ = merged_length.value();

    // 5. If IsCallable(callbackfn) is false, throw a TypeError exception.
    Label type_exception(this, Label::kDeferred);
    Label done(this);
    GotoIf(TaggedIsSmi(callbackfn()), &type_exception);
    Branch(IsCallableMap(LoadMap(callbackfn())), &done, &type_exception);

    BIND(&throw_null_undefined_exception);
    {
      CallRuntime(
          Runtime::kThrowTypeError, context(),
          SmiConstant(MessageTemplate::kCalledOnNullOrUndefined),
          HeapConstant(isolate()->factory()->NewStringFromAsciiChecked(name)));
      Unreachable();
    }

    BIND(&type_exception);
    {
      CallRuntime(Runtime::kThrowTypeError, context(),
                  SmiConstant(MessageTemplate::kCalledNonCallable),
                  callbackfn());
      Unreachable();
    }

    BIND(&done);

    // 6. If thisArg was supplied, let T be thisArg; else let T be undefined.
    // [Already done by the arguments adapter]

    if (direction == ForEachDirection::kForward) {
      // 7. Let k be 0.
      k_.Bind(SmiConstant(0));
    } else {
      k_.Bind(NumberDec(len()));
    }

    a_.Bind(generator(this));

    HandleFastElements(processor, action, &slow, direction);

    BIND(&slow);

    Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
                                 MachineType::TaggedPointer());
    TailCallStub(
        slow_case_continuation, context(), target, new_target(),
        Int32Constant(IteratingArrayBuiltinLoopContinuationDescriptor::kArity),
        receiver(), callbackfn(), this_arg(), a_.value(), o(), k_.value(),
        len(), to_.value());
  }

  void InitIteratingArrayBuiltinLoopContinuation(Node* context, Node* receiver,
                                                 Node* callbackfn,
                                                 Node* this_arg, Node* a,
                                                 Node* o, Node* initial_k,
                                                 Node* len, Node* to) {
    context_ = context;
    this_arg_ = this_arg;
    callbackfn_ = callbackfn;
    a_.Bind(a);
    k_.Bind(initial_k);
    o_ = o;
    len_ = len;
    to_.Bind(to);
  }

  void GenerateIteratingTypedArrayBuiltinBody(
      const char* name, const BuiltinResultGenerator& generator,
      const CallResultProcessor& processor, const PostLoopAction& action,
      ForEachDirection direction = ForEachDirection::kForward) {
    Node* name_string =
        HeapConstant(isolate()->factory()->NewStringFromAsciiChecked(name));

    // ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray

    Label throw_not_typed_array(this, Label::kDeferred),
        throw_detached(this, Label::kDeferred);

    GotoIf(TaggedIsSmi(receiver_), &throw_not_typed_array);
    GotoIfNot(HasInstanceType(receiver_, JS_TYPED_ARRAY_TYPE),
              &throw_not_typed_array);

    o_ = receiver_;
    Node* array_buffer = LoadObjectField(o_, JSTypedArray::kBufferOffset);
    GotoIf(IsDetachedBuffer(array_buffer), &throw_detached);

    len_ = LoadObjectField(o_, JSTypedArray::kLengthOffset);

    Label throw_not_callable(this, Label::kDeferred);
    Label distinguish_types(this);
    GotoIf(TaggedIsSmi(callbackfn_), &throw_not_callable);
    Branch(IsCallableMap(LoadMap(callbackfn_)), &distinguish_types,
           &throw_not_callable);

    BIND(&throw_not_typed_array);
    {
      CallRuntime(Runtime::kThrowTypeError, context_,
                  SmiConstant(MessageTemplate::kNotTypedArray));
      Unreachable();
    }

    BIND(&throw_detached);
    {
      CallRuntime(Runtime::kThrowTypeError, context_,
                  SmiConstant(MessageTemplate::kDetachedOperation),
                  name_string);
      Unreachable();
    }

    BIND(&throw_not_callable);
    {
      CallRuntime(Runtime::kThrowTypeError, context_,
                  SmiConstant(MessageTemplate::kCalledNonCallable),
                  callbackfn_);
      Unreachable();
    }

    Label unexpected_instance_type(this);
    BIND(&unexpected_instance_type);
    Unreachable();

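    // The element loads differ per external elements kind, so one copy of the
    // loop body is emitted per typed-array instance type and Switch() below
    // dispatches to the copy matching the receiver's elements.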
    std::vector<int32_t> instance_types = {
#define INSTANCE_TYPE(Type, type, TYPE, ctype, size) FIXED_##TYPE##_ARRAY_TYPE,
        TYPED_ARRAYS(INSTANCE_TYPE)
#undef INSTANCE_TYPE
    };
    std::vector<Label> labels;
    for (size_t i = 0; i < instance_types.size(); ++i) {
      labels.push_back(Label(this));
    }
    std::vector<Label*> label_ptrs;
    for (Label& label : labels) {
      label_ptrs.push_back(&label);
    }

    BIND(&distinguish_types);

    if (direction == ForEachDirection::kForward) {
      k_.Bind(SmiConstant(0));
    } else {
      k_.Bind(NumberDec(len()));
    }
    a_.Bind(generator(this));
    Node* elements_type = LoadInstanceType(LoadElements(o_));
    Switch(elements_type, &unexpected_instance_type, instance_types.data(),
           label_ptrs.data(), labels.size());

    for (size_t i = 0; i < labels.size(); ++i) {
      BIND(&labels[i]);
      Label done(this);
      // TODO(tebbi): Silently cancelling the loop on buffer detachment is a
      // spec violation. Should go to &detached and throw a TypeError instead.
      VisitAllTypedArrayElements(
          ElementsKindForInstanceType(
              static_cast<InstanceType>(instance_types[i])),
          array_buffer, processor, &done, direction);
      Goto(&done);
      // No exception, return success
      BIND(&done);
      action(this);
      Return(a_.value());
    }
  }

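  // The "slow" loop: a direct transcription of the spec's iteration steps
  // (ToString(k), HasProperty, Get, Call). It serves as the tail-called
  // continuation of the fast builtins above and is intended as the resume
  // point for inlined builtins that deoptimize inside the loop.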
  void GenerateIteratingArrayBuiltinLoopContinuation(
      const CallResultProcessor& processor, const PostLoopAction& action,
      ForEachDirection direction = ForEachDirection::kForward) {
    Label loop(this, {&k_, &a_, &to_});
    Label after_loop(this);
    Goto(&loop);
    BIND(&loop);
    {
      if (direction == ForEachDirection::kForward) {
        // 8. Repeat, while k < len
        GotoUnlessNumberLessThan(k(), len_, &after_loop);
      } else {
        // OR
        // 10. Repeat, while k >= 0
        GotoUnlessNumberLessThan(SmiConstant(-1), k(), &after_loop);
      }

      Label done_element(this, &to_);
      // a. Let Pk be ToString(k).
      Node* p_k = ToString(context(), k());

      // b. Let kPresent be HasProperty(O, Pk).
      // c. ReturnIfAbrupt(kPresent).
      Node* k_present = HasProperty(o(), p_k, context());

      // d. If kPresent is true, then
      GotoIf(WordNotEqual(k_present, TrueConstant()), &done_element);

      // i. Let kValue be Get(O, Pk).
      // ii. ReturnIfAbrupt(kValue).
      Node* k_value = GetProperty(context(), o(), k());

      // iii. Let funcResult be Call(callbackfn, T, «kValue, k, O»).
      // iv. ReturnIfAbrupt(funcResult).
      a_.Bind(processor(this, k_value, k()));

      Goto(&done_element);

      BIND(&done_element);

      if (direction == ForEachDirection::kForward) {
        // e. Increase k by 1.
        k_.Bind(NumberInc(k()));
      } else {
        // e. Decrease k by 1.
        k_.Bind(NumberDec(k()));
      }
      Goto(&loop);
    }
    BIND(&after_loop);

    action(this);
    Return(a_.value());
  }

 private:
  static ElementsKind ElementsKindForInstanceType(InstanceType type) {
    switch (type) {
#define INSTANCE_TYPE_TO_ELEMENTS_KIND(Type, type, TYPE, ctype, size) \
  case FIXED_##TYPE##_ARRAY_TYPE:                                     \
    return TYPE##_ELEMENTS;

      TYPED_ARRAYS(INSTANCE_TYPE_TO_ELEMENTS_KIND)
#undef INSTANCE_TYPE_TO_ELEMENTS_KIND

      default:
        UNREACHABLE();
        return static_cast<ElementsKind>(-1);
    }
  }

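  // Walks a typed array's backing store with BuildFastLoop. The callback may
  // detach the underlying buffer, so every iteration re-checks
  // IsDetachedBuffer and jumps to |detached| if it happened; for reverse
  // iteration the bounds are swapped and the index is pre-decremented.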
  void VisitAllTypedArrayElements(ElementsKind kind, Node* array_buffer,
                                  const CallResultProcessor& processor,
                                  Label* detached, ForEachDirection direction) {
    VariableList list({&a_, &k_, &to_}, zone());

    FastLoopBody body = [&](Node* index) {
      GotoIf(IsDetachedBuffer(array_buffer), detached);
      Node* elements = LoadElements(o_);
      Node* base_ptr =
          LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
      Node* external_ptr =
          LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
                          MachineType::Pointer());
      Node* data_ptr = IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);
      Node* value = LoadFixedTypedArrayElementAsTagged(data_ptr, index, kind,
                                                       SMI_PARAMETERS);
      k_.Bind(index);
      a_.Bind(processor(this, value, index));
    };
    Node* start = SmiConstant(0);
    Node* end = len_;
    IndexAdvanceMode advance_mode = IndexAdvanceMode::kPost;
    int incr = 1;
    if (direction == ForEachDirection::kReverse) {
      std::swap(start, end);
      advance_mode = IndexAdvanceMode::kPre;
      incr = -1;
    }
    BuildFastLoop(list, start, end, body, incr, ParameterMode::SMI_PARAMETERS,
                  advance_mode);
  }

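  // Iterates a fast JSArray whose elements are of a single kind. The callback
  // can mutate the array, so each iteration re-checks the receiver's map and
  // length and bails out to |array_changed| (the caller's slow path) as soon
  // as either no longer matches the fast-path assumptions.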
  void VisitAllFastElementsOneKind(ElementsKind kind,
                                   const CallResultProcessor& processor,
                                   Label* array_changed, ParameterMode mode,
                                   ForEachDirection direction) {
    Comment("begin VisitAllFastElementsOneKind");
    VARIABLE(original_map, MachineRepresentation::kTagged);
    original_map.Bind(LoadMap(o()));
    VariableList list({&original_map, &a_, &k_, &to_}, zone());
    Node* start = IntPtrOrSmiConstant(0, mode);
    Node* end = TaggedToParameter(len(), mode);
    IndexAdvanceMode advance_mode = direction == ForEachDirection::kReverse
                                        ? IndexAdvanceMode::kPre
                                        : IndexAdvanceMode::kPost;
    if (direction == ForEachDirection::kReverse) std::swap(start, end);
    BuildFastLoop(
        list, start, end,
        [=, &original_map](Node* index) {
          k_.Bind(ParameterToTagged(index, mode));
          Label one_element_done(this), hole_element(this);

          // Check if o's map has changed during the callback. If so, we have
          // to fall back to the slower spec implementation for the rest of
          // the iteration.
          Node* o_map = LoadMap(o());
          GotoIf(WordNotEqual(o_map, original_map.value()), array_changed);

          // Check if o's length has changed during the callback and if the
          // index is now out of range of the new length.
          GotoIf(SmiGreaterThanOrEqual(k_.value(), LoadJSArrayLength(o())),
                 array_changed);

          // Reload the elements array. It may have been resized.
          Node* elements = LoadElements(o());

          // Fast case: load the element directly from the elements FixedArray
          // and call the callback if the element is not the hole.
          DCHECK(kind == FAST_ELEMENTS || kind == FAST_DOUBLE_ELEMENTS);
          int base_size = kind == FAST_ELEMENTS
                              ? FixedArray::kHeaderSize
                              : (FixedArray::kHeaderSize - kHeapObjectTag);
          Node* offset = ElementOffsetFromIndex(index, kind, mode, base_size);
          Node* value = nullptr;
          if (kind == FAST_ELEMENTS) {
            value = LoadObjectField(elements, offset);
            GotoIf(WordEqual(value, TheHoleConstant()), &hole_element);
          } else {
            Node* double_value =
                LoadDoubleWithHoleCheck(elements, offset, &hole_element);
            value = AllocateHeapNumberWithValue(double_value);
          }
          a_.Bind(processor(this, value, k()));
          Goto(&one_element_done);

          BIND(&hole_element);
          // Check if o's prototype chain unexpectedly has elements after the
          // callback, in the case of a hole.
          BranchIfPrototypesHaveNoElements(o_map, &one_element_done,
                                           array_changed);

          BIND(&one_element_done);
        },
        1, mode, advance_mode);
    Comment("end VisitAllFastElementsOneKind");
  }

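  // Entry to the fast path: non-Smi lengths and non-fast receivers go
  // straight to |slow|; otherwise the receiver's elements kind selects
  // whether the loop reads plain object elements or unboxed doubles.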
  void HandleFastElements(const CallResultProcessor& processor,
                          const PostLoopAction& action, Label* slow,
                          ForEachDirection direction) {
    Label switch_on_elements_kind(this), fast_elements(this),
        maybe_double_elements(this), fast_double_elements(this);

    Comment("begin HandleFastElements");
    // Non-smi lengths must use the slow path.
    GotoIf(TaggedIsNotSmi(len()), slow);

    BranchIfFastJSArray(o(), context(),
                        CodeStubAssembler::FastJSArrayAccessMode::INBOUNDS_READ,
                        &switch_on_elements_kind, slow);

    BIND(&switch_on_elements_kind);
    // Select by ElementsKind
    Node* o_map = LoadMap(o());
    Node* bit_field2 = LoadMapBitField2(o_map);
    Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
    Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS),
           &maybe_double_elements, &fast_elements);

    ParameterMode mode = OptimalParameterMode();
    BIND(&fast_elements);
    {
      VisitAllFastElementsOneKind(FAST_ELEMENTS, processor, slow, mode,
                                  direction);

      action(this);

      // No exception, return success
      Return(a_.value());
    }

    BIND(&maybe_double_elements);
    Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_DOUBLE_ELEMENTS), slow,
           &fast_double_elements);

    BIND(&fast_double_elements);
    {
      VisitAllFastElementsOneKind(FAST_DOUBLE_ELEMENTS, processor, slow, mode,
                                  direction);

      action(this);

      // No exception, return success
      Return(a_.value());
    }
  }

  Node* callbackfn_ = nullptr;
  Node* o_ = nullptr;
  Node* this_arg_ = nullptr;
  Node* len_ = nullptr;
  Node* context_ = nullptr;
  Node* receiver_ = nullptr;
  Node* new_target_ = nullptr;
  Variable k_;
  Variable a_;
  Variable to_;
};

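// Array.prototype.push fast path. If the receiver is not a fast JSArray, the
// generic ArrayPush stub is tail-called. Otherwise arguments are appended
// with BuildAppendJSArray for the current elements kind; an argument that
// does not fit is stored once through Runtime::kSetProperty (transitioning
// the array) and pushing continues with the next-more-general kind, unless
// the array has left fast mode, in which case the remaining arguments are
// stored one by one in the &default_label fallback.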
TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
  VARIABLE(arg_index, MachineType::PointerRepresentation());
  Label default_label(this, &arg_index);
  Label smi_transition(this);
  Label object_push_pre(this);
  Label object_push(this, &arg_index);
  Label double_push(this, &arg_index);
  Label double_transition(this);
  Label runtime(this, Label::kDeferred);

  // TODO(ishell): use constants from Descriptor once the JSFunction linkage
  // arguments are reordered.
  Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
  Node* context = Parameter(BuiltinDescriptor::kContext);
  Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);

  CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
  Node* receiver = args.GetReceiver();
  Node* kind = nullptr;

  Label fast(this);
  BranchIfFastJSArray(receiver, context, FastJSArrayAccessMode::ANY_ACCESS,
                      &fast, &runtime);

  BIND(&fast);
  {
    arg_index.Bind(IntPtrConstant(0));
    kind = EnsureArrayPushable(receiver, &runtime);
    GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
           &object_push_pre);

    Node* new_length = BuildAppendJSArray(FAST_SMI_ELEMENTS, receiver, args,
                                          arg_index, &smi_transition);
    args.PopAndReturn(new_length);
  }

  // If the argument is not a smi, then use a heavyweight SetProperty to
  // transition the array for only the single next element. If the argument is
  // a smi, the failure is due to some other reason and we should fall back on
  // the most generic implementation for the rest of the array.
  BIND(&smi_transition);
  {
    Node* arg = args.AtIndex(arg_index.value());
    GotoIf(TaggedIsSmi(arg), &default_label);
    Node* length = LoadJSArrayLength(receiver);
    // TODO(danno): Use the KeyedStoreGeneric stub here when possible;
    // calling into the runtime to do the elements transition is overkill.
    CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
                SmiConstant(STRICT));
    Increment(arg_index);
    // The runtime SetProperty call could have converted the array to
    // dictionary mode, which must be detected to abort the fast-path.
    Node* map = LoadMap(receiver);
    Node* bit_field2 = LoadMapBitField2(map);
    Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
    GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
           &default_label);

    GotoIfNotNumber(arg, &object_push);
    Goto(&double_push);
  }

  BIND(&object_push_pre);
  {
    Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS), &double_push,
           &object_push);
  }

  BIND(&object_push);
  {
    Node* new_length = BuildAppendJSArray(FAST_ELEMENTS, receiver, args,
                                          arg_index, &default_label);
    args.PopAndReturn(new_length);
  }

  BIND(&double_push);
  {
    Node* new_length = BuildAppendJSArray(FAST_DOUBLE_ELEMENTS, receiver, args,
                                          arg_index, &double_transition);
    args.PopAndReturn(new_length);
  }

  // If the argument is not a double, then use a heavyweight SetProperty to
  // transition the array for only the single next element. If the argument is
  // a double, the failure is due to some other reason and we should fall back
  // on the most generic implementation for the rest of the array.
  BIND(&double_transition);
  {
    Node* arg = args.AtIndex(arg_index.value());
    GotoIfNumber(arg, &default_label);
    Node* length = LoadJSArrayLength(receiver);
    // TODO(danno): Use the KeyedStoreGeneric stub here when possible;
    // calling into the runtime to do the elements transition is overkill.
    CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
                SmiConstant(STRICT));
    Increment(arg_index);
    // The runtime SetProperty call could have converted the array to
    // dictionary mode, which must be detected to abort the fast-path.
    Node* map = LoadMap(receiver);
    Node* bit_field2 = LoadMapBitField2(map);
    Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
    GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
           &default_label);
    Goto(&object_push);
  }

  // Fallback that stores un-processed arguments using the full, heavyweight
  // SetProperty machinery.
  BIND(&default_label);
  {
    args.ForEach(
        [this, receiver, context](Node* arg) {
          Node* length = LoadJSArrayLength(receiver);
          CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
                      SmiConstant(STRICT));
        },
        arg_index.value());
    args.PopAndReturn(LoadJSArrayLength(receiver));
  }

  BIND(&runtime);
  {
    Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
                                 MachineType::TaggedPointer());
    TailCallStub(CodeFactory::ArrayPush(isolate()), context, target, new_target,
                 argc);
  }
}

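// Slow-path continuation for Array.prototype.forEach: unpacks the descriptor
// parameters, restores the loop state (A, O, k, len, to), and resumes the
// generic iteration loop with the forEach element processor.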
TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* array = Parameter(Descriptor::kArray);
  Node* object = Parameter(Descriptor::kObject);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  Node* len = Parameter(Descriptor::kLength);
  Node* to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, array, object, initial_k,
                                            len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
|
2017-03-21 15:57:38 +00:00
|
|
|
&ArrayBuiltinCodeStubAssembler::ForEachProcessor,
|
|
|
|
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
|
[builtins] Separate Array.prototype.* CSA builtins into two parts
Previous to this CL, CSA-optimized Array builtins--like forEach, some, and
every--were written in a single, monolithic block of CSA code.
This CL teases the code for each of these builtins apart into two chunks, a main
body with optimizations for fast cases, and a "continuation" builtin that
performs a spec-compliant, but slower version of the main loop of the
builtin. The general idea is that when the "fast" main body builtin encounters
an unexpected condition that invalidates assumptions allowing fast-case code, it
tail calls to the slow, correct version of the loop that finishes the builtin
execution.
This separation currently doens't really provide any specific advantage over the
combined version. However, it paves the way to TF-optimized inlined Array
builtins. Inlined Array builtins may trigger deopts during the execution of the
builtin's loop, and those deopt must continue execution from the point at which
they failed. With some massaging of the deoptimizer, it will be possible to make
those deopt points create an extra frame on the top of the stack which resumes
execution in the slow-loop builtin created in this CL.
BUG=v8:1956
LOG=N
Review-Url: https://codereview.chromium.org/2753793002
Cr-Commit-Position: refs/heads/master@{#43867}
2017-03-16 15:34:01 +00:00
|
|
|
}
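
// A note on the *LoopContinuation builtins above and below: they take the
// complete loop state (receiver, callbackfn, thisArg, the result array, the
// spec-level object `o`, the current index k, the length, and `to`) as
// explicit parameters, so the fast-path builtin (or, later, a deoptimized
// inlined version) can tail-call into them and resume iteration at an
// arbitrary index. Informally, the resumed loop is just the plain spec loop,
// roughly (sketch only, not the exact spec steps):
//
//   for (; k < len; k++) {
//     if (k in o) callbackfn.call(thisArg, o[k], k, o);
//   }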
TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* new_target = Parameter(Descriptor::kNewTarget);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
                                new_target);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.forEach",
      &ArrayBuiltinCodeStubAssembler::ForEachResultGenerator,
      &ArrayBuiltinCodeStubAssembler::ForEachProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
      CodeFactory::ArrayForEachLoopContinuation(isolate()));
}
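
// Observable behaviour sketch (informal, for orientation only): the fast body
// plus the continuation together implement the usual forEach contract, e.g.
//
//   [1, , 3].forEach((x, i) => console.log(i));  // visits indices 0 and 2
//
// The return value is always undefined; a bailout only switches from the fast
// element access to the generic continuation loop and should not change what
// the callback observes.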
TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* array = Parameter(Descriptor::kArray);
  Node* object = Parameter(Descriptor::kObject);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  Node* len = Parameter(Descriptor::kLength);
  Node* to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, array, object, initial_k,
                                            len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinCodeStubAssembler::SomeProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* new_target = Parameter(Descriptor::kNewTarget);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
                                new_target);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.some",
      &ArrayBuiltinCodeStubAssembler::SomeResultGenerator,
      &ArrayBuiltinCodeStubAssembler::SomeProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
      CodeFactory::ArraySomeLoopContinuation(isolate()));
}
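
// Array.prototype.some short-circuits: the first callback invocation that
// produces a truthy value ends the builtin with `true` instead of running the
// rest of the loop. Informally, e.g.
//
//   [1, 2, 3].some(x => x > 2)  // true, callback runs for 1, 2 and 3
//   [1, 2, 3].some(x => x > 0)  // true, callback runs only for 1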
TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* new_target = Parameter(Descriptor::kNewTarget);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
                                new_target);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.some",
      &ArrayBuiltinCodeStubAssembler::SomeResultGenerator,
      &ArrayBuiltinCodeStubAssembler::SomeProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
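
// Note: the %TypedArray% variants in this file go through
// GenerateIteratingTypedArrayBuiltinBody and, unlike the JSArray versions
// above, are not paired with a separate *LoopContinuation builtin here,
// presumably because typed-array element access cannot invalidate the fast
// path in the same way.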
TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* array = Parameter(Descriptor::kArray);
  Node* object = Parameter(Descriptor::kObject);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  Node* len = Parameter(Descriptor::kLength);
  Node* to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, array, object, initial_k,
                                            len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinCodeStubAssembler::EveryProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* new_target = Parameter(Descriptor::kNewTarget);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
                                new_target);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.every",
      &ArrayBuiltinCodeStubAssembler::EveryResultGenerator,
      &ArrayBuiltinCodeStubAssembler::EveryProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
      CodeFactory::ArrayEveryLoopContinuation(isolate()));
}
TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* new_target = Parameter(Descriptor::kNewTarget);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
                                new_target);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.every",
      &ArrayBuiltinCodeStubAssembler::EveryResultGenerator,
      &ArrayBuiltinCodeStubAssembler::EveryProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* accumulator = Parameter(Descriptor::kAccumulator);
  Node* object = Parameter(Descriptor::kObject);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  Node* len = Parameter(Descriptor::kLength);
  Node* to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, accumulator, object,
                                            initial_k, len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
      &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction);
}
TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* initial_value = Parameter(Descriptor::kInitialValue);
  Node* new_target = Parameter(Descriptor::kNewTarget);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
                                new_target);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.reduce",
      &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
      &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
      &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
      CodeFactory::ArrayReduceLoopContinuation(isolate()));
}
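
// Note how reduce reuses the generic iteration scaffolding: the initial value
// is passed through the slot the other builtins use for thisArg (see the
// InitIteratingArrayBuiltinBody call above), and the accumulator travels in
// the continuation's kAccumulator parameter where the other continuations
// carry kArray. Informally:
//
//   [1, 2, 3].reduce((acc, x) => acc + x, 10)  // 16
//   [1, 2, 3].reduce((acc, x) => acc + x)      // 6 (first element seeds acc)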
TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* initial_value = Parameter(Descriptor::kInitialValue);
  Node* new_target = Parameter(Descriptor::kNewTarget);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
                                new_target);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.reduce",
      &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
      &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
      &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction);
}
TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* accumulator = Parameter(Descriptor::kAccumulator);
  Node* object = Parameter(Descriptor::kObject);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  Node* len = Parameter(Descriptor::kLength);
  Node* to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, accumulator, object,
                                            initial_k, len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
      &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
      ForEachDirection::kReverse);
}
TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* initial_value = Parameter(Descriptor::kInitialValue);
  Node* new_target = Parameter(Descriptor::kNewTarget);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
                                new_target);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.reduceRight",
      &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
      &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
      &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
      CodeFactory::ArrayReduceRightLoopContinuation(isolate()),
      ForEachDirection::kReverse);
}
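
// ForEachDirection::kReverse asks the shared loop generators to walk the
// index from the end of the array toward 0, which is what reduceRight needs;
// everything else is identical to the reduce wiring above.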
TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* initial_value = Parameter(Descriptor::kInitialValue);
  Node* new_target = Parameter(Descriptor::kNewTarget);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
                                new_target);

  GenerateIteratingTypedArrayBuiltinBody(
      "%TypedArray%.prototype.reduceRight",
      &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
      &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
      &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
      ForEachDirection::kReverse);
}
TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* array = Parameter(Descriptor::kArray);
  Node* object = Parameter(Descriptor::kObject);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  Node* len = Parameter(Descriptor::kLength);
  Node* to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, array, object, initial_k,
                                            len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinCodeStubAssembler::FilterProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* new_target = Parameter(Descriptor::kNewTarget);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
                                new_target);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.filter",
      &ArrayBuiltinCodeStubAssembler::FilterResultGenerator,
      &ArrayBuiltinCodeStubAssembler::FilterProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
      CodeFactory::ArrayFilterLoopContinuation(isolate()));
}
TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* array = Parameter(Descriptor::kArray);
  Node* object = Parameter(Descriptor::kObject);
  Node* initial_k = Parameter(Descriptor::kInitialK);
  Node* len = Parameter(Descriptor::kLength);
  Node* to = Parameter(Descriptor::kTo);

  InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
                                            this_arg, array, object, initial_k,
                                            len, to);

  GenerateIteratingArrayBuiltinLoopContinuation(
      &ArrayBuiltinCodeStubAssembler::MapProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* callbackfn = Parameter(Descriptor::kCallbackFn);
  Node* this_arg = Parameter(Descriptor::kThisArg);
  Node* new_target = Parameter(Descriptor::kNewTarget);

  InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
                                new_target);

  GenerateIteratingArrayBuiltinBody(
      "Array.prototype.map", &ArrayBuiltinCodeStubAssembler::MapResultGenerator,
      &ArrayBuiltinCodeStubAssembler::MapProcessor,
      &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
      CodeFactory::ArrayMapLoopContinuation(isolate()));
}
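
// All of the Array.prototype.* iterating builtins above follow the same
// recipe, so adding another one is mostly wiring. Purely as an illustration
// (this builtin, its descriptor, and the Find* helpers are hypothetical and
// do not exist in this file), a new builtin would look roughly like:
//
//   TF_BUILTIN(ArrayFind, ArrayBuiltinCodeStubAssembler) {
//     Node* context = Parameter(Descriptor::kContext);
//     Node* receiver = Parameter(Descriptor::kReceiver);
//     Node* callbackfn = Parameter(Descriptor::kCallbackFn);
//     Node* this_arg = Parameter(Descriptor::kThisArg);
//     Node* new_target = Parameter(Descriptor::kNewTarget);
//
//     InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
//                                   new_target);
//
//     GenerateIteratingArrayBuiltinBody(
//         "Array.prototype.find",
//         &ArrayBuiltinCodeStubAssembler::FindResultGenerator,   // hypothetical
//         &ArrayBuiltinCodeStubAssembler::FindProcessor,         // hypothetical
//         &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
//         CodeFactory::ArrayFindLoopContinuation(isolate()));    // hypothetical
//   }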
TF_BUILTIN(ArrayIsArray, CodeStubAssembler) {
  Node* object = Parameter(Descriptor::kArg);
  Node* context = Parameter(Descriptor::kContext);

  Label call_runtime(this), return_true(this), return_false(this);

  GotoIf(TaggedIsSmi(object), &return_false);
  Node* instance_type = LoadInstanceType(object);

  GotoIf(Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE)),
         &return_true);

  // TODO(verwaest): Handle proxies in-place.
  Branch(Word32Equal(instance_type, Int32Constant(JS_PROXY_TYPE)),
         &call_runtime, &return_false);

  BIND(&return_true);
  Return(BooleanConstant(true));

  BIND(&return_false);
  Return(BooleanConstant(false));

  BIND(&call_runtime);
  Return(CallRuntime(Runtime::kArrayIsArray, context, object));
}
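
// JS-observable behaviour (informal): Smis and other non-JSArray, non-proxy
// values answer false, JSArray instances answer true, and proxies are handed
// to Runtime::kArrayIsArray, which follows the proxy's target as the spec's
// IsArray does. E.g. Array.isArray([]) === true, Array.isArray({}) === false,
// and Array.isArray(new Proxy([], {})) === true.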
TF_BUILTIN(ArrayIncludes, CodeStubAssembler) {
  Node* const array = Parameter(Descriptor::kReceiver);
  Node* const search_element = Parameter(Descriptor::kSearchElement);
  Node* const start_from = Parameter(Descriptor::kFromIndex);
  Node* const context = Parameter(Descriptor::kContext);

  VARIABLE(index_var, MachineType::PointerRepresentation());

  Label init_k(this), return_true(this), return_false(this), call_runtime(this);
  Label init_len(this), select_loop(this);

  index_var.Bind(IntPtrConstant(0));

  // Take slow path if not a JSArray, if retrieving elements requires
  // traversing prototype, or if access checks are required.
  BranchIfFastJSArray(array, context, FastJSArrayAccessMode::INBOUNDS_READ,
                      &init_len, &call_runtime);

  BIND(&init_len);
  // JSArray length is always an Smi for fast arrays.
  CSA_ASSERT(this, TaggedIsSmi(LoadObjectField(array, JSArray::kLengthOffset)));
  Node* const len = LoadAndUntagObjectField(array, JSArray::kLengthOffset);

  GotoIf(IsUndefined(start_from), &select_loop);

  // Bailout to slow path if startIndex is not an Smi.
  Branch(TaggedIsSmi(start_from), &init_k, &call_runtime);

  BIND(&init_k);
  CSA_ASSERT(this, TaggedIsSmi(start_from));
  Node* const untagged_start_from = SmiToWord(start_from);
  index_var.Bind(
      Select(IntPtrGreaterThanOrEqual(untagged_start_from, IntPtrConstant(0)),
             [=]() { return untagged_start_from; },
             [=]() {
               Node* const index = IntPtrAdd(len, untagged_start_from);
               return SelectConstant(IntPtrLessThan(index, IntPtrConstant(0)),
                                     IntPtrConstant(0), index,
                                     MachineType::PointerRepresentation());
             },
             MachineType::PointerRepresentation()));
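  // In other words (informal): with a negative Smi fromIndex the search starts
  // at max(len + fromIndex, 0); with a non-negative one it starts at fromIndex
  // itself, matching the spec's clamping of relative indices.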

  Goto(&select_loop);
  BIND(&select_loop);
  static int32_t kElementsKind[] = {
      FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS, FAST_ELEMENTS,
      FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
  };

  Label if_smiorobjects(this), if_packed_doubles(this), if_holey_doubles(this);
  Label* element_kind_handlers[] = {&if_smiorobjects, &if_smiorobjects,
                                    &if_smiorobjects, &if_smiorobjects,
                                    &if_packed_doubles, &if_holey_doubles};

  Node* map = LoadMap(array);
  Node* elements_kind = LoadMapElementsKind(map);
  Node* elements = LoadElements(array);
  Switch(elements_kind, &return_false, kElementsKind, element_kind_handlers,
         arraysize(kElementsKind));
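
  // The handler table pairs with kElementsKind entry for entry: all four
  // Smi/object kinds share one loop, while packed and holey double arrays get
  // their own loops (holey doubles also need the hole/undefined handling
  // below). Any other elements kind falls through to the default label,
  // &return_false.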

  BIND(&if_smiorobjects);
  {
    VARIABLE(search_num, MachineRepresentation::kFloat64);
    Label ident_loop(this, &index_var), heap_num_loop(this, &search_num),
        string_loop(this, &index_var), undef_loop(this, &index_var),
        not_smi(this), not_heap_num(this);

    GotoIfNot(TaggedIsSmi(search_element), &not_smi);
    search_num.Bind(SmiToFloat64(search_element));
    Goto(&heap_num_loop);

    BIND(&not_smi);
    GotoIf(WordEqual(search_element, UndefinedConstant()), &undef_loop);
    Node* map = LoadMap(search_element);
    GotoIfNot(IsHeapNumberMap(map), &not_heap_num);
    search_num.Bind(LoadHeapNumberValue(search_element));
    Goto(&heap_num_loop);

    BIND(&not_heap_num);
    Node* search_type = LoadMapInstanceType(map);
    GotoIf(IsStringInstanceType(search_type), &string_loop);
    Goto(&ident_loop);

    BIND(&ident_loop);
    {
      GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
      Node* element_k = LoadFixedArrayElement(elements, index_var.value());
      GotoIf(WordEqual(element_k, search_element), &return_true);

      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
      Goto(&ident_loop);
    }

    BIND(&undef_loop);
    {
      GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
      Node* element_k = LoadFixedArrayElement(elements, index_var.value());
      GotoIf(WordEqual(element_k, UndefinedConstant()), &return_true);
      GotoIf(WordEqual(element_k, TheHoleConstant()), &return_true);

      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
      Goto(&undef_loop);
    }

    BIND(&heap_num_loop);
    {
      Label nan_loop(this, &index_var), not_nan_loop(this, &index_var);
      BranchIfFloat64IsNaN(search_num.value(), &nan_loop, &not_nan_loop);

      BIND(&not_nan_loop);
      {
        Label continue_loop(this), not_smi(this);
        GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
        Node* element_k = LoadFixedArrayElement(elements, index_var.value());
        GotoIfNot(TaggedIsSmi(element_k), &not_smi);
        Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
               &return_true, &continue_loop);

        BIND(&not_smi);
        GotoIfNot(IsHeapNumber(element_k), &continue_loop);
        Branch(Float64Equal(search_num.value(), LoadHeapNumberValue(element_k)),
               &return_true, &continue_loop);

        BIND(&continue_loop);
        index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
        Goto(&not_nan_loop);
      }

      BIND(&nan_loop);
      {
        Label continue_loop(this);
        GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
        Node* element_k = LoadFixedArrayElement(elements, index_var.value());
        GotoIf(TaggedIsSmi(element_k), &continue_loop);
        GotoIfNot(IsHeapNumber(element_k), &continue_loop);
        BranchIfFloat64IsNaN(LoadHeapNumberValue(element_k), &return_true,
                             &continue_loop);

        BIND(&continue_loop);
        index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
        Goto(&nan_loop);
      }
    }

    BIND(&string_loop);
    {
      Label continue_loop(this);
      GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
      Node* element_k = LoadFixedArrayElement(elements, index_var.value());
      GotoIf(TaggedIsSmi(element_k), &continue_loop);
      GotoIfNot(IsStringInstanceType(LoadInstanceType(element_k)),
                &continue_loop);

      // TODO(bmeurer): Consider inlining the StringEqual logic here.
      Node* result = CallStub(CodeFactory::StringEqual(isolate()), context,
                              search_element, element_k);
      Branch(WordEqual(BooleanConstant(true), result), &return_true,
             &continue_loop);

      BIND(&continue_loop);
      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
      Goto(&string_loop);
    }
  }

  BIND(&if_packed_doubles);
  {
    Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
        hole_loop(this, &index_var), search_notnan(this);
    VARIABLE(search_num, MachineRepresentation::kFloat64);

    GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
    search_num.Bind(SmiToFloat64(search_element));
    Goto(&not_nan_loop);

    BIND(&search_notnan);
    GotoIfNot(IsHeapNumber(search_element), &return_false);

    search_num.Bind(LoadHeapNumberValue(search_element));

    BranchIfFloat64IsNaN(search_num.value(), &nan_loop, &not_nan_loop);

    // Search for HeapNumber
    BIND(&not_nan_loop);
    {
      Label continue_loop(this);
      GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
      Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
                                                    MachineType::Float64());
      Branch(Float64Equal(element_k, search_num.value()), &return_true,
             &continue_loop);
      BIND(&continue_loop);
      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
      Goto(&not_nan_loop);
    }

    // Search for NaN
    BIND(&nan_loop);
    {
      Label continue_loop(this);
      GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
      Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
                                                    MachineType::Float64());
      BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
      BIND(&continue_loop);
      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
      Goto(&nan_loop);
    }
  }

  BIND(&if_holey_doubles);
  {
    Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
        hole_loop(this, &index_var), search_notnan(this);
    VARIABLE(search_num, MachineRepresentation::kFloat64);

    GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
    search_num.Bind(SmiToFloat64(search_element));
    Goto(&not_nan_loop);

    BIND(&search_notnan);
    GotoIf(WordEqual(search_element, UndefinedConstant()), &hole_loop);
    GotoIfNot(IsHeapNumber(search_element), &return_false);

    search_num.Bind(LoadHeapNumberValue(search_element));

    BranchIfFloat64IsNaN(search_num.value(), &nan_loop, &not_nan_loop);

    // Search for HeapNumber
    BIND(&not_nan_loop);
    {
      Label continue_loop(this);
      GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);

      // Load double value or continue if it contains a double hole.
      Node* element_k = LoadFixedDoubleArrayElement(
          elements, index_var.value(), MachineType::Float64(), 0,
          INTPTR_PARAMETERS, &continue_loop);

      Branch(Float64Equal(element_k, search_num.value()), &return_true,
             &continue_loop);
      BIND(&continue_loop);
      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
      Goto(&not_nan_loop);
    }

    // Search for NaN
    BIND(&nan_loop);
    {
      Label continue_loop(this);
      GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);

      // Load double value or continue if it contains a double hole.
      Node* element_k = LoadFixedDoubleArrayElement(
          elements, index_var.value(), MachineType::Float64(), 0,
          INTPTR_PARAMETERS, &continue_loop);

      BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
      BIND(&continue_loop);
      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
      Goto(&nan_loop);
    }

    // Search for the Hole
    BIND(&hole_loop);
    {
      GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);

      // Check if the element is a double hole, but don't load it.
      LoadFixedDoubleArrayElement(elements, index_var.value(),
                                  MachineType::None(), 0, INTPTR_PARAMETERS,
                                  &return_true);

      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
      Goto(&hole_loop);
    }
  }

  BIND(&return_true);
  Return(TrueConstant());

  BIND(&return_false);
  Return(FalseConstant());

  BIND(&call_runtime);
  Return(CallRuntime(Runtime::kArrayIncludes_Slow, context, array,
                     search_element, start_from));
}
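
// includes() vs. indexOf() below: includes uses SameValueZero-style matching,
// so it has dedicated loops that can find NaN and that treat holes in holey
// arrays like undefined; indexOf uses strict equality, so a NaN search can
// bail out to "not found" immediately and holes are never matched, e.g.
//
//   [NaN].includes(NaN)        // true
//   [NaN].indexOf(NaN)         // -1
//   [ , ].includes(undefined)  // true
//   [ , ].indexOf(undefined)   // -1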
TF_BUILTIN(ArrayIndexOf, CodeStubAssembler) {
|
2017-03-20 10:55:37 +00:00
|
|
|
Node* array = Parameter(Descriptor::kReceiver);
|
|
|
|
Node* search_element = Parameter(Descriptor::kSearchElement);
|
|
|
|
Node* start_from = Parameter(Descriptor::kFromIndex);
|
|
|
|
Node* context = Parameter(Descriptor::kContext);
|
2017-03-16 11:32:01 +00:00
|
|
|
|
|
|
|
Node* intptr_zero = IntPtrConstant(0);
|
|
|
|
Node* intptr_one = IntPtrConstant(1);
|
|
|
|
|
2017-04-07 08:20:35 +00:00
|
|
|
VARIABLE(len_var, MachineType::PointerRepresentation());
|
|
|
|
VARIABLE(index_var, MachineType::PointerRepresentation());
|
|
|
|
VARIABLE(start_from_var, MachineType::PointerRepresentation());
|
2017-03-16 11:32:01 +00:00
|
|
|
|
|
|
|
Label init_k(this), return_found(this), return_not_found(this),
|
|
|
|
call_runtime(this);
|
|
|
|
|
|
|
|
Label init_len(this);
|
|
|
|
|
|
|
|
index_var.Bind(intptr_zero);
|
|
|
|
len_var.Bind(intptr_zero);
|
|
|
|
|
|
|
|
// Take slow path if not a JSArray, if retrieving elements requires
|
|
|
|
// traversing prototype, or if access checks are required.
|
|
|
|
BranchIfFastJSArray(array, context, FastJSArrayAccessMode::INBOUNDS_READ,
|
|
|
|
&init_len, &call_runtime);
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&init_len);
|
2017-03-16 11:32:01 +00:00
|
|
|
{
|
|
|
|
// JSArray length is always an Smi for fast arrays.
|
|
|
|
CSA_ASSERT(this,
|
|
|
|
TaggedIsSmi(LoadObjectField(array, JSArray::kLengthOffset)));
|
|
|
|
Node* len = LoadAndUntagObjectField(array, JSArray::kLengthOffset);
|
|
|
|
|
|
|
|
len_var.Bind(len);
|
|
|
|
Branch(WordEqual(len_var.value(), intptr_zero), &return_not_found, &init_k);
|
|
|
|
}
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&init_k);
|
2017-03-16 11:32:01 +00:00
|
|
|
{
|
|
|
|
// For now only deal with undefined and Smis here; we must be really careful
|
|
|
|
// with side-effects from the ToInteger conversion as the side-effects might
|
|
|
|
// render our assumptions about the receiver being a fast JSArray and the
|
|
|
|
// length invalid.
|
|
|
|
Label done(this), init_k_smi(this), init_k_other(this), init_k_zero(this),
|
|
|
|
init_k_n(this);
|
|
|
|
Branch(TaggedIsSmi(start_from), &init_k_smi, &init_k_other);
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&init_k_smi);
|
2017-03-16 11:32:01 +00:00
|
|
|
{
|
|
|
|
// The fromIndex is a Smi.
|
|
|
|
start_from_var.Bind(SmiUntag(start_from));
|
|
|
|
Goto(&init_k_n);
|
|
|
|
}
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&init_k_other);
|
2017-03-16 11:32:01 +00:00
|
|
|
{
|
|
|
|
// The fromIndex must be undefined then, otherwise bailout and let the
|
|
|
|
// runtime deal with the full ToInteger conversion.
|
|
|
|
GotoIfNot(IsUndefined(start_from), &call_runtime);
|
|
|
|
start_from_var.Bind(intptr_zero);
|
|
|
|
Goto(&init_k_n);
|
|
|
|
}
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&init_k_n);
|
2017-03-16 11:32:01 +00:00
|
|
|
{
|
|
|
|
Label if_positive(this), if_negative(this), done(this);
|
|
|
|
Branch(IntPtrLessThan(start_from_var.value(), intptr_zero), &if_negative,
|
|
|
|
&if_positive);
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&if_positive);
|
2017-03-16 11:32:01 +00:00
|
|
|
{
|
|
|
|
index_var.Bind(start_from_var.value());
|
|
|
|
Goto(&done);
|
|
|
|
}
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&if_negative);
|
2017-03-16 11:32:01 +00:00
|
|
|
{
|
|
|
|
index_var.Bind(IntPtrAdd(len_var.value(), start_from_var.value()));
|
|
|
|
Branch(IntPtrLessThan(index_var.value(), intptr_zero), &init_k_zero,
|
|
|
|
&done);
|
|
|
|
}
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&init_k_zero);
|
2017-03-16 11:32:01 +00:00
|
|
|
{
|
|
|
|
index_var.Bind(intptr_zero);
|
|
|
|
Goto(&done);
|
|
|
|
}
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&done);
|
2017-03-16 11:32:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int32_t kElementsKind[] = {
|
|
|
|
FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS, FAST_ELEMENTS,
|
|
|
|
FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
|
|
|
|
};
|
|
|
|
|
|
|
|
Label if_smiorobjects(this), if_packed_doubles(this), if_holey_doubles(this);
|
|
|
|
Label* element_kind_handlers[] = {&if_smiorobjects, &if_smiorobjects,
|
|
|
|
&if_smiorobjects, &if_smiorobjects,
|
|
|
|
&if_packed_doubles, &if_holey_doubles};
|
|
|
|
|
|
|
|
Node* map = LoadMap(array);
|
|
|
|
Node* elements_kind = LoadMapElementsKind(map);
|
|
|
|
Node* elements = LoadElements(array);
|
|
|
|
Switch(elements_kind, &return_not_found, kElementsKind, element_kind_handlers,
|
|
|
|
arraysize(kElementsKind));
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&if_smiorobjects);
|
2017-03-16 11:32:01 +00:00
|
|
|
{
|
2017-04-07 08:20:35 +00:00
|
|
|
VARIABLE(search_num, MachineRepresentation::kFloat64);
|
2017-03-16 11:32:01 +00:00
|
|
|
Label ident_loop(this, &index_var), heap_num_loop(this, &search_num),
|
|
|
|
string_loop(this, &index_var), not_smi(this), not_heap_num(this);
|
|
|
|
|
|
|
|
GotoIfNot(TaggedIsSmi(search_element), ¬_smi);
|
|
|
|
search_num.Bind(SmiToFloat64(search_element));
|
|
|
|
Goto(&heap_num_loop);
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(¬_smi);
|
2017-03-16 11:32:01 +00:00
|
|
|
Node* map = LoadMap(search_element);
|
|
|
|
GotoIfNot(IsHeapNumberMap(map), ¬_heap_num);
|
|
|
|
search_num.Bind(LoadHeapNumberValue(search_element));
|
|
|
|
Goto(&heap_num_loop);
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(¬_heap_num);
|
2017-03-16 11:32:01 +00:00
|
|
|
Node* search_type = LoadMapInstanceType(map);
|
|
|
|
GotoIf(IsStringInstanceType(search_type), &string_loop);
|
|
|
|
Goto(&ident_loop);
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&ident_loop);
|
2017-03-16 11:32:01 +00:00
|
|
|
{
|
|
|
|
GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
|
|
|
|
&return_not_found);
|
|
|
|
Node* element_k = LoadFixedArrayElement(elements, index_var.value());
|
|
|
|
GotoIf(WordEqual(element_k, search_element), &return_found);
|
|
|
|
|
|
|
|
index_var.Bind(IntPtrAdd(index_var.value(), intptr_one));
|
|
|
|
Goto(&ident_loop);
|
|
|
|
}
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&heap_num_loop);
|
2017-03-16 11:32:01 +00:00
|
|
|
{
|
|
|
|
Label not_nan_loop(this, &index_var);
|
|
|
|
BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
|
|
|
|
¬_nan_loop);
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(¬_nan_loop);
|
2017-03-16 11:32:01 +00:00
|
|
|
{
|
|
|
|
Label continue_loop(this), not_smi(this);
|
|
|
|
GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
|
|
|
|
&return_not_found);
|
|
|
|
Node* element_k = LoadFixedArrayElement(elements, index_var.value());
|
|
|
|
GotoIfNot(TaggedIsSmi(element_k), ¬_smi);
|
|
|
|
Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
|
|
|
|
&return_found, &continue_loop);
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(¬_smi);
|
2017-03-16 11:32:01 +00:00
|
|
|
GotoIfNot(IsHeapNumber(element_k), &continue_loop);
|
|
|
|
Branch(Float64Equal(search_num.value(), LoadHeapNumberValue(element_k)),
|
|
|
|
&return_found, &continue_loop);
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
BIND(&continue_loop);
|
2017-03-16 11:32:01 +00:00
|
|
|
index_var.Bind(IntPtrAdd(index_var.value(), intptr_one));
|
|
|
|
Goto(¬_nan_loop);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-06 10:46:06 +00:00
|
|
|
    BIND(&string_loop);
    {
      Label continue_loop(this);
      GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
                &return_not_found);
      Node* element_k = LoadFixedArrayElement(elements, index_var.value());
      GotoIf(TaggedIsSmi(element_k), &continue_loop);
      GotoIfNot(IsString(element_k), &continue_loop);

      // TODO(bmeurer): Consider inlining the StringEqual logic here.
      Callable callable = CodeFactory::StringEqual(isolate());
      Node* result = CallStub(callable, context, search_element, element_k);
      Branch(WordEqual(BooleanConstant(true), result), &return_found,
             &continue_loop);

      BIND(&continue_loop);
      index_var.Bind(IntPtrAdd(index_var.value(), intptr_one));
      Goto(&string_loop);
    }
  }

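  // Packed double elements: an indexOf-style search can never match NaN, so
  // a NaN search value immediately falls through to return_not_found.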
  BIND(&if_packed_doubles);
  {
    Label not_nan_loop(this, &index_var), search_notnan(this);
    VARIABLE(search_num, MachineRepresentation::kFloat64);

    GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
    search_num.Bind(SmiToFloat64(search_element));
    Goto(&not_nan_loop);

    BIND(&search_notnan);
    GotoIfNot(IsHeapNumber(search_element), &return_not_found);

    search_num.Bind(LoadHeapNumberValue(search_element));

    BranchIfFloat64IsNaN(search_num.value(), &return_not_found, &not_nan_loop);

    // Search for HeapNumber
    BIND(&not_nan_loop);
    {
      GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
                &return_not_found);
      Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
                                                    MachineType::Float64());
      GotoIf(Float64Equal(element_k, search_num.value()), &return_found);

      index_var.Bind(IntPtrAdd(index_var.value(), intptr_one));
      Goto(&not_nan_loop);
    }
  }

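  // Holey double elements: same as above, except that holes are skipped via
  // the bailout label passed to LoadFixedDoubleArrayElement.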
  BIND(&if_holey_doubles);
  {
    Label not_nan_loop(this, &index_var), search_notnan(this);
    VARIABLE(search_num, MachineRepresentation::kFloat64);

    GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
    search_num.Bind(SmiToFloat64(search_element));
    Goto(&not_nan_loop);

    BIND(&search_notnan);
    GotoIfNot(IsHeapNumber(search_element), &return_not_found);

    search_num.Bind(LoadHeapNumberValue(search_element));

    BranchIfFloat64IsNaN(search_num.value(), &return_not_found, &not_nan_loop);

    // Search for HeapNumber
    BIND(&not_nan_loop);
    {
      Label continue_loop(this);
      GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
                &return_not_found);

      // Load double value or continue if it contains a double hole.
      Node* element_k = LoadFixedDoubleArrayElement(
          elements, index_var.value(), MachineType::Float64(), 0,
          INTPTR_PARAMETERS, &continue_loop);

      Branch(Float64Equal(element_k, search_num.value()), &return_found,
             &continue_loop);
      BIND(&continue_loop);
      index_var.Bind(IntPtrAdd(index_var.value(), intptr_one));
      Goto(&not_nan_loop);
    }
  }

  BIND(&return_found);
  Return(SmiTag(index_var.value()));

  BIND(&return_not_found);
  Return(NumberConstant(-1));

  BIND(&call_runtime);
  Return(CallRuntime(Runtime::kArrayIndexOf, context, array, search_element,
                     start_from));
}

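// The assembler below backs Array.prototype.values/entries/keys. Illustrative
// JS (not part of this file):
//
//   for (const [i, v] of ["a", "b"].entries()) {
//     // visits [0, "a"], then [1, "b"]
//   }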
class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
 public:
  explicit ArrayPrototypeIterationAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

 protected:
  void Generate_ArrayPrototypeIterationMethod(Node* context, Node* receiver,
                                              IterationKind iteration_kind) {
    VARIABLE(var_array, MachineRepresentation::kTagged);
    VARIABLE(var_map, MachineRepresentation::kTagged);
    VARIABLE(var_type, MachineRepresentation::kWord32);

    Label if_isnotobject(this, Label::kDeferred);
    Label create_array_iterator(this);

    GotoIf(TaggedIsSmi(receiver), &if_isnotobject);
    var_array.Bind(receiver);
    var_map.Bind(LoadMap(receiver));
    var_type.Bind(LoadMapInstanceType(var_map.value()));
    Branch(IsJSReceiverInstanceType(var_type.value()), &create_array_iterator,
           &if_isnotobject);

    BIND(&if_isnotobject);
    {
      Callable callable = CodeFactory::ToObject(isolate());
      Node* result = CallStub(callable, context, receiver);
      var_array.Bind(result);
      var_map.Bind(LoadMap(result));
      var_type.Bind(LoadMapInstanceType(var_map.value()));
      Goto(&create_array_iterator);
    }

    BIND(&create_array_iterator);
    Return(CreateArrayIterator(var_array.value(), var_map.value(),
                               var_type.value(), context, iteration_kind));
  }
};

TF_BUILTIN(ArrayPrototypeValues, ArrayPrototypeIterationAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Generate_ArrayPrototypeIterationMethod(context, receiver,
                                         IterationKind::kValues);
}

TF_BUILTIN(ArrayPrototypeEntries, ArrayPrototypeIterationAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Generate_ArrayPrototypeIterationMethod(context, receiver,
                                         IterationKind::kEntries);
}

TF_BUILTIN(ArrayPrototypeKeys, ArrayPrototypeIterationAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* receiver = Parameter(Descriptor::kReceiver);
  Generate_ArrayPrototypeIterationMethod(context, receiver,
                                         IterationKind::kKeys);
}

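// %ArrayIteratorPrototype%.next produces { value, done } result objects.
// Illustrative JS (not part of this file):
//
//   const it = [10, 20][Symbol.iterator]();
//   it.next();  // { value: 10, done: false }
//   it.next();  // { value: 20, done: false }
//   it.next();  // { value: undefined, done: true }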
TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
  Handle<String> operation = factory()->NewStringFromAsciiChecked(
      "Array Iterator.prototype.next", TENURED);

  Node* context = Parameter(Descriptor::kContext);
  Node* iterator = Parameter(Descriptor::kReceiver);

  VARIABLE(var_value, MachineRepresentation::kTagged);
  VARIABLE(var_done, MachineRepresentation::kTagged);

  // Required, or else `throw_bad_receiver` fails a DCHECK due to these
  // variables not being bound along all paths, despite not being used.
  var_done.Bind(TrueConstant());
  var_value.Bind(UndefinedConstant());

  Label throw_bad_receiver(this, Label::kDeferred);
  Label set_done(this);
  Label allocate_key_result(this);
  Label allocate_entry_if_needed(this);
  Label allocate_iterator_result(this);
  Label generic_values(this);

  // If O does not have all of the internal slots of an Array Iterator Instance
  // (22.1.5.3), throw a TypeError exception.
  GotoIf(TaggedIsSmi(iterator), &throw_bad_receiver);
  Node* instance_type = LoadInstanceType(iterator);
  GotoIf(
      Uint32LessThan(
          Int32Constant(LAST_ARRAY_ITERATOR_TYPE - FIRST_ARRAY_ITERATOR_TYPE),
          Int32Sub(instance_type, Int32Constant(FIRST_ARRAY_ITERATOR_TYPE))),
      &throw_bad_receiver);

  // Let a be O.[[IteratedObject]].
  Node* array =
      LoadObjectField(iterator, JSArrayIterator::kIteratedObjectOffset);

  // Let index be O.[[ArrayIteratorNextIndex]].
  Node* index = LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset);
  Node* orig_map =
      LoadObjectField(iterator, JSArrayIterator::kIteratedObjectMapOffset);
  Node* array_map = LoadMap(array);

  Label if_isfastarray(this), if_isnotfastarray(this),
      if_isdetached(this, Label::kDeferred);

  Branch(WordEqual(orig_map, array_map), &if_isfastarray, &if_isnotfastarray);

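  // Fast path: the iterated object's map is still the map recorded when the
  // iterator was created, so its elements can be accessed directly.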
  BIND(&if_isfastarray);
  {
    CSA_ASSERT(this, Word32Equal(LoadMapInstanceType(array_map),
                                 Int32Constant(JS_ARRAY_TYPE)));

    Node* length = LoadObjectField(array, JSArray::kLengthOffset);

    CSA_ASSERT(this, TaggedIsSmi(length));
    CSA_ASSERT(this, TaggedIsSmi(index));

    GotoIfNot(SmiBelow(index, length), &set_done);

    Node* one = SmiConstant(Smi::FromInt(1));
    StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
                                   SmiAdd(index, one));

    var_done.Bind(FalseConstant());
    Node* elements = LoadElements(array);

    static int32_t kInstanceType[] = {
        JS_FAST_ARRAY_KEY_ITERATOR_TYPE,
        JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
        JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
        JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE,
        JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE,
        JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
        JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
        JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE,
        JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE,
        JS_FAST_ARRAY_VALUE_ITERATOR_TYPE,
        JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE,
        JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
        JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
    };

    Label packed_object_values(this), holey_object_values(this),
        packed_double_values(this), holey_double_values(this);
    Label* kInstanceTypeHandlers[] = {
        &allocate_key_result,  &packed_object_values, &holey_object_values,
        &packed_object_values, &holey_object_values,  &packed_double_values,
        &holey_double_values,  &packed_object_values, &holey_object_values,
        &packed_object_values, &holey_object_values,  &packed_double_values,
        &holey_double_values};

    Switch(instance_type, &throw_bad_receiver, kInstanceType,
           kInstanceTypeHandlers, arraysize(kInstanceType));

    BIND(&packed_object_values);
    {
      var_value.Bind(LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS));
      Goto(&allocate_entry_if_needed);
    }

    BIND(&packed_double_values);
    {
      Node* value = LoadFixedDoubleArrayElement(
          elements, index, MachineType::Float64(), 0, SMI_PARAMETERS);
      var_value.Bind(AllocateHeapNumberWithValue(value));
      Goto(&allocate_entry_if_needed);
    }

    BIND(&holey_object_values);
    {
      // Check the array_protector cell, and take the slow path if it's invalid.
      Node* invalid = SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
      Node* cell = LoadRoot(Heap::kArrayProtectorRootIndex);
      Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
      GotoIf(WordEqual(cell_value, invalid), &generic_values);

      var_value.Bind(UndefinedConstant());
      Node* value = LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS);
      GotoIf(WordEqual(value, TheHoleConstant()), &allocate_entry_if_needed);
      var_value.Bind(value);
      Goto(&allocate_entry_if_needed);
    }

    BIND(&holey_double_values);
    {
      // Check the array_protector cell, and take the slow path if it's invalid.
      Node* invalid = SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
      Node* cell = LoadRoot(Heap::kArrayProtectorRootIndex);
      Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
      GotoIf(WordEqual(cell_value, invalid), &generic_values);

      var_value.Bind(UndefinedConstant());
      Node* value = LoadFixedDoubleArrayElement(
          elements, index, MachineType::Float64(), 0, SMI_PARAMETERS,
          &allocate_entry_if_needed);
      var_value.Bind(AllocateHeapNumberWithValue(value));
      Goto(&allocate_entry_if_needed);
    }
  }

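  // Slow path: the iterated object is exhausted (undefined), a JSTypedArray,
  // or a generic receiver whose length must be read via Get/ToLength.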
  BIND(&if_isnotfastarray);
  {
    Label if_istypedarray(this), if_isgeneric(this);

    // If a is undefined, return CreateIterResultObject(undefined, true)
    GotoIf(WordEqual(array, UndefinedConstant()), &allocate_iterator_result);

    Node* array_type = LoadInstanceType(array);
    Branch(Word32Equal(array_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
           &if_istypedarray, &if_isgeneric);

    BIND(&if_isgeneric);
    {
      Label if_wasfastarray(this);

      Node* length = nullptr;
      {
        VARIABLE(var_length, MachineRepresentation::kTagged);
        Label if_isarray(this), if_isnotarray(this), done(this);
        Branch(Word32Equal(array_type, Int32Constant(JS_ARRAY_TYPE)),
               &if_isarray, &if_isnotarray);

        BIND(&if_isarray);
        {
          var_length.Bind(LoadObjectField(array, JSArray::kLengthOffset));

          // Invalidate protector cell if needed
          Branch(WordNotEqual(orig_map, UndefinedConstant()), &if_wasfastarray,
                 &done);

          BIND(&if_wasfastarray);
          {
            Label if_invalid(this, Label::kDeferred);
            // A fast array iterator transitioned to a slow iterator during
            // iteration. Invalidate the fast_array_iteration_protector cell to
            // prevent potential deopt loops.
            StoreObjectFieldNoWriteBarrier(
                iterator, JSArrayIterator::kIteratedObjectMapOffset,
                UndefinedConstant());
            GotoIf(Uint32LessThanOrEqual(
                       instance_type,
                       Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
                   &done);

            Node* invalid =
                SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
            Node* cell = LoadRoot(Heap::kFastArrayIterationProtectorRootIndex);
            StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset, invalid);
            Goto(&done);
          }
        }

        BIND(&if_isnotarray);
        {
          Node* length =
              GetProperty(context, array, factory()->length_string());
          Callable to_length = CodeFactory::ToLength(isolate());
          var_length.Bind(CallStub(to_length, context, length));
          Goto(&done);
        }

        BIND(&done);
        length = var_length.value();
      }

      GotoUnlessNumberLessThan(index, length, &set_done);

      StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
                       NumberInc(index));
      var_done.Bind(FalseConstant());

      Branch(
          Uint32LessThanOrEqual(
              instance_type, Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
          &allocate_key_result, &generic_values);

      BIND(&generic_values);
      {
        var_value.Bind(GetProperty(context, array, index));
        Goto(&allocate_entry_if_needed);
      }
    }

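    // Typed arrays read elements straight from the backing store; a detached
    // buffer throws a TypeError instead.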
    BIND(&if_istypedarray);
    {
      Node* buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
      GotoIf(IsDetachedBuffer(buffer), &if_isdetached);

      Node* length = LoadObjectField(array, JSTypedArray::kLengthOffset);

      CSA_ASSERT(this, TaggedIsSmi(length));
      CSA_ASSERT(this, TaggedIsSmi(index));

      GotoIfNot(SmiBelow(index, length), &set_done);

      Node* one = SmiConstant(1);
      StoreObjectFieldNoWriteBarrier(
          iterator, JSArrayIterator::kNextIndexOffset, SmiAdd(index, one));
      var_done.Bind(FalseConstant());

      Node* elements = LoadElements(array);
      Node* base_ptr =
          LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
      Node* external_ptr =
          LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
                          MachineType::Pointer());
      Node* data_ptr = IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);

      static int32_t kInstanceType[] = {
          JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
          JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
          JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE,
          JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE,
          JS_INT8_ARRAY_VALUE_ITERATOR_TYPE,
          JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE,
          JS_INT16_ARRAY_VALUE_ITERATOR_TYPE,
          JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE,
          JS_INT32_ARRAY_VALUE_ITERATOR_TYPE,
          JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE,
          JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE,
      };

      Label uint8_values(this), int8_values(this), uint16_values(this),
          int16_values(this), uint32_values(this), int32_values(this),
          float32_values(this), float64_values(this);
      Label* kInstanceTypeHandlers[] = {
          &allocate_key_result, &uint8_values,  &uint8_values,
          &int8_values,         &uint16_values, &int16_values,
          &uint32_values,       &int32_values,  &float32_values,
          &float64_values,      &uint8_values,  &uint8_values,
          &int8_values,         &uint16_values, &int16_values,
          &uint32_values,       &int32_values,  &float32_values,
          &float64_values,
      };

      var_done.Bind(FalseConstant());
      Switch(instance_type, &throw_bad_receiver, kInstanceType,
             kInstanceTypeHandlers, arraysize(kInstanceType));

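      // Each handler below loads the raw element and tags it: 8- and 16-bit
      // values always fit in a Smi, 32-bit values are tagged (possibly as
      // HeapNumbers), and floats are boxed into HeapNumbers.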
      BIND(&uint8_values);
      {
        Node* value_uint8 = LoadFixedTypedArrayElement(
            data_ptr, index, UINT8_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(SmiFromWord32(value_uint8));
        Goto(&allocate_entry_if_needed);
      }
      BIND(&int8_values);
      {
        Node* value_int8 = LoadFixedTypedArrayElement(
            data_ptr, index, INT8_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(SmiFromWord32(value_int8));
        Goto(&allocate_entry_if_needed);
      }
      BIND(&uint16_values);
      {
        Node* value_uint16 = LoadFixedTypedArrayElement(
            data_ptr, index, UINT16_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(SmiFromWord32(value_uint16));
        Goto(&allocate_entry_if_needed);
      }
      BIND(&int16_values);
      {
        Node* value_int16 = LoadFixedTypedArrayElement(
            data_ptr, index, INT16_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(SmiFromWord32(value_int16));
        Goto(&allocate_entry_if_needed);
      }
      BIND(&uint32_values);
      {
        Node* value_uint32 = LoadFixedTypedArrayElement(
            data_ptr, index, UINT32_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(ChangeUint32ToTagged(value_uint32));
        Goto(&allocate_entry_if_needed);
      }
      BIND(&int32_values);
      {
        Node* value_int32 = LoadFixedTypedArrayElement(
            data_ptr, index, INT32_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(ChangeInt32ToTagged(value_int32));
        Goto(&allocate_entry_if_needed);
      }
      BIND(&float32_values);
      {
        Node* value_float32 = LoadFixedTypedArrayElement(
            data_ptr, index, FLOAT32_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(
            AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(value_float32)));
        Goto(&allocate_entry_if_needed);
      }
      BIND(&float64_values);
      {
        Node* value_float64 = LoadFixedTypedArrayElement(
            data_ptr, index, FLOAT64_ELEMENTS, SMI_PARAMETERS);
        var_value.Bind(AllocateHeapNumberWithValue(value_float64));
        Goto(&allocate_entry_if_needed);
      }
    }
  }

  BIND(&set_done);
  {
    StoreObjectFieldNoWriteBarrier(
        iterator, JSArrayIterator::kIteratedObjectOffset, UndefinedConstant());
    Goto(&allocate_iterator_result);
  }

  BIND(&allocate_key_result);
  {
    var_value.Bind(index);
    var_done.Bind(FalseConstant());
    Goto(&allocate_iterator_result);
  }

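  // entries() results carry a fresh [key, value] JSArray as the value, e.g.
  // [...["a"].entries()] yields [[0, "a"]] in JS.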
  BIND(&allocate_entry_if_needed);
  {
    GotoIf(Int32GreaterThan(instance_type,
                            Int32Constant(LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE)),
           &allocate_iterator_result);

    Node* elements = AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
    StoreFixedArrayElement(elements, 0, index, SKIP_WRITE_BARRIER);
    StoreFixedArrayElement(elements, 1, var_value.value(), SKIP_WRITE_BARRIER);

    Node* entry = Allocate(JSArray::kSize);
    Node* map = LoadContextElement(LoadNativeContext(context),
                                   Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX);

    StoreMapNoWriteBarrier(entry, map);
    StoreObjectFieldRoot(entry, JSArray::kPropertiesOffset,
                         Heap::kEmptyFixedArrayRootIndex);
    StoreObjectFieldNoWriteBarrier(entry, JSArray::kElementsOffset, elements);
    StoreObjectFieldNoWriteBarrier(entry, JSArray::kLengthOffset,
                                   SmiConstant(Smi::FromInt(2)));

    var_value.Bind(entry);
    Goto(&allocate_iterator_result);
  }

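  // The { value, done } result object is allocated inline using the native
  // context's iterator result map.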
  BIND(&allocate_iterator_result);
  {
    Node* result = Allocate(JSIteratorResult::kSize);
    Node* map = LoadContextElement(LoadNativeContext(context),
                                   Context::ITERATOR_RESULT_MAP_INDEX);
    StoreMapNoWriteBarrier(result, map);
    StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
                         Heap::kEmptyFixedArrayRootIndex);
    StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
                         Heap::kEmptyFixedArrayRootIndex);
    StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset,
                                   var_value.value());
    StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset,
                                   var_done.value());
    Return(result);
  }

  BIND(&throw_bad_receiver);
  {
    // The {receiver} is not a valid JSArrayIterator.
    CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
                HeapConstant(operation), iterator);
    Unreachable();
  }

  BIND(&if_isdetached);
  {
    Node* message = SmiConstant(MessageTemplate::kDetachedOperation);
    CallRuntime(Runtime::kThrowTypeError, context, message,
                HeapConstant(operation));
    Unreachable();
  }
}

}  // namespace internal
}  // namespace v8