[turbofan] Switch JSForInPrepare to %ForInPrepare style.

Now TurboFan always uses the newly introduced %ForInPrepare, no matter
whether baseline is the interpreter or fullcodegen. For fullcodegen, we
introduce a new PrepareId bailout point for this purpose.

Drive-by-fix: Avoid the NoObservableSideEffectsScope in Crankshaft and
use the PrepareId bailout point instead.

R=jarin@chromium.org
BUG=v8:3650
LOG=n

Review URL: https://codereview.chromium.org/1630523002

Cr-Commit-Position: refs/heads/master@{#33480}
This commit is contained in:
bmeurer 2016-01-24 22:00:26 -08:00 committed by Commit bot
parent 3a2fbe0851
commit 825ece488b
15 changed files with 63 additions and 373 deletions

View File

@@ -862,12 +862,13 @@ class ForInStatement final : public ForEachStatement {
ForInType for_in_type() const { return for_in_type_; } ForInType for_in_type() const { return for_in_type_; }
void set_for_in_type(ForInType type) { for_in_type_ = type; } void set_for_in_type(ForInType type) { for_in_type_ = type; }
static int num_ids() { return parent_num_ids() + 5; } static int num_ids() { return parent_num_ids() + 6; }
BailoutId BodyId() const { return BailoutId(local_id(0)); } BailoutId BodyId() const { return BailoutId(local_id(0)); }
BailoutId EnumId() const { return BailoutId(local_id(1)); } BailoutId EnumId() const { return BailoutId(local_id(1)); }
BailoutId ToObjectId() const { return BailoutId(local_id(2)); } BailoutId ToObjectId() const { return BailoutId(local_id(2)); }
BailoutId FilterId() const { return BailoutId(local_id(3)); } BailoutId PrepareId() const { return BailoutId(local_id(3)); }
BailoutId AssignmentId() const { return BailoutId(local_id(4)); } BailoutId FilterId() const { return BailoutId(local_id(4)); }
BailoutId AssignmentId() const { return BailoutId(local_id(5)); }
BailoutId ContinueId() const override { return EntryId(); } BailoutId ContinueId() const override { return EntryId(); }
BailoutId StackCheckId() const override { return BodyId(); } BailoutId StackCheckId() const override { return BodyId(); }

View File

@@ -1331,7 +1331,8 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
// Prepare for-in cache. // Prepare for-in cache.
Node* prepare = NewNode(javascript()->ForInPrepare(), object); Node* prepare = NewNode(javascript()->ForInPrepare(), object);
PrepareFrameState(prepare, stmt->EnumId(), OutputFrameStateCombine::Push()); PrepareFrameState(prepare, stmt->PrepareId(),
OutputFrameStateCombine::Push(3));
Node* cache_type = NewNode(common()->Projection(0), prepare); Node* cache_type = NewNode(common()->Projection(0), prepare);
Node* cache_array = NewNode(common()->Projection(1), prepare); Node* cache_array = NewNode(common()->Projection(1), prepare);
Node* cache_length = NewNode(common()->Projection(2), prepare); Node* cache_length = NewNode(common()->Projection(2), prepare);

View File

@@ -616,173 +616,7 @@ void JSGenericLowering::LowerJSForInNext(Node* node) {
void JSGenericLowering::LowerJSForInPrepare(Node* node) { void JSGenericLowering::LowerJSForInPrepare(Node* node) {
Node* object = NodeProperties::GetValueInput(node, 0); ReplaceWithRuntimeCall(node, Runtime::kForInPrepare);
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
// Get the set of properties to enumerate.
Runtime::Function const* function =
Runtime::FunctionForId(Runtime::kGetPropertyNamesFast);
CallDescriptor const* descriptor = Linkage::GetRuntimeCallDescriptor(
zone(), function->function_id, 1, Operator::kNoProperties,
CallDescriptor::kNeedsFrameState);
Node* cache_type = effect = graph()->NewNode(
common()->Call(descriptor),
jsgraph()->CEntryStubConstant(function->result_size), object,
jsgraph()->ExternalConstant(function->function_id),
jsgraph()->Int32Constant(1), context, frame_state, effect, control);
control = graph()->NewNode(common()->IfSuccess(), cache_type);
Node* object_map = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), object,
jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
effect, control);
Node* cache_type_map = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), cache_type,
jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
effect, control);
Node* meta_map = jsgraph()->HeapConstant(isolate()->factory()->meta_map());
// If we got a map from the GetPropertyNamesFast runtime call, we can do a
// fast modification check. Otherwise, we got a fixed array, and we have to
// perform a slow check on every iteration.
Node* check0 =
graph()->NewNode(machine()->WordEqual(), cache_type_map, meta_map);
Node* branch0 =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
Node* cache_array_true0;
Node* cache_length_true0;
Node* cache_type_true0;
Node* etrue0;
{
// Enum cache case.
Node* cache_type_enum_length = etrue0 = graph()->NewNode(
machine()->Load(MachineType::Uint32()), cache_type,
jsgraph()->IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag),
effect, if_true0);
cache_type_enum_length =
graph()->NewNode(machine()->Word32And(), cache_type_enum_length,
jsgraph()->Uint32Constant(Map::EnumLengthBits::kMask));
Node* check1 =
graph()->NewNode(machine()->Word32Equal(), cache_type_enum_length,
jsgraph()->Int32Constant(0));
Node* branch1 =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
Node* cache_array_true1;
Node* etrue1;
{
// No properties to enumerate.
cache_array_true1 =
jsgraph()->HeapConstant(isolate()->factory()->empty_fixed_array());
etrue1 = etrue0;
}
Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
Node* cache_array_false1;
Node* efalse1;
{
// Load the enumeration cache from the instance descriptors of {object}.
Node* object_map_descriptors = efalse1 = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), object_map,
jsgraph()->IntPtrConstant(Map::kDescriptorsOffset - kHeapObjectTag),
etrue0, if_false1);
Node* object_map_enum_cache = efalse1 = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), object_map_descriptors,
jsgraph()->IntPtrConstant(DescriptorArray::kEnumCacheOffset -
kHeapObjectTag),
efalse1, if_false1);
cache_array_false1 = efalse1 = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), object_map_enum_cache,
jsgraph()->IntPtrConstant(
DescriptorArray::kEnumCacheBridgeCacheOffset - kHeapObjectTag),
efalse1, if_false1);
}
if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
etrue0 =
graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
cache_array_true0 =
graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
cache_array_true1, cache_array_false1, if_true0);
cache_length_true0 = graph()->NewNode(
machine()->WordShl(),
machine()->Is64()
? graph()->NewNode(machine()->ChangeUint32ToUint64(),
cache_type_enum_length)
: cache_type_enum_length,
jsgraph()->Int32Constant(kSmiShiftSize + kSmiTagSize));
cache_type_true0 = cache_type;
}
Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
Node* cache_array_false0;
Node* cache_length_false0;
Node* cache_type_false0;
Node* efalse0;
{
// FixedArray case.
cache_type_false0 = jsgraph()->OneConstant(); // Smi means slow check
cache_array_false0 = cache_type;
cache_length_false0 = efalse0 = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), cache_array_false0,
jsgraph()->IntPtrConstant(FixedArray::kLengthOffset - kHeapObjectTag),
effect, if_false0);
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
Node* cache_array =
graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
cache_array_true0, cache_array_false0, control);
Node* cache_length =
graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
cache_length_true0, cache_length_false0, control);
cache_type =
graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
cache_type_true0, cache_type_false0, control);
for (auto edge : node->use_edges()) {
if (NodeProperties::IsEffectEdge(edge)) {
edge.UpdateTo(effect);
} else if (NodeProperties::IsControlEdge(edge)) {
Node* const use = edge.from();
if (use->opcode() == IrOpcode::kIfSuccess) {
use->ReplaceUses(control);
use->Kill();
} else if (use->opcode() == IrOpcode::kIfException) {
edge.UpdateTo(cache_type_true0);
} else {
UNREACHABLE();
}
} else {
Node* const use = edge.from();
DCHECK(NodeProperties::IsValueEdge(edge));
DCHECK_EQ(IrOpcode::kProjection, use->opcode());
switch (ProjectionIndexOf(use->op())) {
case 0:
use->ReplaceUses(cache_type);
break;
case 1:
use->ReplaceUses(cache_array);
break;
case 2:
use->ReplaceUses(cache_length);
break;
default:
UNREACHABLE();
break;
}
use->Kill();
}
}
} }

View File

@@ -2270,159 +2270,6 @@ Reduction JSTypedLowering::ReduceJSForInDone(Node* node) {
} }
Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
DCHECK_EQ(IrOpcode::kJSForInPrepare, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Get the set of properties to enumerate.
Node* cache_type = effect = graph()->NewNode(
javascript()->CallRuntime(Runtime::kGetPropertyNamesFast), receiver,
context, frame_state, effect, control);
control = graph()->NewNode(common()->IfSuccess(), cache_type);
Node* receiver_map = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
receiver, effect, control);
Node* cache_type_map = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
cache_type, effect, control);
Node* meta_map = jsgraph()->HeapConstant(factory()->meta_map());
// If we got a map from the GetPropertyNamesFast runtime call, we can do a
// fast modification check. Otherwise, we got a fixed array, and we have to
// perform a slow check on every iteration.
Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
cache_type_map, meta_map);
Node* branch0 =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
Node* cache_array_true0;
Node* cache_length_true0;
Node* cache_type_true0;
Node* etrue0;
{
// Enum cache case.
Node* cache_type_enum_length = etrue0 = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForMapBitField3()), cache_type,
effect, if_true0);
cache_length_true0 = graph()->NewNode(
simplified()->NumberBitwiseAnd(), cache_type_enum_length,
jsgraph()->Int32Constant(Map::EnumLengthBits::kMask));
Node* check1 =
graph()->NewNode(machine()->Word32Equal(), cache_length_true0,
jsgraph()->Int32Constant(0));
Node* branch1 =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
Node* cache_array_true1;
Node* etrue1;
{
// No properties to enumerate.
cache_array_true1 =
jsgraph()->HeapConstant(factory()->empty_fixed_array());
etrue1 = etrue0;
}
Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
Node* cache_array_false1;
Node* efalse1;
{
// Load the enumeration cache from the instance descriptors of {receiver}.
Node* receiver_map_descriptors = efalse1 = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForMapDescriptors()),
receiver_map, etrue0, if_false1);
Node* object_map_enum_cache = efalse1 = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForDescriptorArrayEnumCache()),
receiver_map_descriptors, efalse1, if_false1);
cache_array_false1 = efalse1 = graph()->NewNode(
simplified()->LoadField(
AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache()),
object_map_enum_cache, efalse1, if_false1);
}
if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
etrue0 =
graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
cache_array_true0 =
graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
cache_array_true1, cache_array_false1, if_true0);
cache_type_true0 = cache_type;
}
Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
Node* cache_array_false0;
Node* cache_length_false0;
Node* cache_type_false0;
Node* efalse0;
{
// FixedArray case.
cache_type_false0 = jsgraph()->OneConstant(); // Smi means slow check
cache_array_false0 = cache_type;
cache_length_false0 = efalse0 = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
cache_array_false0, effect, if_false0);
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
Node* cache_array =
graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
cache_array_true0, cache_array_false0, control);
Node* cache_length =
graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
cache_length_true0, cache_length_false0, control);
cache_type =
graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
cache_type_true0, cache_type_false0, control);
for (auto edge : node->use_edges()) {
Node* const use = edge.from();
if (NodeProperties::IsEffectEdge(edge)) {
edge.UpdateTo(effect);
Revisit(use);
} else {
if (NodeProperties::IsControlEdge(edge)) {
if (use->opcode() == IrOpcode::kIfSuccess) {
Replace(use, control);
} else if (use->opcode() == IrOpcode::kIfException) {
edge.UpdateTo(cache_type_true0);
continue;
} else {
UNREACHABLE();
}
} else {
DCHECK(NodeProperties::IsValueEdge(edge));
DCHECK_EQ(IrOpcode::kProjection, use->opcode());
switch (ProjectionIndexOf(use->op())) {
case 0:
Replace(use, cache_type);
break;
case 1:
Replace(use, cache_array);
break;
case 2:
Replace(use, cache_length);
break;
default:
UNREACHABLE();
break;
}
}
use->Kill();
}
}
return NoChange(); // All uses were replaced already above.
}
Reduction JSTypedLowering::ReduceJSForInNext(Node* node) { Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
DCHECK_EQ(IrOpcode::kJSForInNext, node->opcode()); DCHECK_EQ(IrOpcode::kJSForInNext, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0); Node* receiver = NodeProperties::GetValueInput(node, 0);
@@ -2670,8 +2517,6 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSForInDone(node); return ReduceJSForInDone(node);
case IrOpcode::kJSForInNext: case IrOpcode::kJSForInNext:
return ReduceJSForInNext(node); return ReduceJSForInNext(node);
case IrOpcode::kJSForInPrepare:
return ReduceJSForInPrepare(node);
case IrOpcode::kJSForInStep: case IrOpcode::kJSForInStep:
return ReduceJSForInStep(node); return ReduceJSForInStep(node);
case IrOpcode::kSelect: case IrOpcode::kSelect:

View File

@@ -83,7 +83,6 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSCallFunction(Node* node); Reduction ReduceJSCallFunction(Node* node);
Reduction ReduceJSForInDone(Node* node); Reduction ReduceJSForInDone(Node* node);
Reduction ReduceJSForInNext(Node* node); Reduction ReduceJSForInNext(Node* node);
Reduction ReduceJSForInPrepare(Node* node);
Reduction ReduceJSForInStep(Node* node); Reduction ReduceJSForInStep(Node* node);
Reduction ReduceSelect(Node* node); Reduction ReduceSelect(Node* node);
Reduction ReduceNumberBinop(Node* node, const Operator* numberOp); Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);

View File

@@ -650,6 +650,11 @@ Type* Typer::Visitor::TypeCall(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeProjection(Node* node) { Type* Typer::Visitor::TypeProjection(Node* node) {
// TODO(bmeurer): Make this beautiful! Use tuple type here.
if (node->InputAt(0)->opcode() == IrOpcode::kJSForInPrepare &&
ProjectionIndexOf(node->op()) == 2) {
return typer_->cache_.kSmi;
}
// TODO(titzer): use the output type of the input to determine the bounds. // TODO(titzer): use the output type of the input to determine the bounds.
return Type::Any(); return Type::Any();
} }

View File

@@ -5315,38 +5315,39 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt, void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
Variable* each_var, Variable* each_var,
HValue* enumerable) { HValue* enumerable) {
HValue* map;
HValue* array;
HValue* enum_length;
Handle<Map> meta_map = isolate()->factory()->meta_map(); Handle<Map> meta_map = isolate()->factory()->meta_map();
bool fast = stmt->for_in_type() == ForInStatement::FAST_FOR_IN; bool fast = stmt->for_in_type() == ForInStatement::FAST_FOR_IN;
BuildCheckHeapObject(enumerable); BuildCheckHeapObject(enumerable);
Add<HCheckInstanceType>(enumerable, HCheckInstanceType::IS_JS_RECEIVER); Add<HCheckInstanceType>(enumerable, HCheckInstanceType::IS_JS_RECEIVER);
Add<HSimulate>(stmt->ToObjectId()); Add<HSimulate>(stmt->ToObjectId());
if (fast) { if (fast) {
map = Add<HForInPrepareMap>(enumerable); HForInPrepareMap* map = Add<HForInPrepareMap>(enumerable);
Push(map); Push(map);
Add<HSimulate>(stmt->EnumId()); Add<HSimulate>(stmt->EnumId());
Drop(1); Drop(1);
Add<HCheckMaps>(map, meta_map); Add<HCheckMaps>(map, meta_map);
array = Add<HForInCacheArray>(enumerable, map, HForInCacheArray* array = Add<HForInCacheArray>(
DescriptorArray::kEnumCacheBridgeCacheIndex); enumerable, map, DescriptorArray::kEnumCacheBridgeCacheIndex);
enum_length = BuildEnumLength(map); HValue* enum_length = BuildEnumLength(map);
HInstruction* index_cache = Add<HForInCacheArray>( HForInCacheArray* index_cache = Add<HForInCacheArray>(
enumerable, map, DescriptorArray::kEnumCacheBridgeIndicesCacheIndex); enumerable, map, DescriptorArray::kEnumCacheBridgeIndicesCacheIndex);
HForInCacheArray::cast(array) array->set_index_cache(index_cache);
->set_index_cache(HForInCacheArray::cast(index_cache));
Push(map);
Push(array);
Push(enum_length);
Add<HSimulate>(stmt->PrepareId());
} else { } else {
Runtime::FunctionId function_id = Runtime::kGetPropertyNamesFast; Runtime::FunctionId function_id = Runtime::kGetPropertyNamesFast;
Add<HPushArguments>(enumerable); Add<HPushArguments>(enumerable);
array = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 1); HCallRuntime* array =
Add<HCallRuntime>(Runtime::FunctionForId(function_id), 1);
Push(array); Push(array);
Add<HSimulate>(stmt->EnumId()); Add<HSimulate>(stmt->EnumId());
Drop(1); Drop(1);
{
NoObservableSideEffectsScope scope(this);
IfBuilder if_fast(this); IfBuilder if_fast(this);
if_fast.If<HCompareMap>(array, meta_map); if_fast.If<HCompareMap>(array, meta_map);
if_fast.Then(); if_fast.Then();
@@ -5354,30 +5355,22 @@ void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
HValue* cache_map = array; HValue* cache_map = array;
HForInCacheArray* cache = Add<HForInCacheArray>( HForInCacheArray* cache = Add<HForInCacheArray>(
enumerable, cache_map, DescriptorArray::kEnumCacheBridgeCacheIndex); enumerable, cache_map, DescriptorArray::kEnumCacheBridgeCacheIndex);
enum_length = BuildEnumLength(cache_map); HValue* enum_length = BuildEnumLength(cache_map);
Push(cache_map); Push(cache_map);
Push(cache); Push(cache);
Push(enum_length); Push(enum_length);
Add<HSimulate>(stmt->PrepareId(), FIXED_SIMULATE);
} }
if_fast.Else(); if_fast.Else();
{ {
Push(graph()->GetConstant1()); Push(graph()->GetConstant1());
Push(array); Push(array);
Push(AddLoadFixedArrayLength(array)); Push(AddLoadFixedArrayLength(array));
} Add<HSimulate>(stmt->PrepareId(), FIXED_SIMULATE);
if_fast.End();
enum_length = Pop();
array = Pop();
map = Pop();
} }
} }
HInstruction* start_index = Add<HConstant>(0); Push(graph()->GetConstant0());
Push(map);
Push(array);
Push(enum_length);
Push(start_index);
HBasicBlock* loop_entry = BuildLoopEntry(stmt); HBasicBlock* loop_entry = BuildLoopEntry(stmt);

View File

@@ -1125,8 +1125,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(r1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check __ mov(r1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(r1, r0); // Smi and array __ Push(r1, r0); // Smi and array
__ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset)); __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ Push(r1); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ mov(r0, Operand(Smi::FromInt(0))); __ mov(r0, Operand(Smi::FromInt(0)));
__ Push(r1, r0); // Fixed array length (as smi) and initial index. __ Push(r0); // Initial index.
// Generate code for doing the condition check. // Generate code for doing the condition check.
__ bind(&loop); __ bind(&loop);

View File

@@ -1120,8 +1120,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(vector_index))); __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(vector_index)));
__ Mov(x1, Smi::FromInt(1)); // Smi(1) indicates slow check. __ Mov(x1, Smi::FromInt(1)); // Smi(1) indicates slow check.
__ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset)); __ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
// Smi and array, fixed array length (as smi) and initial index. __ Push(x1, x0, x2); // Smi and array, fixed array length (as smi).
__ Push(x1, x0, x2, xzr); PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ Push(xzr); // Initial index.
// Generate code for doing the condition check. // Generate code for doing the condition check.
__ Bind(&loop); __ Bind(&loop);

View File

@@ -1055,6 +1055,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax); // Array __ push(eax); // Array
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset)); __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi). __ push(eax); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ push(Immediate(Smi::FromInt(0))); // Initial index. __ push(Immediate(Smi::FromInt(0))); // Initial index.
// Generate code for doing the condition check. // Generate code for doing the condition check.

View File

@@ -1122,8 +1122,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ li(a1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check __ li(a1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(a1, v0); // Smi and array __ Push(a1, v0); // Smi and array
__ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset)); __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
__ Push(a1); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ li(a0, Operand(Smi::FromInt(0))); __ li(a0, Operand(Smi::FromInt(0)));
__ Push(a1, a0); // Fixed array length (as smi) and initial index. __ Push(a0); // Initial index.
// Generate code for doing the condition check. // Generate code for doing the condition check.
__ bind(&loop); __ bind(&loop);

View File

@@ -1122,8 +1122,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ li(a1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check __ li(a1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(a1, v0); // Smi and array __ Push(a1, v0); // Smi and array
__ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset)); __ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
__ Push(a1); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ li(a0, Operand(Smi::FromInt(0))); __ li(a0, Operand(Smi::FromInt(0)));
__ Push(a1, a0); // Fixed array length (as smi) and initial index. __ Push(a0); // Initial index.
// Generate code for doing the condition check. // Generate code for doing the condition check.
__ bind(&loop); __ bind(&loop);

View File

@@ -1084,8 +1084,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ LoadSmiLiteral(r4, Smi::FromInt(1)); // Smi(1) indicates slow check __ LoadSmiLiteral(r4, Smi::FromInt(1)); // Smi(1) indicates slow check
__ Push(r4, r3); // Smi and array __ Push(r4, r3); // Smi and array
__ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset)); __ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
__ Push(r4); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ LoadSmiLiteral(r3, Smi::FromInt(0)); __ LoadSmiLiteral(r3, Smi::FromInt(0));
__ Push(r4, r3); // Fixed array length (as smi) and initial index. __ Push(r3); // Initial index.
// Generate code for doing the condition check. // Generate code for doing the condition check.
__ bind(&loop); __ bind(&loop);

View File

@@ -1077,6 +1077,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(rax); // Array __ Push(rax); // Array
__ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset)); __ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ Push(rax); // Fixed array length (as smi). __ Push(rax); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ Push(Smi::FromInt(0)); // Initial index. __ Push(Smi::FromInt(0)); // Initial index.
// Generate code for doing the condition check. // Generate code for doing the condition check.

View File

@@ -1047,6 +1047,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax); // Array __ push(eax); // Array
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset)); __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi). __ push(eax); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ push(Immediate(Smi::FromInt(0))); // Initial index. __ push(Immediate(Smi::FromInt(0))); // Initial index.
// Generate code for doing the condition check. // Generate code for doing the condition check.