diff --git a/src/compiler/frame.h b/src/compiler/frame.h
index 9764b261ef..d375e5047b 100644
--- a/src/compiler/frame.h
+++ b/src/compiler/frame.h
@@ -136,6 +136,9 @@ class Frame : public ZoneObject {
     return frame_slot_count_ - 1;
   }
 
+  static const int kContextSlot = 2;
+  static const int kJSFunctionSlot = 3;
+
  private:
   int AllocateAlignedFrameSlot(int width) {
     DCHECK(width == 4 || width == 8);
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index cd41e42eff..8543aa0b19 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -72,6 +72,14 @@ class OperandGenerator {
     return Define(node, ToUnallocatedOperand(location, type, GetVReg(node)));
   }
 
+  InstructionOperand DefineAsDualLocation(Node* node,
+                                          LinkageLocation primary_location,
+                                          LinkageLocation secondary_location) {
+    return Define(node,
+                  ToDualLocationUnallocatedOperand(
+                      primary_location, secondary_location, GetVReg(node)));
+  }
+
   InstructionOperand Use(Node* node) {
     return Use(node, UnallocatedOperand(UnallocatedOperand::NONE,
                                         UnallocatedOperand::USED_AT_START,
@@ -211,6 +219,18 @@ class OperandGenerator {
     return operand;
   }
 
+  UnallocatedOperand ToDualLocationUnallocatedOperand(
+      LinkageLocation primary_location, LinkageLocation secondary_location,
+      int virtual_register) {
+    // We only support the primary location being a register and the secondary
+    // one a slot.
+    DCHECK(primary_location.IsRegister() &&
+           secondary_location.IsCalleeFrameSlot());
+    int reg_id = primary_location.AsRegister();
+    int slot_id = secondary_location.AsCalleeFrameSlot();
+    return UnallocatedOperand(reg_id, slot_id, virtual_register);
+  }
+
   UnallocatedOperand ToUnallocatedOperand(LinkageLocation location,
                                           MachineType type,
                                           int virtual_register) {
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index ce2812f739..59a4304bbf 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -1097,9 +1097,15 @@ void InstructionSelector::VisitGuard(Node* node) {
 
 void InstructionSelector::VisitParameter(Node* node) {
   OperandGenerator g(this);
   int index = ParameterIndexOf(node->op());
-  Emit(kArchNop,
-       g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
-                          linkage()->GetParameterType(index)));
+  InstructionOperand op =
+      linkage()->ParameterHasSecondaryLocation(index)
+          ? g.DefineAsDualLocation(
+                node, linkage()->GetParameterLocation(index),
+                linkage()->GetParameterSecondaryLocation(index))
+          : g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
+                               linkage()->GetParameterType(index));
+
+  Emit(kArchNop, op);
 }
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index 7ab2b90778..6b9ad82f34 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -192,6 +192,12 @@ class UnallocatedOperand : public InstructionOperand {
     value_ |= LifetimeField::encode(lifetime);
   }
 
+  UnallocatedOperand(int reg_id, int slot_id, int virtual_register)
+      : UnallocatedOperand(FIXED_REGISTER, reg_id, virtual_register) {
+    value_ |= HasSecondaryStorageField::encode(true);
+    value_ |= SecondaryStorageField::encode(slot_id);
+  }
+
   // Predicates for the operand policy.
   bool HasAnyPolicy() const {
     return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
   }
@@ -222,6 +228,15 @@ class UnallocatedOperand : public InstructionOperand {
     return basic_policy() == EXTENDED_POLICY &&
            extended_policy() == FIXED_DOUBLE_REGISTER;
   }
+  bool HasSecondaryStorage() const {
+    return basic_policy() == EXTENDED_POLICY &&
+           extended_policy() == FIXED_REGISTER &&
+           HasSecondaryStorageField::decode(value_);
+  }
+  int GetSecondaryStorage() const {
+    DCHECK(HasSecondaryStorage());
+    return SecondaryStorageField::decode(value_);
+  }
 
   // [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
   BasicPolicy basic_policy() const {
@@ -301,7 +316,9 @@ class UnallocatedOperand : public InstructionOperand {
   // BitFields specific to BasicPolicy::EXTENDED_POLICY.
   class ExtendedPolicyField : public BitField64<ExtendedPolicy, 36, 3> {};
   class LifetimeField : public BitField64<Lifetime, 39, 1> {};
-  class FixedRegisterField : public BitField64<int, 40, 6> {};
+  class HasSecondaryStorageField : public BitField64<bool, 40, 1> {};
+  class FixedRegisterField : public BitField64<int, 41, 6> {};
+  class SecondaryStorageField : public BitField64<int, 47, 3> {};
 
  private:
   explicit UnallocatedOperand(int virtual_register)
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index f4c0105506..f7eb3b8b4c 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -524,6 +524,28 @@ LinkageLocation Linkage::GetOsrValueLocation(int index) const {
     return incoming_->GetInputLocation(parameter_index);
   }
 }
+
+
+bool Linkage::ParameterHasSecondaryLocation(int index) const {
+  if (incoming_->kind() != CallDescriptor::kCallJSFunction) return false;
+  LinkageLocation loc = GetParameterLocation(index);
+  return (loc == regloc(kJSFunctionRegister) ||
+          loc == regloc(kContextRegister));
+}
+
+LinkageLocation Linkage::GetParameterSecondaryLocation(int index) const {
+  DCHECK(ParameterHasSecondaryLocation(index));
+  LinkageLocation loc = GetParameterLocation(index);
+
+  if (loc == regloc(kJSFunctionRegister)) {
+    return LinkageLocation::ForCalleeFrameSlot(Frame::kJSFunctionSlot);
+  } else {
+    DCHECK(loc == regloc(kContextRegister));
+    return LinkageLocation::ForCalleeFrameSlot(Frame::kContextSlot);
+  }
+}
+
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index 04b6e98202..61efb9bd8f 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -326,6 +326,9 @@ class Linkage : public ZoneObject {
     return incoming_->GetReturnType(index);
  }
 
+  bool ParameterHasSecondaryLocation(int index) const;
+  LinkageLocation GetParameterSecondaryLocation(int index) const;
+
   // Get the frame offset for a given spill slot. The location depends on the
   // calling convention and the specific frame layout, and may thus be
   // architecture-specific. Negative spill slots indicate arguments on the
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index 68862add46..ee6b66f3b1 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -172,7 +172,12 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
       }
       break;
     case UnallocatedOperand::FIXED_REGISTER:
-      constraint->type_ = kFixedRegister;
+      if (unallocated->HasSecondaryStorage()) {
+        constraint->type_ = kRegisterAndSlot;
+        constraint->spilled_slot_ = unallocated->GetSecondaryStorage();
+      } else {
+        constraint->type_ = kFixedRegister;
+      }
       constraint->value_ = unallocated->fixed_register_index();
       break;
     case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
@@ -225,6 +230,7 @@ void RegisterAllocatorVerifier::CheckConstraint(
       CHECK(op->IsExplicit());
       return;
     case kFixedRegister:
+    case kRegisterAndSlot:
       CHECK(op->IsRegister());
       CHECK_EQ(LocationOperand::cast(op)->GetRegister().code(),
                constraint->value_);
@@ -386,11 +392,13 @@ class OperandMap : public ZoneObject {
     }
   }
 
-  void Define(Zone* zone, const InstructionOperand* op, int virtual_register) {
+  MapValue* Define(Zone* zone, const InstructionOperand* op,
+                   int virtual_register) {
     auto value = new (zone) MapValue();
     value->define_vreg = virtual_register;
     auto res = map().insert(std::make_pair(op, value));
     if (!res.second) res.first->second = value;
+    return value;
   }
 
   void Use(const InstructionOperand* op, int use_vreg, bool initial_pass) {
@@ -704,7 +712,20 @@ void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
       }
       for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
         int virtual_register = op_constraints[count].virtual_register_;
-        current->Define(zone(), instr->OutputAt(i), virtual_register);
+        OperandMap::MapValue* value =
+            current->Define(zone(), instr->OutputAt(i), virtual_register);
+        if (op_constraints[count].type_ == kRegisterAndSlot) {
+          const AllocatedOperand* reg_op =
+              AllocatedOperand::cast(instr->OutputAt(i));
+          MachineType mt = reg_op->machine_type();
+          const AllocatedOperand* stack_op = AllocatedOperand::New(
+              zone(), LocationOperand::LocationKind::STACK_SLOT, mt,
+              op_constraints[i].spilled_slot_);
+          auto insert_result =
+              current->map().insert(std::make_pair(stack_op, value));
+          DCHECK(insert_result.second);
+          USE(insert_result);
+        }
       }
     }
   }
diff --git a/src/compiler/register-allocator-verifier.h b/src/compiler/register-allocator-verifier.h
index 87b5cfbb7a..f3ab54f018 100644
--- a/src/compiler/register-allocator-verifier.h
+++ b/src/compiler/register-allocator-verifier.h
@@ -36,12 +36,14 @@ class RegisterAllocatorVerifier final : public ZoneObject {
     kNone,
     kNoneDouble,
     kExplicit,
-    kSameAsFirst
+    kSameAsFirst,
+    kRegisterAndSlot
   };
 
   struct OperandConstraint {
     ConstraintType type_;
     int value_;  // subkind index when relevant
+    int spilled_slot_;
     int virtual_register_;
   };
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 0dc76000f7..49ae35b63d 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -671,13 +671,13 @@ unsigned LiveRange::GetSize() {
 }
 
 
-struct TopLevelLiveRange::SpillAtDefinitionList : ZoneObject {
-  SpillAtDefinitionList(int gap_index, InstructionOperand* operand,
-                        SpillAtDefinitionList* next)
+struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
+  SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
+                         SpillMoveInsertionList* next)
       : gap_index(gap_index), operand(operand), next(next) {}
   const int gap_index;
   InstructionOperand* const operand;
-  SpillAtDefinitionList* const next;
+  SpillMoveInsertionList* const next;
 };
 
 
@@ -687,12 +687,13 @@ TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineType machine_type)
       last_child_id_(0),
       splintered_from_(nullptr),
       spill_operand_(nullptr),
-      spills_at_definition_(nullptr),
+      spill_move_insertion_locations_(nullptr),
       spilled_in_deferred_blocks_(false),
       spill_start_index_(kMaxInt),
       last_child_(this),
       last_pos_(nullptr),
-      splinter_(nullptr) {
+      splinter_(nullptr),
+      has_preassigned_slot_(false) {
   bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
 }
 
@@ -704,11 +705,11 @@ int TopLevelLiveRange::debug_virt_reg() const {
 #endif
 
 
-void TopLevelLiveRange::SpillAtDefinition(Zone* zone, int gap_index,
-                                          InstructionOperand* operand) {
+void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
+                                            InstructionOperand* operand) {
   DCHECK(HasNoSpillType());
-  spills_at_definition_ = new (zone)
-      SpillAtDefinitionList(gap_index, operand, spills_at_definition_);
+  spill_move_insertion_locations_ = new (zone) SpillMoveInsertionList(
+      gap_index, operand, spill_move_insertion_locations_);
 }
 
 
@@ -754,7 +755,7 @@ void TopLevelLiveRange::MarkSpilledInDeferredBlock(
 
   spill_start_index_ = -1;
   spilled_in_deferred_blocks_ = true;
 
-  spills_at_definition_ = nullptr;
+  spill_move_insertion_locations_ = nullptr;
 }
 
 
@@ -794,25 +795,26 @@ bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
 }
 
 
-void TopLevelLiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
-                                                 const InstructionOperand& op,
-                                                 bool might_be_duplicated) {
-  DCHECK_IMPLIES(op.IsConstant(), spills_at_definition_ == nullptr);
+void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
+                                         const InstructionOperand& op,
+                                         bool might_be_duplicated) {
+  DCHECK_IMPLIES(op.IsConstant(), spill_move_insertion_locations() == nullptr);
   auto zone = sequence->zone();
-  for (auto to_spill = spills_at_definition_; to_spill != nullptr;
+  for (auto to_spill = spill_move_insertion_locations(); to_spill != nullptr;
        to_spill = to_spill->next) {
     auto instr = sequence->InstructionAt(to_spill->gap_index);
     auto move = instr->GetOrCreateParallelMove(Instruction::START, zone);
     // Skip insertion if it's possible that the move exists already as a
     // constraint move from a fixed output register to a slot.
-    if (might_be_duplicated) {
+    if (might_be_duplicated || has_preassigned_slot()) {
       bool found = false;
       for (auto move_op : *move) {
         if (move_op->IsEliminated()) continue;
         if (move_op->source().Equals(*to_spill->operand) &&
             move_op->destination().Equals(op)) {
           found = true;
+          if (has_preassigned_slot()) move_op->Eliminate();
           break;
         }
       }
@@ -1609,7 +1611,7 @@ void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
     const InstructionBlock* successor = code()->InstructionBlockAt(succ);
     DCHECK(successor->PredecessorCount() == 1);
     int gap_index = successor->first_instruction_index();
-    range->SpillAtDefinition(allocation_zone(), gap_index, output);
+    range->RecordSpillLocation(allocation_zone(), gap_index, output);
     range->SetSpillStartIndex(gap_index);
   }
 }
@@ -1642,6 +1644,17 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
     int output_vreg = first_output->virtual_register();
     UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
     bool is_tagged = code()->IsReference(output_vreg);
+    if (first_output->HasSecondaryStorage()) {
+      range->MarkHasPreassignedSlot();
+      InstructionOperand* spill_op = AllocatedOperand::New(
+          data()->code_zone(), LocationOperand::LocationKind::STACK_SLOT,
+          range->machine_type(), first_output->GetSecondaryStorage());
+      range->RecordSpillLocation(allocation_zone(), instr_index + 1,
+                                 first_output);
+      range->SetSpillOperand(spill_op);
+      range->SetSpillStartIndex(instr_index + 1);
+      assigned = true;
+    }
     AllocateFixed(first_output, instr_index, is_tagged);
 
     // This value is produced on the stack, we never need to spill it.
@@ -1658,8 +1671,8 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
       // Make sure we add a gap move for spilling (if we have not done
       // so already).
       if (!assigned) {
-        range->SpillAtDefinition(allocation_zone(), instr_index + 1,
-                                 first_output);
+        range->RecordSpillLocation(allocation_zone(), instr_index + 1,
+                                   first_output);
         range->SetSpillStartIndex(instr_index + 1);
       }
     }
@@ -1744,7 +1757,7 @@ void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
     }
     auto live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
     int gap_index = block->first_instruction_index();
-    live_range->SpillAtDefinition(allocation_zone(), gap_index, &output);
+    live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
     live_range->SetSpillStartIndex(gap_index);
     // We use the phi-ness of some nodes in some later heuristics.
     live_range->set_is_phi(true);
@@ -2959,7 +2972,7 @@ void SpillSlotLocator::LocateSpillSlots() {
         }
       }
     } else {
-      auto spills = range->spills_at_definition();
+      auto spills = range->spill_move_insertion_locations();
       DCHECK_NOT_NULL(spills);
       for (; spills != nullptr; spills = spills->next) {
         code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
@@ -3032,7 +3045,7 @@ void OperandAssigner::CommitAssignment() {
               spill_operand)) {
         // Spill at definition if the range isn't spilled only in deferred
         // blocks.
-        top_range->CommitSpillsAtDefinition(
+        top_range->CommitSpillMoves(
             data()->code(), spill_operand,
             top_range->has_slot_use() || top_range->spilled());
       }
@@ -3073,6 +3086,7 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
     if (!data()->IsReference(range)) continue;
     // Skip empty live ranges.
     if (range->IsEmpty()) continue;
+    if (range->has_preassigned_slot()) continue;
 
     // Find the extent of the range and its children.
     int start = range->Start().ToInstructionIndex();
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index 443232abb1..55b4cc5963 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -532,16 +532,16 @@ class TopLevelLiveRange final : public LiveRange {
 
   AllocatedOperand GetSpillRangeOperand() const;
 
-  void SpillAtDefinition(Zone* zone, int gap_index,
-                         InstructionOperand* operand);
+  void RecordSpillLocation(Zone* zone, int gap_index,
+                           InstructionOperand* operand);
   void SetSpillOperand(InstructionOperand* operand);
   void SetSpillStartIndex(int start) {
     spill_start_index_ = Min(start, spill_start_index_);
   }
 
-  void CommitSpillsAtDefinition(InstructionSequence* sequence,
-                                const InstructionOperand& operand,
-                                bool might_be_duplicated);
+  void CommitSpillMoves(InstructionSequence* sequence,
+                        const InstructionOperand& operand,
+                        bool might_be_duplicated);
 
   // If all the children of this range are spilled in deferred blocks, and if
   // for any non-spilled child with a use position requiring a slot, that range
@@ -576,10 +576,10 @@ class TopLevelLiveRange final : public LiveRange {
     return spilled_in_deferred_blocks_;
   }
 
-  struct SpillAtDefinitionList;
+  struct SpillMoveInsertionList;
 
-  SpillAtDefinitionList* spills_at_definition() const {
-    return spills_at_definition_;
+  SpillMoveInsertionList* spill_move_insertion_locations() const {
+    return spill_move_insertion_locations_;
   }
   void set_last_child(LiveRange* range) { last_child_ = range; }
   LiveRange* last_child() const { return last_child_; }
@@ -594,6 +594,9 @@ class TopLevelLiveRange final : public LiveRange {
     splinter->SetSplinteredFrom(this);
   }
 
+  void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
+  bool has_preassigned_slot() const { return has_preassigned_slot_; }
+
  private:
   void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
 
@@ -610,7 +613,7 @@ class TopLevelLiveRange final : public LiveRange {
     InstructionOperand* spill_operand_;
     SpillRange* spill_range_;
   };
-  SpillAtDefinitionList* spills_at_definition_;
+  SpillMoveInsertionList* spill_move_insertion_locations_;
   // TODO(mtrofin): generalize spilling after definition, currently specialized
   // just for spill in a single deferred block.
   bool spilled_in_deferred_blocks_;
@@ -618,6 +621,7 @@ class TopLevelLiveRange final : public LiveRange {
   LiveRange* last_child_;
   UsePosition* last_pos_;
   TopLevelLiveRange* splinter_;
+  bool has_preassigned_slot_;
 
   DISALLOW_COPY_AND_ASSIGN(TopLevelLiveRange);
 };
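
Note on the encoding (not part of the patch): the instruction.h hunk packs the dual location into the existing 64-bit operand word by stealing one bit for a "has secondary storage" flag and appending a small slot-index field after the fixed-register id. Below is a minimal self-contained sketch of that bitfield technique; the BitField64 template here is a stand-in written for this note rather than V8's, and the shifts/widths simply mirror the hunk above.

// Sketch: encoding a dual location (fixed register + pre-assigned stack
// slot) in one 64-bit word, as the new UnallocatedOperand constructor does.
#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField64 {
  static constexpr uint64_t kMask = ((uint64_t{1} << kSize) - 1) << kShift;
  static constexpr uint64_t encode(T value) {
    return static_cast<uint64_t>(value) << kShift;
  }
  static constexpr T decode(uint64_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

using HasSecondaryStorageField = BitField64<bool, 40, 1>;
using FixedRegisterField = BitField64<int, 41, 6>;
using SecondaryStorageField = BitField64<int, 47, 3>;

int main() {
  // A context parameter: some fixed register, plus Frame::kContextSlot (2).
  uint64_t value = HasSecondaryStorageField::encode(true) |
                   FixedRegisterField::encode(12) |
                   SecondaryStorageField::encode(2);
  assert(HasSecondaryStorageField::decode(value));
  assert(FixedRegisterField::decode(value) == 12);
  assert(SecondaryStorageField::decode(value) == 2);
  return 0;
}

A 3-bit secondary-storage field suffices here because only two slots are ever pre-assigned by this patch: Frame::kContextSlot (2) and Frame::kJSFunctionSlot (3).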
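For the linkage side, here is a toy model of the decision made in the linkage.cc hunk: only kCallJSFunction descriptors pin parameters, and only the ones arriving in the context or JSFunction register. The Reg enum, SecondarySlotFor, and the -1 convention are invented for this sketch; only the slot constants and the register-to-slot pairing come from the diff.

// Sketch: which incoming parameters get a secondary (stack-slot) location,
// mirroring ParameterHasSecondaryLocation / GetParameterSecondaryLocation.
#include <cassert>

enum class Reg { kContext, kJSFunction, kArg1, kArg2 };

constexpr int kContextSlot = 2;     // Frame::kContextSlot
constexpr int kJSFunctionSlot = 3;  // Frame::kJSFunctionSlot

// Returns the pre-assigned callee frame slot, or -1 for "no secondary
// location".
int SecondarySlotFor(Reg reg, bool is_js_call) {
  if (!is_js_call) return -1;  // only CallDescriptor::kCallJSFunction
  switch (reg) {
    case Reg::kContext:
      return kContextSlot;
    case Reg::kJSFunction:
      return kJSFunctionSlot;
    default:
      return -1;
  }
}

int main() {
  assert(SecondarySlotFor(Reg::kContext, true) == kContextSlot);
  assert(SecondarySlotFor(Reg::kJSFunction, true) == kJSFunctionSlot);
  assert(SecondarySlotFor(Reg::kArg1, true) == -1);
  assert(SecondarySlotFor(Reg::kContext, false) == -1);
  return 0;
}

The payoff of this pairing shows up in the register-allocator hunks: a range whose definition carries secondary storage gets its spill operand pinned to the well-known slot up front (MarkHasPreassignedSlot), so CommitSpillMoves can eliminate the redundant spill move and PopulateReferenceMaps can skip the range entirely.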