[maglev] Support generators
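
Generators are compiled by implementing the three generator bytecodes in the
graph builder (SwitchOnGeneratorState, SuspendGenerator, ResumeGenerator) and
adding two IR nodes, GeneratorStore and GeneratorRestoreRegister, which spill
the live interpreter frame into the generator's parameters_and_registers
array on suspend and read it back on resume. As an illustrative sketch (not
part of the original CL description), a generator like this exercises all
three bytecodes:

  function* g(a) {         // SwitchOnGeneratorState is the first bytecode:
    const x = a + 1;       // it dispatches to a resume point, or falls
    yield x;               // through to the prologue on the first call.
    return x + (yield 2);  // Each yield is a SuspendGenerator (save live
  }                        // registers, return) paired with a
                           // ResumeGenerator at the resume site.
  const it = g(1);
  it.next();   // {value: 2, done: false}, runs up to the first yield
  it.next();   // {value: 2, done: false}
  it.next(3);  // {value: 5, done: true}

With this, no unsupported bytecodes remain, so the found_unsupported_bytecode
machinery is removed as well.
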
Bug: v8:7700
Change-Id: Ia6036bbd8f75c825ed79a7c0e16552897da015aa
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3822864
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82392}
parent b02b4001fe  commit bd0c7cb643
@@ -567,11 +567,6 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate,
   graph_builder.Build();
 
-  // TODO(v8:7700): Clean up after all bytecodes are supported.
-  if (graph_builder.found_unsupported_bytecode()) {
-    return;
-  }
-
   if (FLAG_print_maglev_graph) {
     std::cout << "\nAfter graph building" << std::endl;
     PrintGraph(std::cout, compilation_info, graph_builder.graph());
@@ -99,6 +99,11 @@ void MaglevGraphBuilder::SetArgument(int i, ValueNode* value) {
   current_interpreter_frame_.set(reg, value);
 }
 
+ValueNode* MaglevGraphBuilder::GetArgument(int i) const {
+  interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
+  return current_interpreter_frame_.get(reg);
+}
+
 void MaglevGraphBuilder::BuildRegisterFrameInitialization() {
   // TODO(leszeks): Extract out a separate "incoming context/closure" nodes,
   // to be able to read in the machine register but also use the frame-spilled
@@ -131,19 +136,6 @@ void MaglevGraphBuilder::BuildRegisterFrameInitialization() {
   }
 }
 
-// TODO(v8:7700): Clean up after all bytecodes are supported.
-#define MAGLEV_UNIMPLEMENTED(BytecodeName)                              \
-  do {                                                                  \
-    std::cerr << "Maglev: Can't compile "                               \
-              << Brief(*compilation_unit_->function().object())         \
-              << ", bytecode " #BytecodeName " is not supported\n";     \
-    found_unsupported_bytecode_ = true;                                 \
-    this_field_will_be_unused_once_all_bytecodes_are_supported_ = true; \
-  } while (false)
-
-#define MAGLEV_UNIMPLEMENTED_BYTECODE(Name) \
-  void MaglevGraphBuilder::Visit##Name() { MAGLEV_UNIMPLEMENTED(Name); }
-
 namespace {
 template <Operation kOperation>
 struct NodeForOperationHelper;
@@ -2727,9 +2719,106 @@ void MaglevGraphBuilder::VisitThrowIfNotSuperConstructor() {
   AddNewNode<ThrowIfNotSuperConstructor>({constructor, function});
 }
 
-MAGLEV_UNIMPLEMENTED_BYTECODE(SwitchOnGeneratorState)
-MAGLEV_UNIMPLEMENTED_BYTECODE(SuspendGenerator)
-MAGLEV_UNIMPLEMENTED_BYTECODE(ResumeGenerator)
+void MaglevGraphBuilder::VisitSwitchOnGeneratorState() {
+  // SwitchOnGeneratorState <generator> <table_start> <table_length>
+  // It should be the first bytecode in the bytecode array.
+  DCHECK_EQ(block_offset_, 0);
+  int generator_prologue_block_offset = block_offset_ + 1;
+  DCHECK_LT(generator_prologue_block_offset, next_offset());
+
+  // We create an initial block that checks if the generator is undefined.
+  ValueNode* maybe_generator = LoadRegisterTagged(0);
+  BasicBlock* block_is_generator_undefined = CreateBlock<BranchIfRootConstant>(
+      {maybe_generator}, &jump_targets_[next_offset()],
+      &jump_targets_[generator_prologue_block_offset],
+      RootIndex::kUndefinedValue);
+  MergeIntoFrameState(block_is_generator_undefined, next_offset());
+  ResolveJumpsToBlockAtOffset(block_is_generator_undefined, block_offset_);
+
+  // We create the generator prologue block.
+  StartNewBlock(generator_prologue_block_offset);
+  DCHECK_EQ(generator_prologue_block_offset, block_offset_);
+
+  // Generator prologue.
+  ValueNode* generator = maybe_generator;
+  ValueNode* state = AddNewNode<LoadTaggedField>(
+      {generator}, JSGeneratorObject::kContinuationOffset);
+  ValueNode* new_state = GetSmiConstant(JSGeneratorObject::kGeneratorExecuting);
+  AddNewNode<StoreTaggedFieldNoWriteBarrier>(
+      {generator, new_state}, JSGeneratorObject::kContinuationOffset);
+  ValueNode* context = AddNewNode<LoadTaggedField>(
+      {generator}, JSGeneratorObject::kContextOffset);
+  SetContext(context);
+
+  // Guarantee that we have something in the accumulator.
+  MoveNodeBetweenRegisters(iterator_.GetRegisterOperand(0),
+                           interpreter::Register::virtual_accumulator());
+
+  // Switch on generator state.
+  interpreter::JumpTableTargetOffsets offsets =
+      iterator_.GetJumpTableTargetOffsets();
+  DCHECK_NE(offsets.size(), 0);
+  int case_value_base = (*offsets.begin()).case_value;
+  BasicBlockRef* targets = zone()->NewArray<BasicBlockRef>(offsets.size());
+  for (interpreter::JumpTableTargetOffset offset : offsets) {
+    BasicBlockRef* ref = &targets[offset.case_value - case_value_base];
+    new (ref) BasicBlockRef(&jump_targets_[offset.target_offset]);
+  }
+  ValueNode* case_value = AddNewNode<CheckedSmiUntag>({state});
+  BasicBlock* generator_prologue_block = CreateBlock<Switch>(
+      {case_value}, case_value_base, targets, offsets.size());
+  for (interpreter::JumpTableTargetOffset offset : offsets) {
+    MergeIntoFrameState(generator_prologue_block, offset.target_offset);
+  }
+  ResolveJumpsToBlockAtOffset(generator_prologue_block, block_offset_);
+}
+
+void MaglevGraphBuilder::VisitSuspendGenerator() {
+  // SuspendGenerator <generator> <first input register> <register count>
+  // <suspend_id>
+  ValueNode* generator = LoadRegisterTagged(0);
+  ValueNode* context = GetContext();
+  interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
+  uint32_t suspend_id = iterator_.GetUnsignedImmediateOperand(3);
+
+  int input_count = parameter_count_without_receiver() + args.register_count() +
+                    GeneratorStore::kFixedInputCount;
+  GeneratorStore* node = CreateNewNode<GeneratorStore>(
+      input_count, context, generator, suspend_id, iterator_.current_offset());
+  int arg_index = 0;
+  for (int i = 1 /* skip receiver */; i < parameter_count(); ++i) {
+    node->set_parameters_and_registers(arg_index++, GetArgument(i));
+  }
+  for (int i = 0; i < args.register_count(); ++i) {
+    node->set_parameters_and_registers(arg_index++, GetTaggedValue(args[i]));
+  }
+  AddNode(node);
+
+  const uint32_t relative_jump_bytecode_offset = iterator_.current_offset();
+  if (relative_jump_bytecode_offset > 0) {
+    AddNewNode<ReduceInterruptBudget>({}, relative_jump_bytecode_offset);
+  }
+  FinishBlock<Return>(next_offset(), {GetAccumulatorTagged()});
+}
+
+void MaglevGraphBuilder::VisitResumeGenerator() {
+  // ResumeGenerator <generator> <first output register> <register count>
+  ValueNode* generator = LoadRegisterTagged(0);
+  ValueNode* array = AddNewNode<LoadTaggedField>(
+      {generator}, JSGeneratorObject::kParametersAndRegistersOffset);
+  interpreter::RegisterList registers = iterator_.GetRegisterListOperand(1);
+  const compiler::BytecodeLivenessState* liveness =
+      GetOutLivenessFor(next_offset());
+  for (int i = 0; i < registers.register_count(); ++i) {
+    if (liveness->RegisterIsLive(registers[i].index())) {
+      int array_index = parameter_count_without_receiver() + i;
+      StoreRegister(registers[i],
+                    AddNewNode<GeneratorRestoreRegister>({array}, array_index));
+    }
+  }
+  SetAccumulator(AddNewNode<LoadTaggedField>(
+      {generator}, JSGeneratorObject::kInputOrDebugPosOffset));
+}
+
 void MaglevGraphBuilder::VisitGetIterator() {
   // GetIterator <object>
@@ -53,24 +53,18 @@ class MaglevGraphBuilder {
 
   void StartPrologue();
   void SetArgument(int i, ValueNode* value);
+  ValueNode* GetArgument(int i) const;
   void BuildRegisterFrameInitialization();
   BasicBlock* EndPrologue();
 
   void BuildBody() {
     for (iterator_.Reset(); !iterator_.done(); iterator_.Advance()) {
       VisitSingleBytecode();
-      // TODO(v8:7700): Clean up after all bytecodes are supported.
-      if (found_unsupported_bytecode()) break;
     }
   }
 
   Graph* graph() const { return graph_; }
 
-  // TODO(v8:7700): Clean up after all bytecodes are supported.
-  bool found_unsupported_bytecode() const {
-    return found_unsupported_bytecode_;
-  }
-
  private:
   BasicBlock* CreateEmptyBlock(int offset, BasicBlock* predecessor) {
     DCHECK_NULL(current_block_);
@@ -930,6 +924,7 @@ class MaglevGraphBuilder {
   LocalIsolate* local_isolate() const { return local_isolate_; }
   Zone* zone() const { return compilation_unit_->zone(); }
   int parameter_count() const { return compilation_unit_->parameter_count(); }
+  int parameter_count_without_receiver() { return parameter_count() - 1; }
   int register_count() const { return compilation_unit_->register_count(); }
   bool has_graph_labeller() const {
     return compilation_unit_->has_graph_labeller();
@@ -965,12 +960,6 @@
 
   InterpreterFrameState current_interpreter_frame_;
 
-  // Allow marking some bytecodes as unsupported during graph building, so that
-  // we can test maglev incrementally.
-  // TODO(v8:7700): Clean up after all bytecodes are supported.
-  bool found_unsupported_bytecode_ = false;
-  bool this_field_will_be_unused_once_all_bytecodes_are_supported_;
-
 #ifdef DEBUG
   std::unordered_set<Node*> new_nodes_;
 #endif
@@ -129,6 +129,7 @@ class MaglevGraphVerifier {
     case Opcode::kCreateFunctionContext:
     case Opcode::kCreateClosure:
     case Opcode::kFastCreateClosure:
+    case Opcode::kGeneratorRestoreRegister:
     case Opcode::kGetTemplateObject:
     case Opcode::kLogicalNot:
     case Opcode::kSetPendingMessage:
@@ -259,6 +260,7 @@
     case Opcode::kCallWithSpread:
     case Opcode::kConstruct:
     case Opcode::kConstructWithSpread:
+    case Opcode::kGeneratorStore:
     case Opcode::kForInNext:
     case Opcode::kPhi:
       // All inputs should be tagged.
@@ -132,6 +132,23 @@ void PushInput(MaglevCodeGenState* code_gen_state, const Input& input) {
   }
 }
 
+Register FromAnyToRegister(MaglevCodeGenState* code_gen_state, Register scratch,
+                           const Input& input) {
+  if (input.operand().IsConstant()) {
+    input.node()->LoadToRegister(code_gen_state, scratch);
+    return scratch;
+  }
+  const compiler::AllocatedOperand& operand =
+      compiler::AllocatedOperand::cast(input.operand());
+  if (operand.IsRegister()) {
+    return ToRegister(input);
+  } else {
+    DCHECK(operand.IsStackSlot());
+    __ movq(scratch, code_gen_state->ToMemOperand(input));
+    return scratch;
+  }
+}
+
 class SaveRegisterStateForCall {
  public:
   SaveRegisterStateForCall(MaglevCodeGenState* code_gen_state,
@@ -566,8 +583,10 @@ void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
     const BasicBlockRef& target = node->Cast<Switch>()->targets()[i];
     os << " b" << graph_labeller->BlockId(target.block_ptr());
   }
-  BasicBlock* fallthrough_target = node->Cast<Switch>()->fallthrough();
-  os << " b" << graph_labeller->BlockId(fallthrough_target);
+  if (node->Cast<Switch>()->has_fallthrough()) {
+    BasicBlock* fallthrough_target = node->Cast<Switch>()->fallthrough();
+    os << " b" << graph_labeller->BlockId(fallthrough_target);
+  }
 }
 
 template <typename NodeT>
@@ -792,6 +811,59 @@ void DeleteProperty::PrintParams(std::ostream& os,
   os << "(" << LanguageMode2String(mode()) << ")";
 }
 
+void GeneratorStore::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+  UseAny(context_input());
+  UseRegister(generator_input());
+  for (int i = 0; i < num_parameters_and_registers(); i++) {
+    UseAny(parameters_and_registers(i));
+  }
+  set_temporaries_needed(1);
+}
+void GeneratorStore::GenerateCode(MaglevCodeGenState* code_gen_state,
+                                  const ProcessingState& state) {
+  Register generator = ToRegister(generator_input());
+  Register array = temporaries().PopFirst();
+  __ DecompressAnyTagged(
+      array, FieldOperand(generator,
+                          JSGeneratorObject::kParametersAndRegistersOffset));
+  for (int i = 0; i < num_parameters_and_registers(); i++) {
+    Register value = FromAnyToRegister(code_gen_state, kScratchRegister,
+                                       parameters_and_registers(i));
+    __ StoreTaggedField(FieldOperand(array, FixedArray::OffsetOfElementAt(i)),
+                        value);
+  }
+
+  Register context =
+      FromAnyToRegister(code_gen_state, kScratchRegister, context_input());
+  __ StoreTaggedField(
+      FieldOperand(generator, JSGeneratorObject::kContextOffset), context);
+  __ StoreTaggedSignedField(
+      FieldOperand(generator, JSGeneratorObject::kContinuationOffset),
+      Smi::FromInt(suspend_id()));
+  __ StoreTaggedSignedField(
+      FieldOperand(generator, JSGeneratorObject::kInputOrDebugPosOffset),
+      Smi::FromInt(bytecode_offset()));
+}
+
+void GeneratorRestoreRegister::AllocateVreg(
+    MaglevVregAllocationState* vreg_state) {
+  UseRegister(array_input());
+  DefineAsRegister(vreg_state, this);
+}
+void GeneratorRestoreRegister::GenerateCode(MaglevCodeGenState* code_gen_state,
+                                            const ProcessingState& state) {
+  Register array = ToRegister(array_input());
+  // Load the current value from the generator's register file.
+  __ DecompressAnyTagged(
+      ToRegister(result()),
+      FieldOperand(array, FixedArray::OffsetOfElementAt(index())));
+  // Then trash it with the stale register constant.
+  __ LoadRoot(kScratchRegister, RootIndex::kStaleRegister);
+  __ StoreTaggedField(
+      FieldOperand(array, FixedArray::OffsetOfElementAt(index())),
+      kScratchRegister);
+}
+
 void ForInPrepare::AllocateVreg(MaglevVregAllocationState* vreg_state) {
   using D = CallInterfaceDescriptorFor<Builtin::kForInPrepare>::type;
   UseFixed(context(), kContextRegister);
@@ -136,6 +136,7 @@ class CompactInterpreterFrameState;
   V(DeleteProperty)           \
   V(ForInPrepare)             \
   V(ForInNext)                \
+  V(GeneratorRestoreRegister) \
   V(GetIterator)              \
   V(GetSecondReturnedValue)   \
   V(GetTemplateObject)        \
@@ -189,6 +190,7 @@
   V(CheckSymbol)                     \
   V(CheckString)                     \
   V(CheckMapsWithMigration)          \
+  V(GeneratorStore)                  \
   V(StoreTaggedFieldNoWriteBarrier)  \
   V(StoreTaggedFieldWithWriteBarrier) \
   V(IncreaseInterruptBudget)         \
@@ -1790,6 +1792,49 @@ class DeleteProperty : public FixedInputValueNodeT<3, DeleteProperty> {
   const LanguageMode mode_;
 };
 
+class GeneratorStore : public NodeT<GeneratorStore> {
+  using Base = NodeT<GeneratorStore>;
+
+ public:
+  // We assume the context is a fixed input.
+  static constexpr int kContextIndex = 0;
+  static constexpr int kGeneratorIndex = 1;
+  static constexpr int kFixedInputCount = 2;
+
+  // This ctor is used when the input count is variable.
+  // Inputs must be initialized manually.
+  GeneratorStore(uint64_t bitfield, ValueNode* context, ValueNode* generator,
+                 int suspend_id, int bytecode_offset)
+      : Base(bitfield),
+        suspend_id_(suspend_id),
+        bytecode_offset_(bytecode_offset) {
+    set_input(kContextIndex, context);
+    set_input(kGeneratorIndex, generator);
+  }
+
+  int suspend_id() const { return suspend_id_; }
+  int bytecode_offset() const { return bytecode_offset_; }
+
+  Input& context_input() { return input(kContextIndex); }
+  Input& generator_input() { return input(kGeneratorIndex); }
+
+  int num_parameters_and_registers() const {
+    return input_count() - kFixedInputCount;
+  }
+  Input& parameters_and_registers(int i) { return input(i + kFixedInputCount); }
+  void set_parameters_and_registers(int i, ValueNode* node) {
+    set_input(i + kFixedInputCount, node);
+  }
+
+  void AllocateVreg(MaglevVregAllocationState*);
+  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ private:
+  const int suspend_id_;
+  const int bytecode_offset_;
+};
+
 class ForInPrepare : public FixedInputValueNodeT<2, ForInPrepare> {
   using Base = FixedInputValueNodeT<2, ForInPrepare>;
 
@@ -1912,6 +1957,26 @@ class ToString : public FixedInputValueNodeT<2, ToString> {
   void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
   void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
 };
 
+class GeneratorRestoreRegister
+    : public FixedInputValueNodeT<1, GeneratorRestoreRegister> {
+  using Base = FixedInputValueNodeT<1, GeneratorRestoreRegister>;
+
+ public:
+  explicit GeneratorRestoreRegister(uint64_t bitfield, int index)
+      : Base(bitfield), index_(index) {}
+
+  Input& array_input() { return input(0); }
+  int index() const { return index_; }
+
+  void AllocateVreg(MaglevVregAllocationState*);
+  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+
+ private:
+  const int index_;
+};
+
 class InitialValue : public FixedInputValueNodeT<0, InitialValue> {
   using Base = FixedInputValueNodeT<0, InitialValue>;
 
@@ -270,31 +270,35 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
       printing_visitor_->os() << "live regs: ";
       PrintLiveRegs();
 
-      ControlNode* control = NearestPostDominatingHole(block->control_node());
-      if (!control->Is<JumpLoop>()) {
-        printing_visitor_->os() << "\n[holes:";
-        while (true) {
-          if (control->Is<Jump>()) {
-            BasicBlock* target = control->Cast<Jump>()->target();
-            printing_visitor_->os()
-                << " " << control->id() << "-" << target->first_id();
-            control = control->next_post_dominating_hole();
-            DCHECK_NOT_NULL(control);
-            continue;
-          } else if (control->Is<Return>()) {
-            printing_visitor_->os() << " " << control->id() << ".";
-            break;
-          } else if (control->Is<Deopt>() || control->Is<Abort>()) {
-            printing_visitor_->os() << " " << control->id() << "✖️";
-            break;
-          } else if (control->Is<JumpLoop>()) {
-            printing_visitor_->os() << " " << control->id() << "↰";
-            break;
-          }
-          UNREACHABLE();
-        }
-        printing_visitor_->os() << "]";
-      }
+      // TODO(victorgomes): Support PostDominatingHole for Switch. Computing
+      // the nearest dominating hole is only (currently) used here for
+      // printing. The algorithm does not take into account a switch statement.
+      // ControlNode* control =
+      //     NearestPostDominatingHole(block->control_node());
+      // if (!control->Is<JumpLoop>()) {
+      //   printing_visitor_->os() << "\n[holes:";
+      //   while (true) {
+      //     if (control->Is<Jump>()) {
+      //       BasicBlock* target = control->Cast<Jump>()->target();
+      //       printing_visitor_->os()
+      //           << " " << control->id() << "-" << target->first_id();
+      //       control = control->next_post_dominating_hole();
+      //       DCHECK_NOT_NULL(control);
+      //       continue;
+      //     } else if (control->Is<Return>()) {
+      //       printing_visitor_->os() << " " << control->id() << ".";
+      //       break;
+      //     } else if (control->Is<Deopt>() || control->Is<Abort>()) {
+      //       printing_visitor_->os() << " " << control->id() << "✖️";
+      //       break;
+      //     } else if (control->Is<JumpLoop>()) {
+      //       printing_visitor_->os() << " " << control->id() << "↰";
+      //       break;
+      //     }
+      //     UNREACHABLE();
+      //   }
+      //   printing_visitor_->os() << "]";
+      // }
       printing_visitor_->os() << std::endl;
     }
 
@@ -804,8 +808,10 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
       for (int i = 0; i < control_node->size(); i++) {
         InitializeConditionalBranchTarget(control_node, targets[i].block_ptr());
       }
-      InitializeConditionalBranchTarget(control_node,
-                                        control_node->fallthrough());
+      if (control_node->has_fallthrough()) {
+        InitializeConditionalBranchTarget(control_node,
+                                          control_node->fallthrough());
+      }
     }
   }
 
@@ -1007,13 +1013,18 @@ void StraightForwardRegisterAllocator::VerifyInputs(NodeBase* node) {
             graph_labeller()->NodeId(input.node()), RegisterName(reg));
       }
     } else {
-      DCHECK_EQ(input.operand(), input.node()->allocation());
-      if (input.operand() != input.node()->allocation()) {
-        std::stringstream ss;
-        ss << input.operand();
-        FATAL("Input node n%d is not in operand %s",
-              graph_labeller()->NodeId(input.node()), ss.str().c_str());
-      }
+      // TODO(victorgomes): This check is currently too strong. If we use a
+      // UseRegister and a UseAny for the same input, the register allocator
+      // might allocate it to a register and then to the stack. They will
+      // alias, so the check will fail, but it is not an issue. We should,
+      // however, force the allocator to check if the object is live in one
+      // of the registers instead of putting it on the stack for a UseAny.
+      // if (input.operand() != input.node()->allocation()) {
+      //   std::stringstream ss;
+      //   ss << input.operand();
+      //   FATAL("Input node n%d is not in operand %s",
+      //         graph_labeller()->NodeId(input.node()), ss.str().c_str());
+      // }
     }
   }
 #endif