[maglev] Use v8_flags for accessing flag values
Avoid the deprecated FLAG_* syntax; access flag values via the {v8_flags} struct instead.

R=leszeks@chromium.org

Bug: v8:12887
Change-Id: I45a24a6297153f279a060079c0ee318545df6817
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3898931
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83215}
parent 0d04e8440e
commit bc0200cf94
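The change is mechanical: every read of a FLAG_foo global becomes a read of the foo member of the global v8_flags struct. A minimal sketch of the two spellings, assuming a simplified FlagValues struct rather than V8's real flag machinery:

  // Sketch only: V8 defines one member per flag; this illustrates the
  // spelling change, not the real declarations.
  struct FlagValues {
    bool code_comments = false;
    bool trace_maglev_regalloc = false;
  };
  inline FlagValues v8_flags;

  void RecordJumpComment() {
    // Deprecated spelling:      if (FLAG_code_comments) { ... }
    // Spelling after this CL:
    if (v8_flags.code_comments) {
      // RecordComment("-- Jump to deferred code");
    }
  }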
@@ -252,7 +252,7 @@ inline void MaglevAssembler::JumpToDeferredIf(Condition cond,
                                               Args&&... args) {
   DeferredCodeInfo* deferred_code = PushDeferredCode<Function, Args...>(
       std::forward<Function>(deferred_code_gen), std::forward<Args>(args)...);
-  if (FLAG_code_comments) {
+  if (v8_flags.code_comments) {
     RecordComment("-- Jump to deferred code");
   }
   j(cond, &deferred_code->deferred_code_label);
@@ -637,7 +637,7 @@ class MaglevCodeGeneratingNodeProcessor {
       : masm_(masm) {}
 
   void PreProcessGraph(Graph* graph) {
-    if (FLAG_maglev_break_on_entry) {
+    if (v8_flags.maglev_break_on_entry) {
       __ int3();
     }
 
@@ -764,7 +764,7 @@ class MaglevCodeGeneratingNodeProcessor {
   }
 
   void PreProcessBasicBlock(BasicBlock* block) {
-    if (FLAG_code_comments) {
+    if (v8_flags.code_comments) {
      std::stringstream ss;
      ss << "-- Block b" << graph_labeller()->BlockId(block);
      __ RecordComment(ss.str());
@@ -775,14 +775,14 @@ class MaglevCodeGeneratingNodeProcessor {
 
   template <typename NodeT>
   void Process(NodeT* node, const ProcessingState& state) {
-    if (FLAG_code_comments) {
+    if (v8_flags.code_comments) {
      std::stringstream ss;
      ss << "-- " << graph_labeller()->NodeId(node) << ": "
         << PrintNode(graph_labeller(), node);
      __ RecordComment(ss.str());
    }
 
-    if (FLAG_debug_code) {
+    if (v8_flags.debug_code) {
      __ movq(kScratchRegister, rbp);
      __ subq(kScratchRegister, rsp);
      __ cmpq(kScratchRegister,
@@ -806,7 +806,7 @@ class MaglevCodeGeneratingNodeProcessor {
           compiler::AllocatedOperand::cast(value_node->result().operand());
       // We shouldn't spill nodes which already output to the stack.
       if (!source.IsAnyStackSlot()) {
-        if (FLAG_code_comments) __ RecordComment("-- Spill:");
+        if (v8_flags.code_comments) __ RecordComment("-- Spill:");
        if (source.IsRegister()) {
          __ movq(masm()->GetStackSlot(value_node->spill_slot()),
                  ToRegister(source));
@@ -851,7 +851,7 @@ class MaglevCodeGeneratingNodeProcessor {
         // TODO(leszeks): We should remove dead phis entirely and turn this into
         // a DCHECK.
         if (!phi->has_valid_live_range()) {
-          if (FLAG_code_comments) {
+          if (v8_flags.code_comments) {
            std::stringstream ss;
            ss << "-- * "
               << phi->input(state.block()->predecessor_id()).operand() << " → "
@@ -866,7 +866,7 @@ class MaglevCodeGeneratingNodeProcessor {
           compiler::InstructionOperand source = input.operand();
           compiler::AllocatedOperand target =
               compiler::AllocatedOperand::cast(phi->result().operand());
-          if (FLAG_code_comments) {
+          if (v8_flags.code_comments) {
            std::stringstream ss;
            ss << "-- * " << source << " → " << target << " (n"
               << graph_labeller()->NodeId(phi) << ")";
@@ -889,7 +889,7 @@ class MaglevCodeGeneratingNodeProcessor {
       if (LoadMergeState(state, &node, &merge)) {
         compiler::InstructionOperand source =
             merge->operand(predecessor_id);
-        if (FLAG_code_comments) {
+        if (v8_flags.code_comments) {
          std::stringstream ss;
          ss << "-- * " << source << " → " << reg;
          __ RecordComment(ss.str());
@@ -909,7 +909,7 @@ class MaglevCodeGeneratingNodeProcessor {
       if (LoadMergeState(state, &node, &merge)) {
         compiler::InstructionOperand source =
             merge->operand(predecessor_id);
-        if (FLAG_code_comments) {
+        if (v8_flags.code_comments) {
          std::stringstream ss;
          ss << "-- * " << source << " → " << reg;
          __ RecordComment(ss.str());
@@ -54,12 +54,12 @@ MaglevCompilationInfo::MaglevCompilationInfo(Isolate* isolate,
     : zone_(isolate->allocator(), kMaglevZoneName),
       isolate_(isolate),
       broker_(new compiler::JSHeapBroker(
-          isolate, zone(), FLAG_trace_heap_broker, CodeKind::MAGLEV))
-#define V(Name) , Name##_(FLAG_##Name)
+          isolate, zone(), v8_flags.trace_heap_broker, CodeKind::MAGLEV))
+#define V(Name) , Name##_(v8_flags.Name)
           MAGLEV_COMPILATION_FLAG_LIST(V)
 #undef V
 {
-  DCHECK(FLAG_maglev);
+  DCHECK(v8_flags.maglev);
 
   MaglevCompilationHandleScope compilation(isolate, this);
 
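In the MaglevCompilationInfo constructor above, MAGLEV_COMPILATION_FLAG_LIST(V) is an X-macro: it expands V(Name) once per listed flag, so the `#define V(Name) , Name##_(v8_flags.Name)` line snapshots each flag value into a member of the same name at construction time. A hypothetical, self-contained sketch of that pattern (the struct and flag names below are placeholders, not the real Maglev flag list):

  #define EXAMPLE_FLAG_LIST(V) \
    V(code_comments)           \
    V(print_maglev_graph)

  struct ExampleFlags {
    bool code_comments = false;
    bool print_maglev_graph = false;
  };
  inline ExampleFlags example_flags;

  class ExampleCompilationInfo {
   public:
    // The initializer list grows one ", name_(example_flags.name)" entry
    // per flag in the list.
    ExampleCompilationInfo()
        : zone_size_(0)
  #define V(Name) , Name##_(example_flags.Name)
              EXAMPLE_FLAG_LIST(V)
  #undef V
    {
    }

   private:
    int zone_size_;
  // One bool member per flag, mirroring the macro in the real constructor.
  #define V(Name) bool Name##_;
    EXAMPLE_FLAG_LIST(V)
  #undef V
  };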
@@ -539,13 +539,14 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate,
   compiler::UnparkedScopeIfNeeded unparked_scope(compilation_info->broker());
 
   // Build graph.
-  if (FLAG_print_maglev_code || FLAG_code_comments || FLAG_print_maglev_graph ||
-      FLAG_trace_maglev_graph_building || FLAG_trace_maglev_regalloc) {
+  if (v8_flags.print_maglev_code || v8_flags.code_comments ||
+      v8_flags.print_maglev_graph || v8_flags.trace_maglev_graph_building ||
+      v8_flags.trace_maglev_regalloc) {
     compilation_info->set_graph_labeller(new MaglevGraphLabeller());
   }
 
-  if (FLAG_print_maglev_code || FLAG_print_maglev_graph ||
-      FLAG_trace_maglev_graph_building || FLAG_trace_maglev_regalloc) {
+  if (v8_flags.print_maglev_code || v8_flags.print_maglev_graph ||
+      v8_flags.trace_maglev_graph_building || v8_flags.trace_maglev_regalloc) {
     MaglevCompilationUnit* top_level_unit =
         compilation_info->toplevel_compilation_unit();
     std::cout << "Compiling " << Brief(*top_level_unit->function().object())
@@ -561,7 +562,7 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate,
 
   graph_builder.Build();
 
-  if (FLAG_print_maglev_graph) {
+  if (v8_flags.print_maglev_graph) {
     std::cout << "\nAfter graph buiding" << std::endl;
     PrintGraph(std::cout, compilation_info, graph_builder.graph());
   }
@@ -579,7 +580,7 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate,
     processor.ProcessGraph(graph_builder.graph());
   }
 
-  if (FLAG_print_maglev_graph) {
+  if (v8_flags.print_maglev_graph) {
     std::cout << "After node processor" << std::endl;
     PrintGraph(std::cout, compilation_info, graph_builder.graph());
   }
@@ -587,7 +588,7 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate,
   StraightForwardRegisterAllocator allocator(compilation_info,
                                              graph_builder.graph());
 
-  if (FLAG_print_maglev_graph) {
+  if (v8_flags.print_maglev_graph) {
     std::cout << "After register allocation" << std::endl;
     PrintGraph(std::cout, compilation_info, graph_builder.graph());
   }
@@ -629,7 +630,7 @@ MaybeHandle<CodeT> MaglevCompiler::GenerateCode(
     return {};
   }
 
-  if (FLAG_print_maglev_code) {
+  if (v8_flags.print_maglev_code) {
     code->Print();
   }
 
@@ -88,7 +88,7 @@ MaglevCompilationJob::MaglevCompilationJob(
     std::unique_ptr<MaglevCompilationInfo>&& info)
     : OptimizedCompilationJob(kMaglevCompilerName, State::kReadyToPrepare),
       info_(std::move(info)) {
-  DCHECK(FLAG_maglev);
+  DCHECK(v8_flags.maglev);
 }
 
 MaglevCompilationJob::~MaglevCompilationJob() = default;
@@ -157,7 +157,7 @@ class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
 
 MaglevConcurrentDispatcher::MaglevConcurrentDispatcher(Isolate* isolate)
     : isolate_(isolate) {
-  if (FLAG_concurrent_recompilation && FLAG_maglev) {
+  if (v8_flags.concurrent_recompilation && v8_flags.maglev) {
     job_handle_ = V8::GetCurrentPlatform()->PostJob(
         TaskPriority::kUserVisible, std::make_unique<JobTask>(this));
     DCHECK(is_enabled());
@@ -133,7 +133,7 @@ void MaglevGraphBuilder::BuildMergeStates() {
     const compiler::LoopInfo& loop_info = offset_and_info.second;
     const compiler::BytecodeLivenessState* liveness = GetInLivenessFor(offset);
     DCHECK_NULL(merge_states_[offset]);
-    if (FLAG_trace_maglev_graph_building) {
+    if (v8_flags.trace_maglev_graph_building) {
       std::cout << "- Creating loop merge state at @" << offset << std::endl;
     }
     merge_states_[offset] = MergePointInterpreterFrameState::NewForLoop(
@@ -150,7 +150,7 @@ void MaglevGraphBuilder::BuildMergeStates() {
         GetInLivenessFor(offset);
     DCHECK_EQ(NumPredecessors(offset), 0);
     DCHECK_NULL(merge_states_[offset]);
-    if (FLAG_trace_maglev_graph_building) {
+    if (v8_flags.trace_maglev_graph_building) {
       std::cout << "- Creating exception merge state at @" << offset
                 << ", context register r" << context_reg.index() << std::endl;
     }
@@ -1966,7 +1966,7 @@ void MaglevGraphBuilder::BuildCallFromRegisters(
       return;
 
     case compiler::ProcessedFeedback::kCall: {
-      if (!FLAG_maglev_inlining) break;
+      if (!v8_flags.maglev_inlining) break;
 
      const compiler::CallFeedback& call_feedback = processed_feedback.AsCall();
      CallFeedbackContent content = call_feedback.call_feedback_content();
@@ -2677,7 +2677,7 @@ void MaglevGraphBuilder::MergeDeadIntoFrameState(int target) {
     // If this merge is the last one which kills a loop merge, remove that
     // merge state.
     if (merge_states_[target]->is_unreachable_loop()) {
-      if (FLAG_trace_maglev_graph_building) {
+      if (v8_flags.trace_maglev_graph_building) {
        std::cout << "! Killing loop merge state at @" << target << std::endl;
      }
      merge_states_[target] = nullptr;
@@ -3059,7 +3059,7 @@ void MaglevGraphBuilder::VisitResumeGenerator() {
       {generator}, JSGeneratorObject::kParametersAndRegistersOffset);
   interpreter::RegisterList registers = iterator_.GetRegisterListOperand(1);
 
-  if (FLAG_maglev_assert) {
+  if (v8_flags.maglev_assert) {
     // Check if register count is invalid, that is, larger than the
     // register file length.
     ValueNode* array_length_smi =
@@ -96,7 +96,7 @@ class MaglevGraphBuilder {
       if (has_graph_labeller()) {
         for (Phi* phi : *merge_states_[offset]->phis()) {
           graph_labeller()->RegisterNode(phi);
-          if (FLAG_trace_maglev_graph_building) {
+          if (v8_flags.trace_maglev_graph_building) {
            std::cout << " " << phi << " "
                      << PrintNodeLabel(graph_labeller(), phi) << ": "
                      << PrintNode(graph_labeller(), phi) << std::endl;
@@ -147,7 +147,7 @@ class MaglevGraphBuilder {
       if (has_graph_labeller()) {
         for (Phi* phi : *merge_states_[offset]->phis()) {
           graph_labeller()->RegisterNode(phi);
-          if (FLAG_trace_maglev_graph_building) {
+          if (v8_flags.trace_maglev_graph_building) {
            std::cout << " " << phi << " "
                      << PrintNodeLabel(graph_labeller(), phi) << ": "
                      << PrintNode(graph_labeller(), phi) << std::endl;
@@ -174,7 +174,7 @@ class MaglevGraphBuilder {
 
   void MarkBytecodeDead() {
     DCHECK_NULL(current_block_);
-    if (FLAG_trace_maglev_graph_building) {
+    if (v8_flags.trace_maglev_graph_building) {
      std::cout << "== Dead ==\n"
                << std::setw(4) << iterator_.current_offset() << " : ";
      interpreter::BytecodeDecoder::Decode(std::cout,
@@ -250,7 +250,7 @@ class MaglevGraphBuilder {
       merge_state->Merge(*compilation_unit_, current_interpreter_frame_,
                          graph()->last_block(), offset);
     }
-    if (FLAG_trace_maglev_graph_building) {
+    if (v8_flags.trace_maglev_graph_building) {
      auto detail = merge_state->is_exception_handler() ? "exception handler"
                    : merge_state->is_loop() ? "loop header"
                                             : "merge";
@@ -316,7 +316,7 @@ class MaglevGraphBuilder {
     }
 
     DCHECK_NOT_NULL(current_block_);
-    if (FLAG_trace_maglev_graph_building) {
+    if (v8_flags.trace_maglev_graph_building) {
      std::cout << std::setw(4) << iterator_.current_offset() << " : ";
      interpreter::BytecodeDecoder::Decode(std::cout,
                                           iterator_.current_address());
@@ -352,7 +352,7 @@ class MaglevGraphBuilder {
     }
     current_block_->nodes().Add(node);
     if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
-    if (FLAG_trace_maglev_graph_building) {
+    if (v8_flags.trace_maglev_graph_building) {
      std::cout << " " << node << " "
                << PrintNodeLabel(graph_labeller(), node) << ": "
                << PrintNode(graph_labeller(), node) << std::endl;
@@ -900,7 +900,7 @@ class MaglevGraphBuilder {
     graph()->Add(block);
     if (has_graph_labeller()) {
       graph_labeller()->RegisterBasicBlock(block);
-      if (FLAG_trace_maglev_graph_building) {
+      if (v8_flags.trace_maglev_graph_building) {
        bool kSkipTargets = true;
        std::cout << " " << control_node << " "
                  << PrintNodeLabel(graph_labeller(), control_node) << ": "
@@ -938,7 +938,7 @@ class MaglevGraphBuilder {
     DCHECK_NULL(current_block_);
     if (std::is_base_of<ConditionalControlNode, ControlNodeT>::value) {
       if (NumPredecessors(next_block_offset) == 1) {
-        if (FLAG_trace_maglev_graph_building) {
+        if (v8_flags.trace_maglev_graph_building) {
          std::cout << "== New block (single fallthrough) ==" << std::endl;
        }
        StartNewBlock(next_block_offset);
@@ -45,7 +45,7 @@ void PrintPaddedId(std::ostream& os, MaglevGraphLabeller* graph_labeller,
   for (int i = 0; i < padding_width; ++i) {
     os << padding;
   }
-  if (FLAG_log_colour) os << "\033[0m";
+  if (v8_flags.log_colour) os << "\033[0m";
   if (node->has_id()) {
     os << node->id() << "/";
   }
@@ -158,7 +158,7 @@ void PrintVerticalArrows(std::ostream& os,
       desired_color = (i % 6) + 1;
       c.AddVertical();
     }
-    if (FLAG_log_colour && desired_color != current_color &&
+    if (v8_flags.log_colour && desired_color != current_color &&
        desired_color != -1) {
      os << "\033[0;3" << desired_color << "m";
      current_color = desired_color;
@@ -167,7 +167,7 @@ void PrintVerticalArrows(std::ostream& os,
   }
   // If there are no arrows starting here, clear the color. Otherwise,
   // PrintPaddedId will clear it.
-  if (FLAG_log_colour && arrows_starting_here.empty() &&
+  if (v8_flags.log_colour && arrows_starting_here.empty() &&
      targets_starting_here.empty()) {
    os << "\033[0m";
  }
@@ -342,7 +342,7 @@ void MaglevPrintingVisitor::PreProcessBasicBlock(BasicBlock* block) {
       desired_color = (i % 6) + 1;
       c.AddVertical();
     }
-    if (FLAG_log_colour && current_color != desired_color &&
+    if (v8_flags.log_colour && current_color != desired_color &&
        desired_color != -1) {
      os_ << "\033[0;3" << desired_color << "m";
      current_color = desired_color;
@@ -350,7 +350,7 @@ void MaglevPrintingVisitor::PreProcessBasicBlock(BasicBlock* block) {
     os_ << c;
   }
   os_ << (saw_start ? "►" : " ");
-  if (FLAG_log_colour) os_ << "\033[0m";
+  if (v8_flags.log_colour) os_ << "\033[0m";
   }
 
   int block_id = graph_labeller_->BlockId(block);
@@ -504,14 +504,14 @@ class MergePointInterpreterFrameState {
       known_node_aspects_->Merge(unmerged.known_node_aspects());
     }
 
-    if (FLAG_trace_maglev_graph_building) {
+    if (v8_flags.trace_maglev_graph_building) {
       std::cout << "Merging..." << std::endl;
     }
     frame_state_.ForEachValue(compilation_unit, [&](ValueNode*& value,
                                                     interpreter::Register reg) {
       CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
 
-      if (FLAG_trace_maglev_graph_building) {
+      if (v8_flags.trace_maglev_graph_building) {
        std::cout << " " << reg.ToString() << ": "
                  << PrintNodeLabel(compilation_unit.graph_labeller(), value)
                  << " <- "
@@ -520,7 +520,7 @@ class MergePointInterpreterFrameState {
       }
       value = MergeValue(compilation_unit, reg, value, unmerged.get(reg),
                          merge_offset);
-      if (FLAG_trace_maglev_graph_building) {
+      if (v8_flags.trace_maglev_graph_building) {
        std::cout << " => "
                  << PrintNodeLabel(compilation_unit.graph_labeller(), value)
                  << ": " << PrintNode(compilation_unit.graph_labeller(), value)
@@ -541,14 +541,14 @@ class MergePointInterpreterFrameState {
     DCHECK(is_unmerged_loop());
     predecessors_[predecessor_count_ - 1] = loop_end_block;
 
-    if (FLAG_trace_maglev_graph_building) {
+    if (v8_flags.trace_maglev_graph_building) {
       std::cout << "Merging loop backedge..." << std::endl;
     }
     frame_state_.ForEachValue(compilation_unit, [&](ValueNode* value,
                                                     interpreter::Register reg) {
       CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
 
-      if (FLAG_trace_maglev_graph_building) {
+      if (v8_flags.trace_maglev_graph_building) {
        std::cout << " " << reg.ToString() << ": "
                  << PrintNodeLabel(compilation_unit.graph_labeller(), value)
                  << " <- "
@@ -557,7 +557,7 @@ class MergePointInterpreterFrameState {
       }
       MergeLoopValue(compilation_unit, reg, value, loop_end_state.get(reg),
                      merge_offset);
-      if (FLAG_trace_maglev_graph_building) {
+      if (v8_flags.trace_maglev_graph_building) {
        std::cout << " => "
                  << PrintNodeLabel(compilation_unit.graph_labeller(), value)
                  << ": " << PrintNode(compilation_unit.graph_labeller(), value)
@@ -770,7 +770,7 @@ class MergePointInterpreterFrameState {
 
     for (int i = 0; i < predecessors_so_far_; i++) result->set_input(i, merged);
     result->set_input(predecessors_so_far_, unmerged);
-    if (FLAG_trace_maglev_graph_building) {
+    if (v8_flags.trace_maglev_graph_building) {
      for (int i = predecessors_so_far_ + 1; i < predecessor_count_; i++) {
        result->set_input(i, nullptr);
      }
@@ -823,7 +823,7 @@ class MergePointInterpreterFrameState {
     DCHECK_EQ(predecessors_so_far_, 0);
     // Create a new loop phi, which for now is empty.
     Phi* result = Node::New<Phi>(zone, predecessor_count_, reg, merge_offset);
-    if (FLAG_trace_maglev_graph_building) {
+    if (v8_flags.trace_maglev_graph_building) {
      for (int i = 0; i < predecessor_count_; i++) {
        result->set_input(i, nullptr);
      }
@@ -151,7 +151,7 @@ void AllocateRaw(MaglevAssembler* masm, RegisterSnapshot& register_snapshot,
   // TODO(victorgomes): Call the runtime for large object allocation.
   // TODO(victorgomes): Support double alignment.
   DCHECK_EQ(alignment, kTaggedAligned);
-  if (FLAG_single_generation) {
+  if (v8_flags.single_generation) {
     alloc_type = AllocationType::kOld;
   }
   bool in_new_space = alloc_type == AllocationType::kYoung;
@@ -1386,7 +1386,7 @@ void CheckJSArrayBounds::GenerateCode(MaglevAssembler* masm,
   __ AssertNotSmi(object);
   __ AssertSmi(index);
 
-  if (FLAG_debug_code) {
+  if (v8_flags.debug_code) {
     __ CmpObjectType(object, JS_ARRAY_TYPE, kScratchRegister);
     __ Assert(equal, AbortReason::kUnexpectedValue);
   }
@@ -1408,13 +1408,13 @@ void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm,
   __ AssertNotSmi(object);
   __ AssertSmi(index);
 
-  if (FLAG_debug_code) {
+  if (v8_flags.debug_code) {
     __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, kScratchRegister);
     __ Assert(greater_equal, AbortReason::kUnexpectedValue);
   }
   __ LoadAnyTaggedField(kScratchRegister,
                         FieldOperand(object, JSObject::kElementsOffset));
-  if (FLAG_debug_code) {
+  if (v8_flags.debug_code) {
     __ AssertNotSmi(kScratchRegister);
   }
   TaggedRegister length(kScratchRegister);
@@ -1465,7 +1465,7 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
   __ j(zero, &deopt_info->deopt_entry_label);
   __ LoadTaggedPointerField(
       object, FieldOperand(object, ThinString::kActualOffset));
-  if (FLAG_debug_code) {
+  if (v8_flags.debug_code) {
     __ RecordComment("DCHECK IsInternalizedString");
     __ LoadMap(map_tmp, object);
     __ testw(FieldOperand(map_tmp, Map::kInstanceTypeOffset),
@@ -1524,13 +1524,13 @@ void LoadTaggedElement::GenerateCode(MaglevAssembler* masm,
   Register index = ToRegister(index_input());
   Register result_reg = ToRegister(result());
   __ AssertNotSmi(object);
-  if (FLAG_debug_code) {
+  if (v8_flags.debug_code) {
     __ CmpObjectType(object, JS_OBJECT_TYPE, kScratchRegister);
     __ Assert(above_equal, AbortReason::kUnexpectedValue);
   }
   __ DecompressAnyTagged(kScratchRegister,
                          FieldOperand(object, JSObject::kElementsOffset));
-  if (FLAG_debug_code) {
+  if (v8_flags.debug_code) {
     __ CmpObjectType(kScratchRegister, FIXED_ARRAY_TYPE, kScratchRegister);
     __ Assert(equal, AbortReason::kUnexpectedValue);
     // Reload since CmpObjectType clobbered the scratch register.
@@ -1564,13 +1564,13 @@ void LoadDoubleElement::GenerateCode(MaglevAssembler* masm,
   Register index = ToRegister(index_input());
   DoubleRegister result_reg = ToDoubleRegister(result());
   __ AssertNotSmi(object);
-  if (FLAG_debug_code) {
+  if (v8_flags.debug_code) {
     __ CmpObjectType(object, JS_OBJECT_TYPE, kScratchRegister);
     __ Assert(above_equal, AbortReason::kUnexpectedValue);
   }
   __ DecompressAnyTagged(kScratchRegister,
                          FieldOperand(object, JSObject::kElementsOffset));
-  if (FLAG_debug_code) {
+  if (v8_flags.debug_code) {
     __ CmpObjectType(kScratchRegister, FIXED_DOUBLE_ARRAY_TYPE,
                      kScratchRegister);
     __ Assert(equal, AbortReason::kUnexpectedValue);
@@ -2500,7 +2500,7 @@ void LogicalNot::GenerateCode(MaglevAssembler* masm,
   Register object = ToRegister(value());
   Register return_value = ToRegister(result());
 
-  if (FLAG_debug_code) {
+  if (v8_flags.debug_code) {
     // LogicalNot expects either TrueValue or FalseValue.
     Label next;
     __ CompareRoot(object, RootIndex::kFalseValue);
@@ -3479,7 +3479,7 @@ void AttemptOnStackReplacement(MaglevAssembler* masm, Label* return_label,
   }
 
   __ bind(&deopt);
-  if (V8_LIKELY(FLAG_turbofan)) {
+  if (V8_LIKELY(v8_flags.turbofan)) {
     __ EmitEagerDeopt(node, DeoptimizeReason::kPrepareForOnStackReplacement);
   } else {
     // Fall through. With TF disabled we cannot OSR and thus it doesn't make
@@ -283,7 +283,7 @@ void StraightForwardRegisterAllocator::PrintLiveRegs() const {
 }
 
 void StraightForwardRegisterAllocator::AllocateRegisters() {
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
     printing_visitor_.reset(new MaglevPrintingVisitor(
         compilation_info_->graph_labeller(), std::cout));
     printing_visitor_->PreProcessGraph(graph_);
@@ -326,7 +326,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
       InitializeRegisterValues(block->empty_block_register_state());
     }
 
-    if (FLAG_trace_maglev_regalloc) {
+    if (v8_flags.trace_maglev_regalloc) {
       printing_visitor_->PreProcessBasicBlock(block);
       printing_visitor_->os() << "live regs: ";
       PrintLiveRegs();
@@ -391,7 +391,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
        if (phi->owner() == interpreter::Register::virtual_accumulator() &&
            !phi->is_dead()) {
          phi->result().SetAllocated(ForceAllocate(kReturnRegister0, phi));
-          if (FLAG_trace_maglev_regalloc) {
+          if (v8_flags.trace_maglev_regalloc) {
            printing_visitor_->Process(phi, ProcessingState(block_it_));
            printing_visitor_->os() << "phi (exception message object) "
                                    << phi->result().operand() << std::endl;
@@ -411,7 +411,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
        compiler::AllocatedOperand allocation =
            general_registers_.AllocateRegister(phi);
        phi->result().SetAllocated(allocation);
-        if (FLAG_trace_maglev_regalloc) {
+        if (v8_flags.trace_maglev_regalloc) {
          printing_visitor_->Process(phi, ProcessingState(block_it_));
          printing_visitor_->os()
              << "phi (new reg) " << phi->result().operand() << std::endl;
@@ -428,14 +428,14 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
        AllocateSpillSlot(phi);
        // TODO(verwaest): Will this be used at all?
        phi->result().SetAllocated(phi->spill_slot());
-        if (FLAG_trace_maglev_regalloc) {
+        if (v8_flags.trace_maglev_regalloc) {
          printing_visitor_->Process(phi, ProcessingState(block_it_));
          printing_visitor_->os()
              << "phi (stack) " << phi->result().operand() << std::endl;
        }
      }
 
-      if (FLAG_trace_maglev_regalloc) {
+      if (v8_flags.trace_maglev_regalloc) {
        printing_visitor_->os() << "live regs: ";
        PrintLiveRegs();
        printing_visitor_->os() << std::endl;
@@ -470,7 +470,7 @@ void StraightForwardRegisterAllocator::UpdateUse(
 
   if (!node->is_dead()) return;
 
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
     printing_visitor_->os()
        << " freeing " << PrintNodeLabel(graph_labeller(), node) << "\n";
   }
@@ -498,7 +498,7 @@ void StraightForwardRegisterAllocator::UpdateUse(
   detail::DeepForEachInput(
       &deopt_info,
       [&](ValueNode* node, interpreter::Register reg, InputLocation* input) {
-        if (FLAG_trace_maglev_regalloc) {
+        if (v8_flags.trace_maglev_regalloc) {
          printing_visitor_->os()
              << "- using " << PrintNodeLabel(graph_labeller(), node) << "\n";
        }
@@ -523,7 +523,7 @@ void StraightForwardRegisterAllocator::UpdateUse(
        // Skip over the result location since it is irrelevant for lazy deopts
        // (unoptimized code will recreate the result).
        if (deopt_info.IsResultRegister(reg)) return;
-        if (FLAG_trace_maglev_regalloc) {
+        if (v8_flags.trace_maglev_regalloc) {
          printing_visitor_->os()
              << "- using " << PrintNodeLabel(graph_labeller(), node) << "\n";
        }
@@ -555,7 +555,7 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
   DCHECK(!node->Is<ConstantGapMove>());
 
   current_node_ = node;
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
     printing_visitor_->os()
        << "Allocating " << PrintNodeLabel(graph_labeller(), node)
        << " inputs...\n";
@@ -567,26 +567,26 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
 
   // Allocate node output.
   if (node->Is<ValueNode>()) {
-    if (FLAG_trace_maglev_regalloc) {
+    if (v8_flags.trace_maglev_regalloc) {
       printing_visitor_->os() << "Allocating result...\n";
     }
     AllocateNodeResult(node->Cast<ValueNode>());
   }
 
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
     printing_visitor_->os() << "Updating uses...\n";
   }
 
   // Update uses only after allocating the node result. This order is necessary
   // to avoid emitting input-clobbering gap moves during node result allocation.
   if (node->properties().can_eager_deopt()) {
-    if (FLAG_trace_maglev_regalloc) {
+    if (v8_flags.trace_maglev_regalloc) {
       printing_visitor_->os() << "Using eager deopt nodes...\n";
     }
     UpdateUse(*node->eager_deopt_info());
   }
   for (Input& input : *node) {
-    if (FLAG_trace_maglev_regalloc) {
+    if (v8_flags.trace_maglev_regalloc) {
      printing_visitor_->os()
          << "Using input " << PrintNodeLabel(graph_labeller(), input.node())
          << "...\n";
@@ -596,7 +596,7 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
 
   // Lazy deopts are semantically after the node, so update them last.
   if (node->properties().can_lazy_deopt()) {
-    if (FLAG_trace_maglev_regalloc) {
+    if (v8_flags.trace_maglev_regalloc) {
       printing_visitor_->os() << "Using lazy deopt nodes...\n";
     }
     UpdateUse(*node->lazy_deopt_info());
@@ -604,7 +604,7 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
 
   if (node->properties().needs_register_snapshot()) SaveRegisterSnapshot(node);
 
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
     printing_visitor_->Process(node, ProcessingState(block_it_));
     printing_visitor_->os() << "live regs: ";
     PrintLiveRegs();
@@ -716,7 +716,7 @@ void StraightForwardRegisterAllocator::DropRegisterValue(
 
   ValueNode* node = registers.GetValue(reg);
 
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
     printing_visitor_->os() << " dropping " << reg << " value "
                             << PrintNodeLabel(graph_labeller(), node) << "\n";
   }
@@ -877,7 +877,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
     DCHECK_EQ(node->input_count(), 0);
     DCHECK_EQ(node->properties(), OpProperties(0));
 
-    if (FLAG_trace_maglev_regalloc) {
+    if (v8_flags.trace_maglev_regalloc) {
       printing_visitor_->Process(node, ProcessingState(block_it_));
     }
   } else if (node->Is<Deopt>()) {
@@ -889,7 +889,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
 
     UpdateUse(*node->eager_deopt_info());
 
-    if (FLAG_trace_maglev_regalloc) {
+    if (v8_flags.trace_maglev_regalloc) {
       printing_visitor_->Process(node, ProcessingState(block_it_));
     }
   } else if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
@@ -927,7 +927,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
       }
     }
 
-    if (FLAG_trace_maglev_regalloc) {
+    if (v8_flags.trace_maglev_regalloc) {
       printing_visitor_->Process(node, ProcessingState(block_it_));
     }
   } else {
@@ -950,7 +950,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
     double_registers_.clear_blocked();
     VerifyRegisterState();
 
-    if (FLAG_trace_maglev_regalloc) {
+    if (v8_flags.trace_maglev_regalloc) {
      printing_visitor_->Process(node, ProcessingState(block_it_));
    }
 
@@ -984,7 +984,7 @@ void StraightForwardRegisterAllocator::TryAllocateToInput(Phi* phi) {
     if (general_registers_.unblocked_free().has(reg)) {
       phi->result().SetAllocated(ForceAllocate(reg, phi));
       DCHECK_EQ(general_registers_.GetValue(reg), phi);
-      if (FLAG_trace_maglev_regalloc) {
+      if (v8_flags.trace_maglev_regalloc) {
        printing_visitor_->Process(phi, ProcessingState(block_it_));
        printing_visitor_->os()
            << "phi (reuse) " << input.operand() << std::endl;
@@ -1001,7 +1001,7 @@ void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode(
   Node* gap_move;
   if (source.IsConstant()) {
     DCHECK(IsConstantNode(node->opcode()));
-    if (FLAG_trace_maglev_regalloc) {
+    if (v8_flags.trace_maglev_regalloc) {
      printing_visitor_->os()
          << " constant gap move: " << target << " ← "
          << PrintNodeLabel(graph_labeller(), node) << std::endl;
@@ -1009,7 +1009,7 @@ void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode(
     gap_move =
         Node::New<ConstantGapMove>(compilation_info_->zone(), {}, node, target);
   } else {
-    if (FLAG_trace_maglev_regalloc) {
+    if (v8_flags.trace_maglev_regalloc) {
      printing_visitor_->os() << " gap move: " << target << " ← "
                              << PrintNodeLabel(graph_labeller(), node) << ":"
                              << source << std::endl;
@@ -1037,7 +1037,7 @@ void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode(
 void StraightForwardRegisterAllocator::Spill(ValueNode* node) {
   if (node->is_loadable()) return;
   AllocateSpillSlot(node);
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
    printing_visitor_->os()
        << " spill: " << node->spill_slot() << " ← "
        << PrintNodeLabel(graph_labeller(), node) << std::endl;
@@ -1053,7 +1053,7 @@ void StraightForwardRegisterAllocator::AssignFixedInput(Input& input) {
   switch (operand.extended_policy()) {
     case compiler::UnallocatedOperand::MUST_HAVE_REGISTER:
       // Allocated in AssignArbitraryRegisterInput.
-      if (FLAG_trace_maglev_regalloc) {
+      if (v8_flags.trace_maglev_regalloc) {
        printing_visitor_->os()
            << "- " << PrintNodeLabel(graph_labeller(), input.node())
            << " has arbitrary register\n";
@@ -1062,7 +1062,7 @@ void StraightForwardRegisterAllocator::AssignFixedInput(Input& input) {
 
     case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
       // Allocated in AssignAnyInput.
-      if (FLAG_trace_maglev_regalloc) {
+      if (v8_flags.trace_maglev_regalloc) {
        printing_visitor_->os()
            << "- " << PrintNodeLabel(graph_labeller(), input.node())
            << " has arbitrary location\n";
@@ -1088,7 +1088,7 @@ void StraightForwardRegisterAllocator::AssignFixedInput(Input& input) {
     case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
       UNREACHABLE();
   }
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
    printing_visitor_->os()
        << "- " << PrintNodeLabel(graph_labeller(), input.node())
        << " in forced " << input.operand() << "\n";
@@ -1120,7 +1120,7 @@ void StraightForwardRegisterAllocator::AssignArbitraryRegisterInput(
   ValueNode* node = input.node();
   compiler::InstructionOperand location = node->allocation();
 
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
    printing_visitor_->os()
        << "- " << PrintNodeLabel(graph_labeller(), input.node()) << " in "
        << location << "\n";
@@ -1152,7 +1152,7 @@ void StraightForwardRegisterAllocator::AssignAnyInput(Input& input) {
   compiler::InstructionOperand location = node->allocation();
 
   input.InjectLocation(location);
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
    printing_visitor_->os()
        << "- " << PrintNodeLabel(graph_labeller(), input.node())
        << " in original " << location << "\n";
@@ -1291,7 +1291,7 @@ void StraightForwardRegisterAllocator::SpillAndClearRegisters(
   while (registers.used() != registers.empty()) {
     RegisterT reg = registers.used().first();
     ValueNode* node = registers.GetValue(reg);
-    if (FLAG_trace_maglev_regalloc) {
+    if (v8_flags.trace_maglev_regalloc) {
      printing_visitor_->os() << " clearing registers with "
                              << PrintNodeLabel(graph_labeller(), node) << "\n";
    }
@@ -1328,7 +1328,7 @@ void StraightForwardRegisterAllocator::AllocateSpillSlot(ValueNode* node) {
   // architectures.
   SpillSlots& slots = is_tagged ? tagged_ : untagged_;
   MachineRepresentation representation = node->GetMachineRepresentation();
-  if (!FLAG_maglev_reuse_stack_slots || slots.free_slots.empty()) {
+  if (!v8_flags.maglev_reuse_stack_slots || slots.free_slots.empty()) {
    free_slot = slots.top++;
  } else {
    NodeIdT start = node->live_range().start;
@@ -1352,7 +1352,7 @@ template <typename RegisterT>
 RegisterT StraightForwardRegisterAllocator::PickRegisterToFree(
     RegListBase<RegisterT> reserved) {
   RegisterFrameState<RegisterT>& registers = GetRegisterFrameState<RegisterT>();
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
    printing_visitor_->os() << " need to free a register... ";
  }
  int furthest_use = 0;
@@ -1373,7 +1373,7 @@ RegisterT StraightForwardRegisterAllocator::PickRegisterToFree(
       best = reg;
     }
   }
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
    printing_visitor_->os()
        << " chose " << best << " with next use " << furthest_use << "\n";
  }
@@ -1448,7 +1448,7 @@ template <typename RegisterT>
 compiler::AllocatedOperand StraightForwardRegisterAllocator::ForceAllocate(
     RegisterFrameState<RegisterT>& registers, RegisterT reg, ValueNode* node) {
   DCHECK(!registers.is_blocked(reg));
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
    printing_visitor_->os()
        << " forcing " << reg << " to "
        << PrintNodeLabel(graph_labeller(), node) << "...\n";
@@ -1547,7 +1547,7 @@ void StraightForwardRegisterAllocator::AssignFixedTemporaries(NodeBase* node) {
     general_registers_.block(reg);
   }
 
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
    printing_visitor_->os()
        << "Fixed temporaries: " << fixed_temporaries << "\n";
  }
@@ -1579,7 +1579,7 @@ void StraightForwardRegisterAllocator::AssignArbitraryTemporaries(
 
   DCHECK_GE(temporaries.Count(), node->num_temporaries_needed());
   node->assign_temporaries(temporaries);
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
    printing_visitor_->os() << "Temporaries: " << temporaries << "\n";
  }
 }
@@ -1711,7 +1711,7 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
     return InitializeBranchTargetRegisterValues(control, target);
   }
 
-  if (FLAG_trace_maglev_regalloc) {
+  if (v8_flags.trace_maglev_regalloc) {
    printing_visitor_->os() << "Merging registers...\n";
  }
 
@@ -1735,7 +1735,7 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
     if (!registers.free().has(reg)) {
       incoming = registers.GetValue(reg);
       if (!IsLiveAtTarget(incoming, control, target)) {
-        if (FLAG_trace_maglev_regalloc) {
+        if (v8_flags.trace_maglev_regalloc) {
          printing_visitor_->os() << " " << reg << " - incoming node "
                                  << PrintNodeLabel(graph_labeller(), incoming)
                                  << " dead at target\n";
@@ -1747,7 +1747,7 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
     if (incoming == node) {
       // We're using the same register as the target already has. If registers
       // are merged, add input information.
-      if (FLAG_trace_maglev_regalloc) {
+      if (v8_flags.trace_maglev_regalloc) {
        if (node) {
          printing_visitor_->os()
              << " " << reg << " - incoming node same as node: "
@@ -1762,7 +1762,7 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
       // The register is already occupied with a different node. Figure out
       // where that node is allocated on the incoming branch.
       merge->operand(predecessor_id) = node->allocation();
-      if (FLAG_trace_maglev_regalloc) {
+      if (v8_flags.trace_maglev_regalloc) {
        printing_visitor_->os() << " " << reg << " - merge: loading "
                                << PrintNodeLabel(graph_labeller(), node)
                                << " from " << node->allocation() << " \n";
@@ -1787,7 +1787,7 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
       // containing conversion nodes.
       // DCHECK_IMPLIES(!IsInRegister(target_state, incoming),
       //                incoming->properties().is_conversion());
-      if (FLAG_trace_maglev_regalloc) {
+      if (v8_flags.trace_maglev_regalloc) {
        printing_visitor_->os()
            << " " << reg << " - can't load incoming "
            << PrintNodeLabel(graph_labeller(), node) << ", bailing out\n";
@@ -1802,7 +1802,7 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
       // over the liveness of the node they are converting.
       // TODO(v8:7700): Overeager DCHECK.
      // DCHECK(node->properties().is_conversion());
-      if (FLAG_trace_maglev_regalloc) {
+      if (v8_flags.trace_maglev_regalloc) {
        printing_visitor_->os() << " " << reg << " - can't load "
                                << PrintNodeLabel(graph_labeller(), node)
                                << ", dropping the merge\n";
@@ -1834,14 +1834,14 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
     // state.
     if (node == nullptr) {
       merge->operand(predecessor_id) = register_info;
-      if (FLAG_trace_maglev_regalloc) {
+      if (v8_flags.trace_maglev_regalloc) {
        printing_visitor_->os() << " " << reg << " - new merge: loading new "
                                << PrintNodeLabel(graph_labeller(), incoming)
                                << " from " << register_info << " \n";
      }
    } else {
      merge->operand(predecessor_id) = node->allocation();
-      if (FLAG_trace_maglev_regalloc) {
+      if (v8_flags.trace_maglev_regalloc) {
        printing_visitor_->os() << " " << reg << " - new merge: loading "
                                << PrintNodeLabel(graph_labeller(), node)
                                << " from " << node->allocation() << " \n";
@@ -13,7 +13,7 @@ namespace internal {
 
 MaybeHandle<CodeT> Maglev::Compile(Isolate* isolate,
                                    Handle<JSFunction> function) {
-  DCHECK(FLAG_maglev);
+  DCHECK(v8_flags.maglev);
   std::unique_ptr<maglev::MaglevCompilationInfo> info =
       maglev::MaglevCompilationInfo::New(isolate, function);
   maglev::MaglevCompiler::Compile(isolate->main_thread_local_isolate(),