[csa] re-schedule CSA graph
This CL is an experiment to get more performance data from the perf-bots and will likely lead to regressions. The try-bots (see patch set 9) indicate some regressions, but they do not look severe.

Change-Id: Ia173ab20ee2a4904663db0f4ca2ffb196b203c77
Reviewed-on: https://chromium-review.googlesource.com/c/1319763
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57483}
Parent: 0ff69e7e93
Commit: 205860b147
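As orientation for the diff below: with --optimize-csa the CSA code generator no longer hands a finished schedule to the backend. Instead, RawMachineAssembler::ExportForOptimization() turns the raw schedule back into a TurboFan graph with explicit effect and control edges (MakeReschedulable), and Pipeline::GenerateCodeForCodeStub() computes a fresh schedule for that graph. The following sketch models only that decision; every type and function in it is a simplified stand-in for illustration, not the real V8 class or signature.

#include <iostream>
#include <memory>

// Toy stand-ins for the real V8 types; only the shape of the decision matters.
struct Graph {};
struct Schedule {};

struct ToyRawMachineAssembler {
  // Old path: hand the backend a finished schedule.
  std::unique_ptr<Schedule> Export() { return std::make_unique<Schedule>(); }
  // New path: rebuild a graph with explicit effect/control edges and let the
  // optimizing pipeline compute a schedule again.
  std::unique_ptr<Graph> ExportForOptimization() {
    return std::make_unique<Graph>();
  }
};

// Stand-in for Pipeline::GenerateCodeForCodeStub: a null schedule tells the
// pipeline to schedule the graph itself (ComputeScheduledGraph in the real code).
void GenerateCodeForCodeStub(Graph* graph, Schedule* schedule) {
  (void)graph;
  std::cout << (schedule ? "using the pre-built CSA schedule\n"
                         : "re-scheduling the graph in the pipeline\n");
}

int main() {
  constexpr bool optimize_csa = true;  // mirrors the new --optimize-csa flag
  ToyRawMachineAssembler rasm;
  if (optimize_csa) {
    std::unique_ptr<Graph> graph = rasm.ExportForOptimization();
    GenerateCodeForCodeStub(graph.get(), nullptr);
  } else {
    std::unique_ptr<Schedule> schedule = rasm.Export();
    GenerateCodeForCodeStub(nullptr, schedule.get());
  }
}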
@@ -436,6 +436,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
   Node* isolate_constant =
       ExternalConstant(ExternalReference::isolate_address(isolate()));
   Node* fp_mode = Parameter(Descriptor::kFPMode);
+  Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
   CallCFunction3WithCallerSavedRegistersMode(
       MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
       MachineType::Pointer(), function, object, slot, isolate_constant,
@@ -174,30 +174,44 @@ Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state,
   DCHECK(!state->code_generated_);
 
   RawMachineAssembler* rasm = state->raw_assembler_.get();
-  Schedule* schedule = rasm->Export();
 
-  JumpOptimizationInfo jump_opt;
-  bool should_optimize_jumps =
-      rasm->isolate()->serializer_enabled() && FLAG_turbo_rewrite_far_jumps;
+  Handle<Code> code;
+  if (FLAG_optimize_csa) {
+    // TODO(tebbi): Support jump rewriting also when FLAG_optimize_csa.
+    DCHECK(!FLAG_turbo_rewrite_far_jumps);
+    Graph* graph = rasm->ExportForOptimization();
 
-  Handle<Code> code =
-      Pipeline::GenerateCodeForCodeStub(
-          rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
-          state->kind_, state->name_, state->stub_key_, state->builtin_index_,
-          should_optimize_jumps ? &jump_opt : nullptr, rasm->poisoning_level(),
-          options)
-          .ToHandleChecked();
+    code = Pipeline::GenerateCodeForCodeStub(
+               rasm->isolate(), rasm->call_descriptor(), graph, nullptr,
+               state->kind_, state->name_, state->stub_key_,
+               state->builtin_index_, nullptr, rasm->poisoning_level(), options)
+               .ToHandleChecked();
+  } else {
+    Schedule* schedule = rasm->Export();
 
-  if (jump_opt.is_optimizable()) {
-    jump_opt.set_optimizing();
+    JumpOptimizationInfo jump_opt;
+    bool should_optimize_jumps =
+        rasm->isolate()->serializer_enabled() && FLAG_turbo_rewrite_far_jumps;
 
-    // Regenerate machine code
     code =
         Pipeline::GenerateCodeForCodeStub(
            rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
            state->kind_, state->name_, state->stub_key_, state->builtin_index_,
-            &jump_opt, rasm->poisoning_level(), options)
+            should_optimize_jumps ? &jump_opt : nullptr,
+            rasm->poisoning_level(), options)
            .ToHandleChecked();
+
+    if (jump_opt.is_optimizable()) {
+      jump_opt.set_optimizing();
+
+      // Regenerate machine code
+      code = Pipeline::GenerateCodeForCodeStub(
+                 rasm->isolate(), rasm->call_descriptor(), rasm->graph(),
+                 schedule, state->kind_, state->name_, state->stub_key_,
+                 state->builtin_index_, &jump_opt, rasm->poisoning_level(),
+                 options)
+                 .ToHandleChecked();
+    }
   }
 
   state->code_generated_ = true;
@@ -1103,6 +1117,7 @@ void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
   Goto(if_exception);
 
   Bind(&success);
+  raw_assembler()->AddNode(raw_assembler()->common()->IfSuccess(), node);
 }
 
 void CodeAssembler::HandleException(Node* node) {

@@ -1125,7 +1140,9 @@ void CodeAssembler::HandleException(Node* node) {
   Node* exception_value = raw_assembler()->AddNode(op, node, node);
   label->AddInputs({UncheckedCast<Object>(exception_value)});
   Goto(label->plain_label());
 
+  Bind(&success);
+  raw_assembler()->AddNode(raw_assembler()->common()->IfSuccess(), node);
 }
 
 namespace {
@@ -2203,7 +2203,6 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
   }
 
   PipelineImpl pipeline(&data);
-  DCHECK_NOT_NULL(data.schedule());
 
   if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
     CodeTracer::Scope tracing_scope(data.GetCodeTracer());

@@ -2221,9 +2220,15 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
     pipeline.Run<PrintGraphPhase>("Machine");
   }
 
-  TraceSchedule(data.info(), &data, data.schedule(), "schedule");
+  if (FLAG_optimize_csa) {
+    DCHECK_NULL(data.schedule());
+    pipeline.Run<VerifyGraphPhase>(true, !FLAG_optimize_csa);
+    pipeline.ComputeScheduledGraph();
+  } else {
+    TraceSchedule(data.info(), &data, data.schedule(), "schedule");
+  }
+  DCHECK_NOT_NULL(data.schedule());
 
   pipeline.Run<VerifyGraphPhase>(false, true);
   return pipeline.GenerateCode(call_descriptor);
 }
@@ -77,6 +77,312 @@ Schedule* RawMachineAssembler::Export() {
   return schedule;
 }
 
+Graph* RawMachineAssembler::ExportForOptimization() {
+  // Compute the correct codegen order.
+  DCHECK(schedule_->rpo_order()->empty());
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("--- RAW SCHEDULE -------------------------------------------\n");
+    StdoutStream{} << *schedule_;
+  }
+  schedule_->EnsureCFGWellFormedness();
+  Scheduler::ComputeSpecialRPO(zone(), schedule_);
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("--- SCHEDULE BEFORE GRAPH CREATION -------------------------\n");
+    StdoutStream{} << *schedule_;
+  }
+  MakeReschedulable();
+  // Invalidate RawMachineAssembler.
+  schedule_ = nullptr;
+  return graph();
+}
+
+void RawMachineAssembler::MakeReschedulable() {
+  std::vector<Node*> block_final_control(schedule_->all_blocks_.size());
+  std::vector<Node*> block_final_effect(schedule_->all_blocks_.size());
+
+  struct LoopHeader {
+    BasicBlock* block;
+    Node* loop_node;
+    Node* effect_phi;
+  };
+  std::vector<LoopHeader> loop_headers;
+
+  // These are hoisted outside of the loop to avoid re-allocation.
+  std::vector<Node*> merge_inputs;
+  std::vector<Node*> effect_phi_inputs;
+
+  for (BasicBlock* block : *schedule_->rpo_order()) {
+    Node* current_control;
+    Node* current_effect;
+    if (block == schedule_->start()) {
+      current_control = current_effect = graph()->start();
+    } else if (block == schedule_->end()) {
+      for (size_t i = 0; i < block->PredecessorCount(); ++i) {
+        NodeProperties::MergeControlToEnd(
+            graph(), common(), block->PredecessorAt(i)->control_input());
+      }
+    } else if (block->IsLoopHeader()) {
+      // The graph()->start() inputs are just placeholders until we computed the
+      // real back-edges and re-structure the control flow so the loop has
+      // exactly two predecessors.
+      current_control = graph()->NewNode(common()->Loop(2), graph()->start(),
+                                         graph()->start());
+      current_effect =
+          graph()->NewNode(common()->EffectPhi(2), graph()->start(),
+                           graph()->start(), current_control);
+
+      Node* terminate = graph()->NewNode(common()->Terminate(), current_effect,
+                                         current_control);
+      NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+      loop_headers.push_back(
+          LoopHeader{block, current_control, current_effect});
+    } else if (block->PredecessorCount() == 1) {
+      BasicBlock* predecessor = block->PredecessorAt(0);
+      DCHECK_LT(predecessor->rpo_number(), block->rpo_number());
+      current_effect = block_final_effect[predecessor->id().ToSize()];
+      current_control = block_final_control[predecessor->id().ToSize()];
+    } else {
+      // Create control merge nodes and effect phis for all predecessor blocks.
+      merge_inputs.clear();
+      effect_phi_inputs.clear();
+      int predecessor_count = static_cast<int>(block->PredecessorCount());
+      for (int i = 0; i < predecessor_count; ++i) {
+        BasicBlock* predecessor = block->PredecessorAt(i);
+        DCHECK_LT(predecessor->rpo_number(), block->rpo_number());
+        merge_inputs.push_back(block_final_control[predecessor->id().ToSize()]);
+        effect_phi_inputs.push_back(
+            block_final_effect[predecessor->id().ToSize()]);
+      }
+      current_control = graph()->NewNode(common()->Merge(predecessor_count),
+                                         static_cast<int>(merge_inputs.size()),
+                                         merge_inputs.data());
+      effect_phi_inputs.push_back(current_control);
+      current_effect = graph()->NewNode(
+          common()->EffectPhi(predecessor_count),
+          static_cast<int>(effect_phi_inputs.size()), effect_phi_inputs.data());
+    }
+
+    auto update_current_control_and_effect = [&](Node* node) {
+      bool existing_effect_and_control =
+          IrOpcode::IsIfProjectionOpcode(node->opcode()) ||
+          IrOpcode::IsPhiOpcode(node->opcode());
+      if (node->op()->EffectInputCount() > 0) {
+        DCHECK_EQ(1, node->op()->EffectInputCount());
+        if (existing_effect_and_control) {
+          NodeProperties::ReplaceEffectInput(node, current_effect);
+        } else {
+          node->AppendInput(graph()->zone(), current_effect);
+        }
+      }
+      if (node->op()->ControlInputCount() > 0) {
+        DCHECK_EQ(1, node->op()->ControlInputCount());
+        if (existing_effect_and_control) {
+          NodeProperties::ReplaceControlInput(node, current_control);
+        } else {
+          node->AppendInput(graph()->zone(), current_control);
+        }
+      }
+      if (node->op()->EffectOutputCount() > 0) {
+        DCHECK_EQ(1, node->op()->EffectOutputCount());
+        current_effect = node;
+      }
+      if (node->op()->ControlOutputCount() > 0) {
+        current_control = node;
+      }
+    };
+
+    for (Node* node : *block) {
+      update_current_control_and_effect(node);
+    }
+    if (block->deferred()) MarkControlDeferred(current_control);
+
+    if (Node* block_terminator = block->control_input()) {
+      update_current_control_and_effect(block_terminator);
+    }
+
+    block_final_effect[block->id().ToSize()] = current_effect;
+    block_final_control[block->id().ToSize()] = current_control;
+  }
+
+  // Fix-up loop backedges and re-structure control flow so that loop nodes have
+  // exactly two control predecessors.
+  for (const LoopHeader& loop_header : loop_headers) {
+    BasicBlock* block = loop_header.block;
+    std::vector<BasicBlock*> loop_entries;
+    std::vector<BasicBlock*> loop_backedges;
+    for (size_t i = 0; i < block->PredecessorCount(); ++i) {
+      BasicBlock* predecessor = block->PredecessorAt(i);
+      if (block->LoopContains(predecessor)) {
+        loop_backedges.push_back(predecessor);
+      } else {
+        DCHECK(loop_backedges.empty());
+        loop_entries.push_back(predecessor);
+      }
+    }
+    DCHECK(!loop_entries.empty());
+    DCHECK(!loop_backedges.empty());
+
+    int entrance_count = static_cast<int>(loop_entries.size());
+    int backedge_count = static_cast<int>(loop_backedges.size());
+    Node* control_loop_entry = CreateNodeFromPredecessors(
+        loop_entries, block_final_control, common()->Merge(entrance_count), {});
+    Node* control_backedge =
+        CreateNodeFromPredecessors(loop_backedges, block_final_control,
+                                   common()->Merge(backedge_count), {});
+    Node* effect_loop_entry = CreateNodeFromPredecessors(
+        loop_entries, block_final_effect, common()->EffectPhi(entrance_count),
+        {control_loop_entry});
+    Node* effect_backedge = CreateNodeFromPredecessors(
+        loop_backedges, block_final_effect, common()->EffectPhi(backedge_count),
+        {control_backedge});
+
+    loop_header.loop_node->ReplaceInput(0, control_loop_entry);
+    loop_header.loop_node->ReplaceInput(1, control_backedge);
+    loop_header.effect_phi->ReplaceInput(0, effect_loop_entry);
+    loop_header.effect_phi->ReplaceInput(1, effect_backedge);
+
+    for (Node* node : *block) {
+      if (node->opcode() == IrOpcode::kPhi) {
+        MakePhiBinary(node, static_cast<int>(loop_entries.size()),
+                      control_loop_entry, control_backedge);
+      }
+    }
+  }
+}
+
+Node* RawMachineAssembler::CreateNodeFromPredecessors(
+    const std::vector<BasicBlock*>& predecessors,
+    const std::vector<Node*>& sidetable, const Operator* op,
+    const std::vector<Node*>& additional_inputs) {
+  if (predecessors.size() == 1) {
+    return sidetable[predecessors.front()->id().ToSize()];
+  }
+  std::vector<Node*> inputs;
+  for (BasicBlock* predecessor : predecessors) {
+    inputs.push_back(sidetable[predecessor->id().ToSize()]);
+  }
+  for (Node* additional_input : additional_inputs) {
+    inputs.push_back(additional_input);
+  }
+  return graph()->NewNode(op, static_cast<int>(inputs.size()), inputs.data());
+}
+
+void RawMachineAssembler::MakePhiBinary(Node* phi, int split_point,
+                                        Node* left_control,
+                                        Node* right_control) {
+  int value_count = phi->op()->ValueInputCount();
+  if (value_count == 2) return;
+  DCHECK_LT(split_point, value_count);
+  DCHECK_GT(split_point, 0);
+
+  MachineRepresentation rep = PhiRepresentationOf(phi->op());
+  int left_input_count = split_point;
+  int right_input_count = value_count - split_point;
+
+  Node* left_input;
+  if (left_input_count == 1) {
+    left_input = NodeProperties::GetValueInput(phi, 0);
+  } else {
+    std::vector<Node*> inputs;
+    for (int i = 0; i < left_input_count; ++i) {
+      inputs.push_back(NodeProperties::GetValueInput(phi, i));
+    }
+    inputs.push_back(left_control);
+    left_input =
+        graph()->NewNode(common()->Phi(rep, static_cast<int>(left_input_count)),
+                         static_cast<int>(inputs.size()), inputs.data());
+  }
+
+  Node* right_input;
+  if (right_input_count == 1) {
+    right_input = NodeProperties::GetValueInput(phi, split_point);
+  } else {
+    std::vector<Node*> inputs;
+    for (int i = split_point; i < value_count; ++i) {
+      inputs.push_back(NodeProperties::GetValueInput(phi, i));
+    }
+    inputs.push_back(right_control);
+    right_input = graph()->NewNode(
+        common()->Phi(rep, static_cast<int>(right_input_count)),
+        static_cast<int>(inputs.size()), inputs.data());
+  }
+
+  Node* control = NodeProperties::GetControlInput(phi);
+  phi->TrimInputCount(3);
+  phi->ReplaceInput(0, left_input);
+  phi->ReplaceInput(1, right_input);
+  phi->ReplaceInput(2, control);
+  NodeProperties::ChangeOp(phi, common()->Phi(rep, 2));
+}
+
+void RawMachineAssembler::MarkControlDeferred(Node* control_node) {
+  BranchHint new_branch_hint;
+  Node* responsible_branch = nullptr;
+  while (responsible_branch == nullptr) {
+    switch (control_node->opcode()) {
+      case IrOpcode::kIfException:
+        // IfException projections are deferred by default.
+        return;
+      case IrOpcode::kIfSuccess:
+        control_node = NodeProperties::GetControlInput(control_node);
+        continue;
+      case IrOpcode::kIfValue:
+      case IrOpcode::kIfDefault:
+        // Marking switch cases as deferred is currently impossible.
+        return;
+      case IrOpcode::kIfTrue: {
+        Node* branch = NodeProperties::GetControlInput(control_node);
+        BranchHint hint = BranchOperatorInfoOf(branch->op()).hint;
+        if (hint == BranchHint::kTrue) {
+          // The other possibility is also deferred, so the responsible branch
+          // has to be before.
+          control_node = NodeProperties::GetControlInput(branch);
+          continue;
+        }
+        new_branch_hint = BranchHint::kFalse;
+        responsible_branch = branch;
+        break;
+      }
+      case IrOpcode::kIfFalse: {
+        Node* branch = NodeProperties::GetControlInput(control_node);
+        BranchHint hint = BranchOperatorInfoOf(branch->op()).hint;
+        if (hint == BranchHint::kFalse) {
+          // The other possibility is also deferred, so the responsible branch
+          // has to be before.
+          control_node = NodeProperties::GetControlInput(branch);
+          continue;
+        }
+        new_branch_hint = BranchHint::kTrue;
+        responsible_branch = branch;
+        break;
+      }
+      case IrOpcode::kMerge:
+        for (int i = 0; i < control_node->op()->ControlInputCount(); ++i) {
+          MarkControlDeferred(NodeProperties::GetControlInput(control_node, i));
+        }
+        return;
+      case IrOpcode::kLoop:
+        control_node = NodeProperties::GetControlInput(control_node, 0);
+        continue;
+      case IrOpcode::kBranch:
+      case IrOpcode::kSwitch:
+        UNREACHABLE();
+      case IrOpcode::kStart:
+        return;
+      default:
+        DCHECK_EQ(1, control_node->op()->ControlInputCount());
+        control_node = NodeProperties::GetControlInput(control_node);
+        continue;
+    }
+  }
+
+  BranchOperatorInfo info = BranchOperatorInfoOf(responsible_branch->op());
+  if (info.hint == new_branch_hint) return;
+  NodeProperties::ChangeOp(
+      responsible_branch,
+      common()->Branch(new_branch_hint, info.is_safety_check));
+}
+
 Node* RawMachineAssembler::TargetParameter() {
   DCHECK_NOT_NULL(target_parameter_);
   return target_parameter_;
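The raw CSA schedule that reaches MakeReschedulable has almost no effect or control edges between nodes, so the pass walks every basic block in RPO and threads a running current_effect / current_control value through the block's nodes, materializing Merge, EffectPhi and Loop nodes at block boundaries. Below is a deliberately simplified, self-contained sketch of just the per-block effect threading; the types are toy stand-ins, not V8's Node or BasicBlock.

#include <iostream>
#include <vector>

// Toy graph nodes; only effect wiring is modeled.
struct ToyNode {
  const char* name;
  bool has_effect_input;
  bool has_effect_output;
  ToyNode* effect_input;
};

int main() {
  // One basic block in schedule order: two loads with a pure op in between.
  ToyNode start{"Start", false, true, nullptr};
  ToyNode load_a{"LoadA", true, true, nullptr};
  ToyNode add{"Add", false, false, nullptr};
  ToyNode load_b{"LoadB", true, true, nullptr};
  std::vector<ToyNode*> block{&load_a, &add, &load_b};

  // Core of the per-block pass: thread a running effect chain through the
  // nodes in exactly their scheduled order.
  ToyNode* current_effect = &start;
  for (ToyNode* node : block) {
    if (node->has_effect_input) node->effect_input = current_effect;
    if (node->has_effect_output) current_effect = node;
  }

  for (ToyNode* node : block) {
    std::cout << node->name << " effect input: "
              << (node->effect_input ? node->effect_input->name : "(none)")
              << "\n";
  }
  // LoadA <- Start, Add <- (none), LoadB <- LoadA. The real pass also threads
  // control edges and stitches blocks together with Merge/EffectPhi/Loop nodes.
}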
@@ -101,7 +407,16 @@ void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
   Node* branch = MakeNode(
       common()->Branch(BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck), 1,
       &condition);
-  schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
+  BasicBlock* true_block = schedule()->NewBasicBlock();
+  BasicBlock* false_block = schedule()->NewBasicBlock();
+  schedule()->AddBranch(CurrentBlock(), branch, true_block, false_block);
+
+  true_block->AddNode(MakeNode(common()->IfTrue(), 1, &branch));
+  schedule()->AddGoto(true_block, Use(true_val));
+
+  false_block->AddNode(MakeNode(common()->IfFalse(), 1, &branch));
+  schedule()->AddGoto(false_block, Use(false_val));
+
   current_block_ = nullptr;
 }

@@ -119,7 +434,7 @@ void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
                                  size_t case_count) {
   DCHECK_NE(schedule()->end(), current_block_);
   size_t succ_count = case_count + 1;
-  Node* switch_node = AddNode(common()->Switch(succ_count), index);
+  Node* switch_node = MakeNode(common()->Switch(succ_count), 1, &index);
   BasicBlock** succ_blocks = zone()->NewArray<BasicBlock*>(succ_count);
   for (size_t index = 0; index < case_count; ++index) {
     int32_t case_value = case_values[index];
@@ -60,6 +60,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   // Finalizes the schedule and exports it to be used for code generation. Note
   // that this RawMachineAssembler becomes invalid after export.
   Schedule* Export();
+  // Finalizes the schedule and transforms it into a graph that's suitable for
+  // it to be used for Turbofan optimization and re-scheduling. Note that this
+  // RawMachineAssembler becomes invalid after export.
+  Graph* ExportForOptimization();
 
   // ===========================================================================
   // The following utility methods create new nodes with specific operators and

@@ -621,28 +625,25 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
 
   // Conversions.
   Node* BitcastTaggedToWord(Node* a) {
-#ifdef ENABLE_VERIFY_CSA
-    return AddNode(machine()->BitcastTaggedToWord(), a);
-#else
+    if (FLAG_verify_csa || FLAG_optimize_csa) {
+      return AddNode(machine()->BitcastTaggedToWord(), a);
+    }
     return a;
-#endif
   }
   Node* BitcastMaybeObjectToWord(Node* a) {
-#ifdef ENABLE_VERIFY_CSA
-    return AddNode(machine()->BitcastMaybeObjectToWord(), a);
-#else
+    if (FLAG_verify_csa || FLAG_optimize_csa) {
+      return AddNode(machine()->BitcastMaybeObjectToWord(), a);
+    }
     return a;
-#endif
   }
   Node* BitcastWordToTagged(Node* a) {
     return AddNode(machine()->BitcastWordToTagged(), a);
   }
   Node* BitcastWordToTaggedSigned(Node* a) {
-#ifdef ENABLE_VERIFY_CSA
-    return AddNode(machine()->BitcastWordToTaggedSigned(), a);
-#else
+    if (FLAG_verify_csa || FLAG_optimize_csa) {
+      return AddNode(machine()->BitcastWordToTaggedSigned(), a);
+    }
     return a;
-#endif
   }
   Node* TruncateFloat64ToWord32(Node* a) {
     return AddNode(machine()->TruncateFloat64ToWord32(), a);
@@ -982,6 +983,18 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   BasicBlock* EnsureBlock(RawMachineLabel* label);
   BasicBlock* CurrentBlock();
 
+  // A post-processing pass to add effect and control edges so that the graph
+  // can be optimized and re-scheduled.
+  // TODO(tebbi): Move this to a separate class.
+  void MakeReschedulable();
+  Node* CreateNodeFromPredecessors(const std::vector<BasicBlock*>& predecessors,
+                                   const std::vector<Node*>& sidetable,
+                                   const Operator* op,
+                                   const std::vector<Node*>& additional_inputs);
+  void MakePhiBinary(Node* phi, int split_point, Node* left_control,
+                     Node* right_control);
+  void MarkControlDeferred(Node* control_input);
+
   Schedule* schedule() { return schedule_; }
   size_t parameter_count() const { return call_descriptor_->ParameterCount(); }
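MakePhiBinary, declared here and defined in the raw-machine-assembler.cc hunk above, splits an n-input Phi at a loop header into two intermediate Phis (one over the loop entries, one over the back edges) joined by a binary Phi, so every loop node keeps exactly two control predecessors. A small self-contained check of why the rewrite preserves the selected value, using plain integers as stand-ins for value inputs (an illustration only, not the real node surgery):

#include <cassert>
#include <vector>

// A Phi simply yields the input that belongs to the predecessor actually taken.
int Phi(const std::vector<int>& inputs, int taken) { return inputs[taken]; }

int main() {
  // Four predecessors: two loop entries followed by two back edges.
  const std::vector<int> inputs{10, 20, 30, 40};
  const int split_point = 2;

  for (int taken = 0; taken < 4; ++taken) {
    int original = Phi(inputs, taken);  // one Phi over all four predecessors

    // After MakePhiBinary: one Phi per side plus a binary Phi choosing between
    // the merged entry path and the merged back-edge path.
    std::vector<int> entries(inputs.begin(), inputs.begin() + split_point);
    std::vector<int> backedges(inputs.begin() + split_point, inputs.end());
    bool came_from_entry = taken < split_point;
    int rewritten = came_from_entry ? Phi(entries, taken)
                                    : Phi(backedges, taken - split_point);

    assert(original == rewritten);
  }
  return 0;
}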
@@ -55,6 +55,9 @@ void BasicBlock::AddNode(Node* node) { nodes_.push_back(node); }
 void BasicBlock::set_control(Control control) { control_ = control; }
 
 void BasicBlock::set_control_input(Node* control_input) {
+  if (!nodes_.empty() && control_input == nodes_.back()) {
+    nodes_.pop_back();
+  }
   control_input_ = control_input;
 }
 

@@ -363,30 +366,14 @@ void Schedule::EliminateRedundantPhiNodes() {
 }
 
 void Schedule::EnsureSplitEdgeForm(BasicBlock* block) {
+#ifdef DEBUG
   DCHECK(block->PredecessorCount() > 1 && block != end_);
   for (auto current_pred = block->predecessors().begin();
        current_pred != block->predecessors().end(); ++current_pred) {
     BasicBlock* pred = *current_pred;
-    if (pred->SuccessorCount() > 1) {
-      // Found a predecessor block with multiple successors.
-      BasicBlock* split_edge_block = NewBasicBlock();
-      split_edge_block->set_control(BasicBlock::kGoto);
-      split_edge_block->successors().push_back(block);
-      split_edge_block->predecessors().push_back(pred);
-      split_edge_block->set_deferred(block->deferred());
-      *current_pred = split_edge_block;
-      // Find a corresponding successor in the previous block, replace it
-      // with the split edge block... but only do it once, since we only
-      // replace the previous blocks in the current block one at a time.
-      for (auto successor = pred->successors().begin();
-           successor != pred->successors().end(); ++successor) {
-        if (*successor == block) {
-          *successor = split_edge_block;
-          break;
-        }
-      }
-    }
+    DCHECK_LE(pred->SuccessorCount(), 1);
   }
+#endif
 }
 
 void Schedule::EnsureDeferredCodeSingleEntryPoint(BasicBlock* block) {
@@ -439,6 +439,9 @@ DEFINE_BOOL(trace_verify_csa, false, "trace code stubs verification")
 DEFINE_STRING(csa_trap_on_node, nullptr,
               "trigger break point when a node with given id is created in "
               "given stub. The format is: StubName,NodeId")
+DEFINE_BOOL_READONLY(optimize_csa, true,
+                     "run the optimizing Turbofan backend in the CSA pipeline")
+DEFINE_NEG_IMPLICATION(optimize_csa, turbo_rewrite_far_jumps)
 DEFINE_BOOL_READONLY(fixed_array_bounds_checks, DEBUG_BOOL,
                      "enable FixedArray bounds checks")
 DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
@@ -2754,6 +2754,7 @@ IGNITION_HANDLER(Throw, InterpreterAssembler) {
   CallRuntime(Runtime::kThrow, context, exception);
   // We shouldn't ever return from a throw.
   Abort(AbortReason::kUnexpectedReturnFromThrow);
+  Unreachable();
 }
 
 // ReThrow

@@ -2765,6 +2766,7 @@ IGNITION_HANDLER(ReThrow, InterpreterAssembler) {
   CallRuntime(Runtime::kReThrow, context, exception);
   // We shouldn't ever return from a throw.
   Abort(AbortReason::kUnexpectedReturnFromThrow);
+  Unreachable();
 }
 
 // Abort <abort_reason>

@@ -2801,6 +2803,7 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
     CallRuntime(Runtime::kThrowReferenceError, GetContext(), name);
     // We shouldn't ever return from a throw.
     Abort(AbortReason::kUnexpectedReturnFromThrow);
+    Unreachable();
   }
 }

@@ -2819,6 +2822,7 @@ IGNITION_HANDLER(ThrowSuperNotCalledIfHole, InterpreterAssembler) {
     CallRuntime(Runtime::kThrowSuperNotCalled, GetContext());
     // We shouldn't ever return from a throw.
    Abort(AbortReason::kUnexpectedReturnFromThrow);
+    Unreachable();
   }
 }

@@ -2838,6 +2842,7 @@ IGNITION_HANDLER(ThrowSuperAlreadyCalledIfNotHole, InterpreterAssembler) {
     CallRuntime(Runtime::kThrowSuperAlreadyCalledError, GetContext());
     // We shouldn't ever return from a throw.
     Abort(AbortReason::kUnexpectedReturnFromThrow);
+    Unreachable();
   }
 }

@@ -3071,6 +3076,7 @@ IGNITION_HANDLER(ExtraWide, InterpreterAssembler) {
 // An invalid bytecode aborting execution if dispatched.
 IGNITION_HANDLER(Illegal, InterpreterAssembler) {
   Abort(AbortReason::kInvalidBytecode);
+  Unreachable();
 }
 
 // SuspendGenerator <generator> <first input register> <register count>
@@ -1748,7 +1748,6 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
 }
 
 void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
-  DCHECK(imm8 == 0 || imm8 == 1);
   if (CpuFeatures::IsSupported(SSE4_1)) {
     CpuFeatureScope sse_scope(this, SSE4_1);
     pinsrd(dst, src, imm8);
@@ -56,11 +56,13 @@ class CodeAssemblerTester {
   }
 
   Handle<Code> GenerateCode() {
-    return CodeAssembler::GenerateCode(
-        &state_, AssemblerOptions::Default(scope_.isolate()));
+    return GenerateCode(AssemblerOptions::Default(scope_.isolate()));
   }
 
   Handle<Code> GenerateCode(const AssemblerOptions& options) {
+    if (state_.InsideBlock()) {
+      CodeAssembler(&state_).Unreachable();
+    }
     return CodeAssembler::GenerateCode(&state_, options);
   }
 
@@ -48,13 +48,13 @@ TEST(ProfileDiamond) {
 
   m.GenerateCode();
   {
-    uint32_t expected[] = {0, 0, 0, 0};
+    uint32_t expected[] = {0, 0, 0, 0, 0, 0};
     m.Expect(arraysize(expected), expected);
   }
 
   m.Call(0);
   {
-    uint32_t expected[] = {1, 1, 0, 1};
+    uint32_t expected[] = {1, 1, 1, 0, 0, 1};
     m.Expect(arraysize(expected), expected);
   }
 

@@ -62,13 +62,13 @@ TEST(ProfileDiamond) {
 
   m.Call(1);
   {
-    uint32_t expected[] = {1, 0, 1, 1};
+    uint32_t expected[] = {1, 0, 0, 1, 1, 1};
     m.Expect(arraysize(expected), expected);
   }
 
   m.Call(0);
   {
-    uint32_t expected[] = {2, 1, 1, 2};
+    uint32_t expected[] = {2, 1, 1, 1, 1, 2};
     m.Expect(arraysize(expected), expected);
   }
 }

@@ -94,7 +94,7 @@ TEST(ProfileLoop) {
 
   m.GenerateCode();
   {
-    uint32_t expected[] = {0, 0, 0, 0};
+    uint32_t expected[] = {0, 0, 0, 0, 0, 0};
     m.Expect(arraysize(expected), expected);
   }
 

@@ -102,7 +102,7 @@
   for (size_t i = 0; i < arraysize(runs); i++) {
     m.ResetCounts();
     CHECK_EQ(1, m.Call(static_cast<int>(runs[i])));
-    uint32_t expected[] = {1, runs[i] + 1, runs[i], 1};
+    uint32_t expected[] = {1, runs[i] + 1, runs[i], runs[i], 1, 1};
     m.Expect(arraysize(expected), expected);
   }
 }
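The basic-block-profiler expectations grow from four to six counters because RawMachineAssembler::Branch now materializes explicit IfTrue/IfFalse projection blocks (see the Branch hunk above). A toy re-enactment of the diamond test's first call; the counter ordering here is an assumption made for illustration, not taken from the test itself:

#include <array>
#include <cassert>

int main() {
  // Assumed counter order: entry, if_true block, true path, if_false block,
  // false path, merge. The two projection blocks are the ones this CL adds.
  std::array<int, 6> counters{};
  const bool take_true_path = true;  // the call that yields {1, 1, 1, 0, 0, 1}

  counters[0]++;  // entry block containing the branch
  if (take_true_path) {
    counters[1]++;  // new IfTrue projection block
    counters[2]++;  // original true-path block
  } else {
    counters[3]++;  // new IfFalse projection block
    counters[4]++;  // original false-path block
  }
  counters[5]++;  // merge block

  assert((counters == std::array<int, 6>{1, 1, 1, 0, 0, 1}));
  return 0;
}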
@@ -20,14 +20,6 @@ namespace c = v8::internal::compiler;
 namespace v8 {
 namespace internal {
 
-#ifdef ENABLE_VERIFY_CSA
-#define IS_BITCAST_WORD_TO_TAGGED_SIGNED(x) IsBitcastWordToTaggedSigned(x)
-#define IS_BITCAST_TAGGED_TO_WORD(x) IsBitcastTaggedToWord(x)
-#else
-#define IS_BITCAST_WORD_TO_TAGGED_SIGNED(x) (x)
-#define IS_BITCAST_TAGGED_TO_WORD(x) (x)
-#endif
-
 CodeStubAssemblerTestState::CodeStubAssemblerTestState(
     CodeStubAssemblerTest* test)
     : compiler::CodeAssemblerState(

@@ -39,7 +31,7 @@ TARGET_TEST_F(CodeStubAssemblerTest, SmiTag) {
   CodeStubAssemblerForTest m(&state);
   Node* value = m.Int32Constant(44);
   EXPECT_THAT(m.SmiTag(value),
-              IS_BITCAST_WORD_TO_TAGGED_SIGNED(c::IsIntPtrConstant(
+              IsBitcastWordToTaggedSigned(c::IsIntPtrConstant(
                   static_cast<intptr_t>(44) << (kSmiShiftSize + kSmiTagSize))));
   EXPECT_THAT(m.SmiUntag(value),
               c::IsIntPtrConstant(static_cast<intptr_t>(44) >>
@@ -2256,22 +2256,22 @@ IS_UNOP_MATCHER(TaggedPoisonOnSpeculation)
 // Special-case Bitcast operators which are disabled when ENABLE_VERIFY_CSA is
 // not enabled.
 Matcher<Node*> IsBitcastTaggedToWord(const Matcher<Node*>& input_matcher) {
-#ifdef ENABLE_VERIFY_CSA
-  return MakeMatcher(
-      new IsUnopMatcher(IrOpcode::kBitcastTaggedToWord, input_matcher));
-#else
-  return input_matcher;
-#endif
+  if (FLAG_verify_csa || FLAG_optimize_csa) {
+    return MakeMatcher(
+        new IsUnopMatcher(IrOpcode::kBitcastTaggedToWord, input_matcher));
+  } else {
+    return input_matcher;
+  }
 }
 
 Matcher<Node*> IsBitcastWordToTaggedSigned(
     const Matcher<Node*>& input_matcher) {
-#ifdef ENABLE_VERIFY_CSA
-  return MakeMatcher(
-      new IsUnopMatcher(IrOpcode::kBitcastWordToTaggedSigned, input_matcher));
-#else
-  return input_matcher;
-#endif
+  if (FLAG_verify_csa || FLAG_optimize_csa) {
+    return MakeMatcher(
+        new IsUnopMatcher(IrOpcode::kBitcastWordToTaggedSigned, input_matcher));
+  } else {
+    return input_matcher;
+  }
 }
 
 #undef LOAD_MATCHER