[turbofan] Fix bug in instruction scheduling

Disallow reorderings across calls and across caller registers
save/restore.

Bug: v8:9775
Change-Id: I8b1037dd127217ed9f4a42d45e0d928380c9241a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1862558
Commit-Queue: Georg Neis <neis@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64429}
commit a927810c03 (parent d6f911368b)
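The failure mode, sketched below as a simplified instruction stream (illustrative only; the opcode ordering and the x64 register names are my own example, not actual TurboFan output): caller-saved registers are spilled by kArchSaveCallerRegisters and refilled by kArchRestoreCallerRegisters, so a pure instruction that the scheduler moves in between can have its result silently overwritten by the restore.

// Illustrative instruction stream (a sketch, not real codegen):
//
//   kArchSaveCallerRegisters      ; spill rax, rcx, rdx, ... to the stack
//   kArchCallCFunction            ; the C call clobbers caller-saved regs
//   v = WordOr(x, y)              ; BUG: pure op hoisted in here; its result
//                                 ;      lives in a caller-saved register
//   kArchRestoreCallerRegisters   ; refill rax, rcx, ... and overwrite v
//
// After this change, the save/restore pair and all non-tail calls carry the
// new kIsBarrier flag, so no instruction can be reordered across them.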
--- a/src/compiler/backend/instruction-scheduler.cc
+++ b/src/compiler/backend/instruction-scheduler.cc
@@ -95,17 +95,11 @@ void InstructionScheduler::StartBlock(RpoNumber rpo) {
 
 void InstructionScheduler::EndBlock(RpoNumber rpo) {
   if (FLAG_turbo_stress_instruction_scheduling) {
-    ScheduleBlock<StressSchedulerQueue>();
+    Schedule<StressSchedulerQueue>();
   } else {
-    ScheduleBlock<CriticalPathFirstQueue>();
+    Schedule<CriticalPathFirstQueue>();
   }
   sequence()->EndBlock(rpo);
-  graph_.clear();
-  last_side_effect_instr_ = nullptr;
-  pending_loads_.clear();
-  last_live_in_reg_marker_ = nullptr;
-  last_deopt_or_trap_ = nullptr;
-  operands_map_.clear();
 }
 
 void InstructionScheduler::AddTerminator(Instruction* instr) {
@@ -119,6 +113,16 @@ void InstructionScheduler::AddTerminator(Instruction* instr) {
 }
 
 void InstructionScheduler::AddInstruction(Instruction* instr) {
+  if (IsBarrier(instr)) {
+    if (FLAG_turbo_stress_instruction_scheduling) {
+      Schedule<StressSchedulerQueue>();
+    } else {
+      Schedule<CriticalPathFirstQueue>();
+    }
+    sequence()->AddInstruction(instr);
+    return;
+  }
+
   ScheduleGraphNode* new_node = new (zone()) ScheduleGraphNode(zone(), instr);
 
   // We should not have branches in the middle of a block.
@@ -197,7 +201,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
 }
 
 template <typename QueueType>
-void InstructionScheduler::ScheduleBlock() {
+void InstructionScheduler::Schedule() {
   QueueType ready_list(this);
 
   // Compute total latencies so that we can schedule the critical path first.
@@ -231,6 +235,14 @@ void InstructionScheduler::ScheduleBlock() {
 
     cycle++;
   }
+
+  // Reset own state.
+  graph_.clear();
+  operands_map_.clear();
+  pending_loads_.clear();
+  last_deopt_or_trap_ = nullptr;
+  last_live_in_reg_marker_ = nullptr;
+  last_side_effect_instr_ = nullptr;
 }
 
 int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
@@ -287,14 +299,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
       return kHasSideEffect;
 
     case kArchPrepareCallCFunction:
-    case kArchSaveCallerRegisters:
-    case kArchRestoreCallerRegisters:
     case kArchPrepareTailCall:
-    case kArchCallCFunction:
-    case kArchCallCodeObject:
-    case kArchCallJSFunction:
-    case kArchCallWasmFunction:
-    case kArchCallBuiltinPointer:
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject:
     case kArchTailCallAddress:
@@ -303,6 +308,21 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
     case kArchDebugBreak:
       return kHasSideEffect;
 
+    case kArchSaveCallerRegisters:
+    case kArchRestoreCallerRegisters:
+      return kIsBarrier;
+
+    case kArchCallCFunction:
+    case kArchCallCodeObject:
+    case kArchCallJSFunction:
+    case kArchCallWasmFunction:
+    case kArchCallBuiltinPointer:
+      // Calls can cause GC and GC may relocate objects. If a pure instruction
+      // operates on a tagged pointer that was cast to a word then it may be
+      // incorrect to move the instruction across the call. Hence we mark all
+      // (non-tail-)calls as barriers.
+      return kIsBarrier;
+
     case kArchStoreWithWriteBarrier:
       return kHasSideEffect;
 
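Two details in the .cc changes above are worth spelling out: the per-block state reset moves from EndBlock() into Schedule() because Schedule() is now also invoked mid-block whenever a barrier instruction arrives, and AddInstruction() flushes all pending nodes before emitting a barrier in place. A minimal standalone sketch of that discipline (toy types and names of my own, not V8's API): instructions may only be reordered within the spans between barriers, and each barrier stays exactly where it appeared.

#include <functional>
#include <vector>

struct Instr {
  bool is_barrier;  // e.g. calls, caller-register save/restore
  int id;
};

// Reorder only within barrier-delimited spans; barriers stay in place.
void ScheduleAll(const std::vector<Instr>& input,
                 const std::function<void(std::vector<Instr>*)>& reorder,
                 std::vector<Instr>* output) {
  std::vector<Instr> span;  // pending, freely reorderable instructions
  auto flush = [&]() {
    reorder(&span);  // apply any locally legal reordering to the span
    output->insert(output->end(), span.begin(), span.end());
    span.clear();  // mirrors the state reset at the end of Schedule()
  };
  for (const Instr& instr : input) {
    if (instr.is_barrier) {
      flush();                   // schedule everything accumulated so far
      output->push_back(instr);  // the barrier itself is never moved
    } else {
      span.push_back(instr);
    }
  }
  flush();  // trailing span at the end of the block
}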
--- a/src/compiler/backend/instruction-scheduler.h
+++ b/src/compiler/backend/instruction-scheduler.h
@@ -24,6 +24,9 @@ enum ArchOpcodeFlags {
                                  // instruction e.g. div on Intel platform which
                                  // will raise an exception when the divisor is
                                  // zero.
+  kIsBarrier = 8,  // The instruction can cause GC or it reads/writes registers
+                   // that are not explicitly given. Nothing can be reordered
+                   // across such an instruction.
 };
 
 class InstructionScheduler final : public ZoneObject {
@@ -46,7 +49,7 @@ class InstructionScheduler final : public ZoneObject {
    public:
     ScheduleGraphNode(Zone* zone, Instruction* instr);
 
-    // Mark the instruction represented by 'node' as a dependecy of this one.
+    // Mark the instruction represented by 'node' as a dependency of this one.
     // The current instruction will be registered as an unscheduled predecessor
     // of 'node' (i.e. it must be scheduled before 'node').
     void AddSuccessor(ScheduleGraphNode* node);
@@ -141,12 +144,16 @@ class InstructionScheduler final : public ZoneObject {
   // Perform scheduling for the current block specifying the queue type to
   // use to determine the next best candidate.
   template <typename QueueType>
-  void ScheduleBlock();
+  void Schedule();
 
   // Return the scheduling properties of the given instruction.
   V8_EXPORT_PRIVATE int GetInstructionFlags(const Instruction* instr) const;
   int GetTargetInstructionFlags(const Instruction* instr) const;
 
+  bool IsBarrier(const Instruction* instr) const {
+    return (GetInstructionFlags(instr) & kIsBarrier) != 0;
+  }
+
   // Check whether the given instruction has side effects (e.g. function call,
   // memory store).
   bool HasSideEffect(const Instruction* instr) const {
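ArchOpcodeFlags is a bitmask (the pre-existing flags occupy values 1, 2 and 4, which is why the new entry takes 8), so an instruction's flags can combine and each predicate masks out its own bit, exactly as the new IsBarrier() above does. A small self-contained illustration; the enum values are restated from the header and the flag combination shown is hypothetical, not taken from GetInstructionFlags:

// Values as in the enum above; restated only to keep the snippet standalone.
enum ArchOpcodeFlags {
  kNoOpcodeFlags = 0,
  kHasSideEffect = 1,
  kIsLoadOperation = 2,
  kMayNeedDeoptOrTrapCheck = 4,
  kIsBarrier = 8,
};

int main() {
  int flags = kHasSideEffect | kIsBarrier;         // hypothetical combination
  bool is_barrier = (flags & kIsBarrier) != 0;     // true
  bool is_load = (flags & kIsLoadOperation) != 0;  // false
  return is_barrier && !is_load ? 0 : 1;
}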
--- a/test/cctest/test-code-stub-assembler.cc
+++ b/test/cctest/test-code-stub-assembler.cc
@@ -3619,6 +3619,43 @@ TEST(TestCallBuiltinIndirectLoad) {
                       Handle<String>::cast(result.ToHandleChecked())));
 }
 
+TEST(InstructionSchedulingCallerSavedRegisters) {
+  // This is a regression test for v8:9775, where TF's instruction scheduler
+  // incorrectly moved pure operations in between a ArchSaveCallerRegisters and
+  // a ArchRestoreCallerRegisters instruction.
+  bool old_turbo_instruction_scheduling = FLAG_turbo_instruction_scheduling;
+  FLAG_turbo_instruction_scheduling = true;
+
+  Isolate* isolate(CcTest::InitIsolateOnce());
+  const int kNumParams = 1;
+  CodeAssemblerTester asm_tester(isolate, kNumParams);
+  CodeStubAssembler m(asm_tester.state());
+
+  {
+    Node* x = m.SmiUntag(m.Parameter(0));
+    Node* y = m.WordOr(m.WordShr(x, 1), m.IntPtrConstant(1));
+    TNode<ExternalReference> isolate_ptr =
+        m.ExternalConstant(ExternalReference::isolate_address(isolate));
+    m.CallCFunctionWithCallerSavedRegisters(
+        m.ExternalConstant(
+            ExternalReference::smi_lexicographic_compare_function()),
+        MachineType::Int32(), kSaveFPRegs,
+        std::make_pair(MachineType::Pointer(), isolate_ptr),
+        std::make_pair(MachineType::TaggedSigned(), m.SmiConstant(0)),
+        std::make_pair(MachineType::TaggedSigned(), m.SmiConstant(0)));
+    m.Return(m.SmiTag(m.Signed(m.WordOr(x, y))));
+  }
+
+  AssemblerOptions options = AssemblerOptions::Default(isolate);
+  FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
+  Handle<Object> input = isolate->factory()->NewNumber(8);
+  MaybeHandle<Object> result = ft.Call(input);
+  CHECK(result.ToHandleChecked()->IsSmi());
+  CHECK_EQ(result.ToHandleChecked()->Number(), 13);
+
+  FLAG_turbo_instruction_scheduling = old_turbo_instruction_scheduling;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
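Why the test expects 13: for the Smi input 8, x = 8, y = (8 >> 1) | 1 = 5, and the returned value is SmiTag(x | y) = SmiTag(13). If the scheduler wrongly hoisted the pure WordShr/WordOr operations into the save/restore region, an intermediate value would be clobbered and the CHECK_EQ would fail. The arithmetic, checked standalone:

#include <cassert>

int main() {
  long x = 8;
  long y = (x >> 1) | 1;  // 4 | 1 == 5
  assert((x | y) == 13);  // 8 | 5 == 0b1101 == 13
  return 0;
}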