From a927810c03d2a36d47c633deca45a24e71400ef9 Mon Sep 17 00:00:00 2001
From: Georg Neis
Date: Fri, 18 Oct 2019 11:20:52 +0200
Subject: [PATCH] [turbofan] Fix bug in instruction scheduling

Disallow reorderings across calls and across caller registers
save/restore.

Bug: v8:9775
Change-Id: I8b1037dd127217ed9f4a42d45e0d928380c9241a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1862558
Commit-Queue: Georg Neis
Reviewed-by: Tobias Tebbi
Cr-Commit-Position: refs/heads/master@{#64429}
---
 src/compiler/backend/instruction-scheduler.cc | 52 +++++++++++++------
 src/compiler/backend/instruction-scheduler.h  | 11 +++-
 test/cctest/test-code-stub-assembler.cc       | 37 +++++++++++++
 3 files changed, 82 insertions(+), 18 deletions(-)

diff --git a/src/compiler/backend/instruction-scheduler.cc b/src/compiler/backend/instruction-scheduler.cc
index d4920cd575..e19c4b0b26 100644
--- a/src/compiler/backend/instruction-scheduler.cc
+++ b/src/compiler/backend/instruction-scheduler.cc
@@ -95,17 +95,11 @@ void InstructionScheduler::StartBlock(RpoNumber rpo) {
 
 void InstructionScheduler::EndBlock(RpoNumber rpo) {
   if (FLAG_turbo_stress_instruction_scheduling) {
-    ScheduleBlock<StressSchedulerQueue>();
+    Schedule<StressSchedulerQueue>();
   } else {
-    ScheduleBlock<CriticalPathFirstQueue>();
+    Schedule<CriticalPathFirstQueue>();
   }
   sequence()->EndBlock(rpo);
-  graph_.clear();
-  last_side_effect_instr_ = nullptr;
-  pending_loads_.clear();
-  last_live_in_reg_marker_ = nullptr;
-  last_deopt_or_trap_ = nullptr;
-  operands_map_.clear();
 }
 
 void InstructionScheduler::AddTerminator(Instruction* instr) {
@@ -119,6 +113,16 @@ void InstructionScheduler::AddTerminator(Instruction* instr) {
 }
 
 void InstructionScheduler::AddInstruction(Instruction* instr) {
+  if (IsBarrier(instr)) {
+    if (FLAG_turbo_stress_instruction_scheduling) {
+      Schedule<StressSchedulerQueue>();
+    } else {
+      Schedule<CriticalPathFirstQueue>();
+    }
+    sequence()->AddInstruction(instr);
+    return;
+  }
+
   ScheduleGraphNode* new_node = new (zone()) ScheduleGraphNode(zone(), instr);
 
   // We should not have branches in the middle of a block.
@@ -197,7 +201,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
 }
 
 template <typename QueueType>
-void InstructionScheduler::ScheduleBlock() {
+void InstructionScheduler::Schedule() {
   QueueType ready_list(this);
 
   // Compute total latencies so that we can schedule the critical path first.
@@ -231,6 +235,14 @@
 
     cycle++;
   }
+
+  // Reset own state.
+  graph_.clear();
+  operands_map_.clear();
+  pending_loads_.clear();
+  last_deopt_or_trap_ = nullptr;
+  last_live_in_reg_marker_ = nullptr;
+  last_side_effect_instr_ = nullptr;
 }
 
 int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
@@ -287,14 +299,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
       return kHasSideEffect;
 
     case kArchPrepareCallCFunction:
-    case kArchSaveCallerRegisters:
-    case kArchRestoreCallerRegisters:
     case kArchPrepareTailCall:
-    case kArchCallCFunction:
-    case kArchCallCodeObject:
-    case kArchCallJSFunction:
-    case kArchCallWasmFunction:
-    case kArchCallBuiltinPointer:
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject:
     case kArchTailCallAddress:
@@ -303,6 +308,21 @@
     case kArchDebugBreak:
       return kHasSideEffect;
 
+    case kArchSaveCallerRegisters:
+    case kArchRestoreCallerRegisters:
+      return kIsBarrier;
+
+    case kArchCallCFunction:
+    case kArchCallCodeObject:
+    case kArchCallJSFunction:
+    case kArchCallWasmFunction:
+    case kArchCallBuiltinPointer:
+      // Calls can cause GC and GC may relocate objects. If a pure instruction
+      // operates on a tagged pointer that was cast to a word then it may be
+      // incorrect to move the instruction across the call. Hence we mark all
+      // (non-tail-)calls as barriers.
+      return kIsBarrier;
+
     case kArchStoreWithWriteBarrier:
       return kHasSideEffect;
 
diff --git a/src/compiler/backend/instruction-scheduler.h b/src/compiler/backend/instruction-scheduler.h
index b73c225c94..ec63a36e2c 100644
--- a/src/compiler/backend/instruction-scheduler.h
+++ b/src/compiler/backend/instruction-scheduler.h
@@ -24,6 +24,9 @@ enum ArchOpcodeFlags {
                                  // instruction e.g. div on Intel platform which
                                  // will raise an exception when the divisor is
                                  // zero.
+  kIsBarrier = 8,  // The instruction can cause GC or it reads/writes registers
+                   // that are not explicitly given. Nothing can be reordered
+                   // across such an instruction.
 };
 
 class InstructionScheduler final : public ZoneObject {
@@ -46,7 +49,7 @@ class InstructionScheduler final : public ZoneObject {
    public:
     ScheduleGraphNode(Zone* zone, Instruction* instr);
 
-    // Mark the instruction represented by 'node' as a dependecy of this one.
+    // Mark the instruction represented by 'node' as a dependency of this one.
     // The current instruction will be registered as an unscheduled predecessor
     // of 'node' (i.e. it must be scheduled before 'node').
     void AddSuccessor(ScheduleGraphNode* node);
@@ -141,12 +144,16 @@
   // Perform scheduling for the current block specifying the queue type to
   // use to determine the next best candidate.
   template <typename QueueType>
-  void ScheduleBlock();
+  void Schedule();
 
   // Return the scheduling properties of the given instruction.
   V8_EXPORT_PRIVATE int GetInstructionFlags(const Instruction* instr) const;
   int GetTargetInstructionFlags(const Instruction* instr) const;
 
+  bool IsBarrier(const Instruction* instr) const {
+    return (GetInstructionFlags(instr) & kIsBarrier) != 0;
+  }
+
   // Check whether the given instruction has side effects (e.g. function call,
   // memory store).
   bool HasSideEffect(const Instruction* instr) const {
diff --git a/test/cctest/test-code-stub-assembler.cc b/test/cctest/test-code-stub-assembler.cc
index 39068c0d52..1e40e14834 100644
--- a/test/cctest/test-code-stub-assembler.cc
+++ b/test/cctest/test-code-stub-assembler.cc
@@ -3619,6 +3619,43 @@ TEST(TestCallBuiltinIndirectLoad) {
                        Handle<String>::cast(result.ToHandleChecked())));
 }
 
+TEST(InstructionSchedulingCallerSavedRegisters) {
+  // This is a regression test for v8:9775, where TF's instruction scheduler
+  // incorrectly moved pure operations in between an ArchSaveCallerRegisters
+  // and an ArchRestoreCallerRegisters instruction.
+  bool old_turbo_instruction_scheduling = FLAG_turbo_instruction_scheduling;
+  FLAG_turbo_instruction_scheduling = true;
+
+  Isolate* isolate(CcTest::InitIsolateOnce());
+  const int kNumParams = 1;
+  CodeAssemblerTester asm_tester(isolate, kNumParams);
+  CodeStubAssembler m(asm_tester.state());
+
+  {
+    Node* x = m.SmiUntag(m.Parameter(0));
+    Node* y = m.WordOr(m.WordShr(x, 1), m.IntPtrConstant(1));
+    TNode<ExternalReference> isolate_ptr =
+        m.ExternalConstant(ExternalReference::isolate_address(isolate));
+    m.CallCFunctionWithCallerSavedRegisters(
+        m.ExternalConstant(
+            ExternalReference::smi_lexicographic_compare_function()),
+        MachineType::Int32(), kSaveFPRegs,
+        std::make_pair(MachineType::Pointer(), isolate_ptr),
+        std::make_pair(MachineType::TaggedSigned(), m.SmiConstant(0)),
+        std::make_pair(MachineType::TaggedSigned(), m.SmiConstant(0)));
+    m.Return(m.SmiTag(m.Signed(m.WordOr(x, y))));
+  }
+
+  AssemblerOptions options = AssemblerOptions::Default(isolate);
+  FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
+  Handle<Object> input = isolate->factory()->NewNumber(8);
+  MaybeHandle<Object> result = ft.Call(input);
+  CHECK(result.ToHandleChecked()->IsSmi());
+  CHECK_EQ(result.ToHandleChecked()->Number(), 13);
+
+  FLAG_turbo_instruction_scheduling = old_turbo_instruction_scheduling;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
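
The barrier mechanism introduced above is easiest to see in isolation. The following minimal, self-contained C++ sketch mirrors what AddInstruction now does: non-barrier instructions accumulate in a buffer whose members may be reordered relative to one another, while a barrier first flushes the buffer and is then emitted in place, so nothing can migrate across it in either direction. All names here (TinyScheduler, Instr, Emit) are hypothetical stand-ins for illustration, not V8's actual classes.

#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-ins for V8's Instruction and ArchOpcodeFlags;
// illustration only.
enum Flags {
  kNoFlags = 0,
  kIsBarrier = 1,  // Nothing may be reordered across this instruction.
};

struct Instr {
  std::string name;
  int flags;
};

class TinyScheduler {
 public:
  void Add(const Instr& instr) {
    if (instr.flags & kIsBarrier) {
      Flush();      // Schedule everything buffered so far...
      Emit(instr);  // ...then emit the barrier in place. Later instructions
      return;       // go into a fresh buffer and cannot move before it.
    }
    buffer_.push_back(instr);  // Non-barriers may be reordered within the
  }                            // current region when Flush() runs.

  // A real scheduler would pick an order by latency/criticality here;
  // this sketch simply preserves program order.
  void Flush() {
    for (const Instr& i : buffer_) Emit(i);
    buffer_.clear();
  }

 private:
  void Emit(const Instr& i) { std::cout << i.name << "\n"; }

  std::vector<Instr> buffer_;
};

int main() {
  TinyScheduler s;
  s.Add({"SaveCallerRegisters", kIsBarrier});
  s.Add({"PureWordOr", kNoFlags});  // Stays between save and restore.
  s.Add({"CallCFunction", kIsBarrier});
  s.Add({"RestoreCallerRegisters", kIsBarrier});
  s.Flush();  // End of block: schedule any remaining instructions.
  return 0;
}

Flushing at the barrier makes the no-reordering guarantee structural rather than analytical: a barrier and a pure operation are never in the same reorderable region. That is why the patch marks kArchSaveCallerRegisters, kArchRestoreCallerRegisters, and all non-tail call opcodes as kIsBarrier rather than merely kHasSideEffect, which would only order them against other effectful instructions.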