2014-08-08 07:04:07 +00:00
|
|
|
// Copyright 2014 the V8 project authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file.
|
|
|
|
|
2018-11-12 14:12:52 +00:00
|
|
|
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
|
2014-08-08 07:04:07 +00:00
|
|
|
|
2019-05-21 09:30:15 +00:00
|
|
|
#include "src/codegen/code-factory.h"
|
2019-07-16 21:46:08 +00:00
|
|
|
#include "src/codegen/tick-counter.h"
|
This CL enables precise source positions for all V8 compilers. It merges compiler::SourcePosition and internal::SourcePosition to a single class used throughout the codebase. The new internal::SourcePosition instances store an id identifying an inlined function in addition to a script offset.
SourcePosition::InliningId() refers to a the new table DeoptimizationInputData::InliningPositions(), which provides the following data for every inlining id:
- The inlined SharedFunctionInfo as an offset into DeoptimizationInfo::LiteralArray
- The SourcePosition of the inlining. Recursively, this yields the full inlining stack.
Before the Code object is created, the same information can be found in CompilationInfo::inlined_functions().
If SourcePosition::InliningId() is SourcePosition::kNotInlined, it refers to the outer (non-inlined) function.
So every SourcePosition has full information about its inlining stack, as long as the corresponding Code object is known. The internal represenation of a source position is a positive 64bit integer.
All compilers create now appropriate source positions for inlined functions. In the case of Turbofan, this required using AstGraphBuilderWithPositions for inlined functions too. So this class is now moved to a header file.
At the moment, the additional information in source positions is only used in --trace-deopt and --code-comments. The profiler needs to be updated, at the moment it gets the correct script offsets from the deopt info, but the wrong script id from the reconstructed deopt stack, which can lead to wrong outputs. This should be resolved by making the profiler use the new inlining information for deopts.
I activated the inlined deoptimization tests in test-cpu-profiler.cc for Turbofan, changing them to a case where the deopt stack and the inlining position agree. It is currently still broken for other cases.
The following additional changes were necessary:
- The source position table (internal::SourcePositionTableBuilder etc.) supports now 64bit source positions. Encoding source positions in a single 64bit int together with the difference encoding in the source position table results in very little overhead for the inlining id, since only 12% of the source positions in Octane have a changed inlining id.
- The class HPositionInfo was effectively dead code and is now removed.
- SourcePosition has new printing and information facilities, including computing a full inlining stack.
- I had to rename compiler/source-position.{h,cc} to compiler/compiler-source-position-table.{h,cc} to avoid clashes with the new src/source-position.cc file.
- I wrote the new wrapper PodArray for ByteArray. It is a template working with any POD-type. This is used in DeoptimizationInputData::InliningPositions().
- I removed HInlinedFunctionInfo and HGraph::inlined_function_infos, because they were only used for the now obsolete Crankshaft inlining ids.
- Crankshaft managed a list of inlined functions in Lithium: LChunk::inlined_functions. This is an analog structure to CompilationInfo::inlined_functions. So I removed LChunk::inlined_functions and made Crankshaft use CompilationInfo::inlined_functions instead, because this was necessary to register the offsets into the literal array in a uniform way. This is a safe change because LChunk::inlined_functions has no other uses and the functions in CompilationInfo::inlined_functions have a strictly longer lifespan, being created earlier (in Hydrogen already).
BUG=v8:5432
Review-Url: https://codereview.chromium.org/2451853002
Cr-Commit-Position: refs/heads/master@{#40975}
2016-11-14 17:21:37 +00:00
|
|
|
#include "src/compiler/compiler-source-position-table.h"
|
2015-02-20 10:11:45 +00:00
|
|
|
#include "src/compiler/graph.h"
|
2015-02-25 16:37:49 +00:00
|
|
|
#include "src/compiler/schedule.h"
|
2019-05-24 13:51:59 +00:00
|
|
|
#include "src/flags/flags.h"
|
2019-05-23 08:51:46 +00:00
|
|
|
#include "src/objects/objects-inl.h"
|
2014-10-01 08:34:25 +00:00
|
|
|
#include "test/unittests/compiler/compiler-test-utils.h"
|
2014-08-14 06:33:50 +00:00
|
|
|
|
2014-08-08 07:04:07 +00:00
|
|
|
namespace v8 {
|
|
|
|
namespace internal {
|
|
|
|
namespace compiler {
|
|
|
|
|
2020-07-23 10:06:02 +00:00
|
|
|
// Seeds the per-test random number generator from --random_seed so that
// randomized failures are reproducible; uses a compressed graph zone, as
// requested via kCompressGraphZone.
InstructionSelectorTest::InstructionSelectorTest()
    : TestWithNativeContextAndZone(kCompressGraphZone),
      rng_(FLAG_random_seed) {}
|
2014-08-14 06:33:50 +00:00
|
|
|
|
2018-09-17 11:30:48 +00:00
|
|
|
// Out-of-line defaulted destructor (keeps the vtable/dtor emission here).
InstructionSelectorTest::~InstructionSelectorTest() = default;
|
2014-09-04 08:44:03 +00:00
|
|
|
|
2014-08-08 07:04:07 +00:00
|
|
|
// Runs the instruction selector over the graph built so far and packages the
// result into a Stream for inspection by tests: the selected instructions
// (filtered according to |mode|), the constant/immediate pools, the
// FP/reference classification of virtual registers, and the deoptimization
// entries.
InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
    InstructionSelector::Features features,
    InstructionSelectorTest::StreamBuilderMode mode,
    InstructionSelector::SourcePositionMode source_position_mode) {
  Schedule* schedule = ExportForTest();
  if (FLAG_trace_turbo) {
    StdoutStream{} << "=== Schedule before instruction selection ==="
                   << std::endl
                   << *schedule;
  }
  // An empty graph would make the test vacuous; catch that early.
  size_t const node_count = graph()->NodeCount();
  EXPECT_NE(0u, node_count);
  Linkage linkage(call_descriptor());
  InstructionBlocks* instruction_blocks =
      InstructionSequence::InstructionBlocksFor(test_->zone(), schedule);
  InstructionSequence sequence(test_->isolate(), test_->zone(),
                               instruction_blocks);
  SourcePositionTable source_position_table(graph());
  TickCounter tick_counter;
  size_t max_unoptimized_frame_height = 0;
  size_t max_pushed_argument_count = 0;
  // Scheduling is disabled so tests see the instructions in selection order.
  InstructionSelector selector(
      test_->zone(), node_count, &linkage, &sequence, schedule,
      &source_position_table, nullptr,
      InstructionSelector::kEnableSwitchJumpTable, &tick_counter, nullptr,
      &max_unoptimized_frame_height, &max_pushed_argument_count,
      source_position_mode, features, InstructionSelector::kDisableScheduling,
      InstructionSelector::kEnableRootsRelativeAddressing,
      PoisoningMitigationLevel::kPoisonAll);
  selector.SelectInstructions();
  if (FLAG_trace_turbo) {
    StdoutStream{} << "=== Code sequence after instruction selection ==="
                   << std::endl
                   << sequence;
  }
  Stream s;
  s.virtual_registers_ = selector.GetVirtualRegistersForTesting();
  // Map virtual registers.
  for (Instruction* const instr : sequence) {
    // Negative opcodes are not architecture instructions; skip them.
    if (instr->opcode() < 0) continue;
    if (mode == kTargetInstructions) {
      // Keep only opcodes from the target architecture's opcode list.
      switch (instr->arch_opcode()) {
#define CASE(Name) \
  case k##Name:    \
    break;
        TARGET_ARCH_OPCODE_LIST(CASE)
#undef CASE
        default:
          continue;
      }
    }
    if (mode == kAllExceptNopInstructions && instr->arch_opcode() == kArchNop) {
      continue;
    }
    // Collect constants produced by outputs (outputs must never be
    // immediates).
    for (size_t i = 0; i < instr->OutputCount(); ++i) {
      InstructionOperand* output = instr->OutputAt(i);
      EXPECT_NE(InstructionOperand::IMMEDIATE, output->kind());
      if (output->IsConstant()) {
        int vreg = ConstantOperand::cast(output)->virtual_register();
        s.constants_.insert(std::make_pair(vreg, sequence.GetConstant(vreg)));
      }
    }
    // Collect indexed immediates used by inputs (inputs must never be
    // constants).
    for (size_t i = 0; i < instr->InputCount(); ++i) {
      InstructionOperand* input = instr->InputAt(i);
      EXPECT_NE(InstructionOperand::CONSTANT, input->kind());
      if (input->IsImmediate()) {
        auto imm = ImmediateOperand::cast(input);
        if (imm->type() == ImmediateOperand::INDEXED) {
          int index = imm->indexed_value();
          s.immediates_.insert(
              std::make_pair(index, sequence.GetImmediate(imm)));
        }
      }
    }
    s.instructions_.push_back(instr);
  }
  // Classify each virtual register as FP or reference; the two sets must be
  // disjoint.
  for (auto i : s.virtual_registers_) {
    int const virtual_register = i.second;
    if (sequence.IsFP(virtual_register)) {
      EXPECT_FALSE(sequence.IsReference(virtual_register));
      s.doubles_.insert(virtual_register);
    }
    if (sequence.IsReference(virtual_register)) {
      EXPECT_FALSE(sequence.IsFP(virtual_register));
      s.references_.insert(virtual_register);
    }
  }
  // Copy out the frame-state descriptors recorded for deoptimization.
  for (int i = 0; i < sequence.GetDeoptimizationEntryCount(); i++) {
    s.deoptimization_entries_.push_back(
        sequence.GetDeoptimizationEntry(i).descriptor());
  }
  return s;
}
|
|
|
|
|
2014-10-06 14:30:55 +00:00
|
|
|
int InstructionSelectorTest::Stream::ToVreg(const Node* node) const {
|
|
|
|
VirtualRegisters::const_iterator i = virtual_registers_.find(node->id());
|
|
|
|
CHECK(i != virtual_registers_.end());
|
|
|
|
return i->second;
|
|
|
|
}
|
|
|
|
|
2014-10-24 09:36:40 +00:00
|
|
|
bool InstructionSelectorTest::Stream::IsFixed(const InstructionOperand* operand,
|
|
|
|
Register reg) const {
|
|
|
|
if (!operand->IsUnallocated()) return false;
|
|
|
|
const UnallocatedOperand* unallocated = UnallocatedOperand::cast(operand);
|
|
|
|
if (!unallocated->HasFixedRegisterPolicy()) return false;
|
2015-10-02 16:55:12 +00:00
|
|
|
return unallocated->fixed_register_index() == reg.code();
|
2014-10-24 09:36:40 +00:00
|
|
|
}
|
|
|
|
|
2014-10-31 06:41:07 +00:00
|
|
|
bool InstructionSelectorTest::Stream::IsSameAsFirst(
|
|
|
|
const InstructionOperand* operand) const {
|
|
|
|
if (!operand->IsUnallocated()) return false;
|
|
|
|
const UnallocatedOperand* unallocated = UnallocatedOperand::cast(operand);
|
|
|
|
return unallocated->HasSameAsInputPolicy();
|
|
|
|
}
|
|
|
|
|
2014-10-24 09:36:40 +00:00
|
|
|
bool InstructionSelectorTest::Stream::IsUsedAtStart(
|
|
|
|
const InstructionOperand* operand) const {
|
|
|
|
if (!operand->IsUnallocated()) return false;
|
|
|
|
const UnallocatedOperand* unallocated = UnallocatedOperand::cast(operand);
|
|
|
|
return unallocated->IsUsedAtStart();
|
|
|
|
}
|
|
|
|
|
2015-06-23 07:17:07 +00:00
|
|
|
// Creates a FrameStateFunctionInfo for an interpreted function with the given
// parameter/local counts and an empty SharedFunctionInfo handle — sufficient
// for tests that only need a structurally valid frame state.
const FrameStateFunctionInfo*
InstructionSelectorTest::StreamBuilder::GetFrameStateFunctionInfo(
    int parameter_count, int local_count) {
  return common()->CreateFrameStateFunctionInfo(
      FrameStateType::kInterpretedFunction, parameter_count, local_count,
      Handle<SharedFunctionInfo>());
}
|
|
|
|
|
2014-08-20 04:01:00 +00:00
|
|
|
// -----------------------------------------------------------------------------
|
|
|
|
// Return.
|
|
|
|
|
2014-09-25 08:56:02 +00:00
|
|
|
// Returning a float32 constant selects a constant-producing kArchNop followed
// by kArchRet.
TARGET_TEST_F(InstructionSelectorTest, ReturnFloat32Constant) {
  const float kFloat32Value = 4.2f;
  StreamBuilder builder(this, MachineType::Float32());
  builder.Return(builder.Float32Constant(kFloat32Value));
  Stream stream = builder.Build(kAllInstructions);
  ASSERT_EQ(3U, stream.size());
  EXPECT_EQ(kArchNop, stream[0]->arch_opcode());
  ASSERT_EQ(InstructionOperand::CONSTANT, stream[0]->OutputAt(0)->kind());
  EXPECT_FLOAT_EQ(kFloat32Value, stream.ToFloat32(stream[0]->OutputAt(0)));
  EXPECT_EQ(kArchRet, stream[1]->arch_opcode());
  EXPECT_EQ(2U, stream[1]->InputCount());
}
|
|
|
|
|
2014-08-14 06:33:50 +00:00
|
|
|
// Returning a parameter selects a single-output kArchNop followed by
// kArchRet.
TARGET_TEST_F(InstructionSelectorTest, ReturnParameter) {
  StreamBuilder builder(this, MachineType::Int32(), MachineType::Int32());
  builder.Return(builder.Parameter(0));
  Stream stream = builder.Build(kAllInstructions);
  ASSERT_EQ(3U, stream.size());
  EXPECT_EQ(kArchNop, stream[0]->arch_opcode());
  ASSERT_EQ(1U, stream[0]->OutputCount());
  EXPECT_EQ(kArchRet, stream[1]->arch_opcode());
  EXPECT_EQ(2U, stream[1]->InputCount());
}
|
|
|
|
|
2014-08-14 06:33:50 +00:00
|
|
|
// Returning the int32 constant zero selects a constant-producing kArchNop
// followed by kArchRet, with the constant value recoverable from the stream.
TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
  StreamBuilder builder(this, MachineType::Int32());
  builder.Return(builder.Int32Constant(0));
  Stream stream = builder.Build(kAllInstructions);
  ASSERT_EQ(3U, stream.size());
  EXPECT_EQ(kArchNop, stream[0]->arch_opcode());
  ASSERT_EQ(1U, stream[0]->OutputCount());
  EXPECT_EQ(InstructionOperand::CONSTANT, stream[0]->OutputAt(0)->kind());
  EXPECT_EQ(0, stream.ToInt32(stream[0]->OutputAt(0)));
  EXPECT_EQ(kArchRet, stream[1]->arch_opcode());
  EXPECT_EQ(2U, stream[1]->InputCount());
}
|
|
|
|
|
2014-08-20 04:01:00 +00:00
|
|
|
// -----------------------------------------------------------------------------
|
|
|
|
// Conversions.
|
|
|
|
|
2016-04-24 11:39:31 +00:00
|
|
|
// TruncateFloat64ToWord32 on a parameter selects kArchTruncateDoubleToI with
// one input and one output.
TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToWord32WithParameter) {
  StreamBuilder builder(this, MachineType::Int32(), MachineType::Float64());
  builder.Return(builder.TruncateFloat64ToWord32(builder.Parameter(0)));
  Stream stream = builder.Build(kAllInstructions);
  ASSERT_EQ(4U, stream.size());
  EXPECT_EQ(kArchNop, stream[0]->arch_opcode());
  EXPECT_EQ(kArchTruncateDoubleToI, stream[1]->arch_opcode());
  EXPECT_EQ(1U, stream[1]->InputCount());
  EXPECT_EQ(1U, stream[1]->OutputCount());
  EXPECT_EQ(kArchRet, stream[2]->arch_opcode());
}
|
|
|
|
|
2014-08-20 09:16:30 +00:00
|
|
|
// -----------------------------------------------------------------------------
|
|
|
|
// Parameters.
|
|
|
|
|
|
|
|
// A Float64 parameter's virtual register is classified as double.
TARGET_TEST_F(InstructionSelectorTest, DoubleParameter) {
  StreamBuilder builder(this, MachineType::Float64(), MachineType::Float64());
  Node* the_param = builder.Parameter(0);
  builder.Return(the_param);
  Stream stream = builder.Build(kAllInstructions);
  EXPECT_TRUE(stream.IsDouble(the_param));
}
|
|
|
|
|
|
|
|
// An AnyTagged parameter's virtual register is classified as a reference.
TARGET_TEST_F(InstructionSelectorTest, ReferenceParameter) {
  StreamBuilder builder(this, MachineType::AnyTagged(),
                        MachineType::AnyTagged());
  Node* the_param = builder.Parameter(0);
  builder.Return(the_param);
  Stream stream = builder.Build(kAllInstructions);
  EXPECT_TRUE(stream.IsReference(the_param));
}
|
|
|
|
|
|
|
|
// -----------------------------------------------------------------------------
|
2015-10-14 14:53:04 +00:00
|
|
|
// FinishRegion.
|
2014-08-20 09:16:30 +00:00
|
|
|
|
2015-10-14 14:53:04 +00:00
|
|
|
// A FinishRegion node is selected to a kArchNop whose output aliases the
// wrapped parameter's virtual register, and the region result is classified
// as a reference.
TARGET_TEST_F(InstructionSelectorTest, FinishRegion) {
  StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged());
  Node* param = m.Parameter(0);
  Node* finish =
      m.AddNode(m.common()->FinishRegion(), param, m.graph()->start());
  m.Return(finish);
  Stream s = m.Build(kAllInstructions);
  ASSERT_EQ(3U, s.size());
  EXPECT_EQ(kArchNop, s[0]->arch_opcode());
  ASSERT_EQ(1U, s[0]->OutputCount());
  ASSERT_TRUE(s[0]->Output()->IsUnallocated());
  EXPECT_EQ(kArchRet, s[1]->arch_opcode());
  // The nop's output and the return's value input must both map to the
  // parameter's virtual register.
  EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->Output()));
  EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[1]->InputAt(1)));
  EXPECT_TRUE(s.IsReference(finish));
}
|
|
|
|
|
|
|
|
// -----------------------------------------------------------------------------
|
2014-09-03 08:49:21 +00:00
|
|
|
// Phi.
|
2014-08-20 09:16:30 +00:00
|
|
|
|
2019-05-27 11:31:49 +00:00
|
|
|
// Value-parameterized fixture: each phi test below runs once per MachineType
// supplied by INSTANTIATE_TEST_SUITE_P.
using InstructionSelectorPhiTest =
    InstructionSelectorTestWithParam<MachineType>;
|
2014-08-20 09:16:30 +00:00
|
|
|
|
2014-09-05 11:44:31 +00:00
|
|
|
// A phi's "double" classification must agree with that of both of its
// inputs, for every parameterized machine type.
TARGET_TEST_P(InstructionSelectorPhiTest, Doubleness) {
  const MachineType type = GetParam();
  StreamBuilder m(this, type, type, type);
  Node* param0 = m.Parameter(0);
  Node* param1 = m.Parameter(1);
  // Diamond control flow so the two parameters merge into a phi at c.
  RawMachineLabel a, b, c;
  m.Branch(m.Int32Constant(0), &a, &b);
  m.Bind(&a);
  m.Goto(&c);
  m.Bind(&b);
  m.Goto(&c);
  m.Bind(&c);
  Node* phi = m.Phi(type.representation(), param0, param1);
  m.Return(phi);
  Stream s = m.Build(kAllInstructions);
  EXPECT_EQ(s.IsDouble(phi), s.IsDouble(param0));
  EXPECT_EQ(s.IsDouble(phi), s.IsDouble(param1));
}
|
|
|
|
|
2014-09-05 11:44:31 +00:00
|
|
|
// A phi's "reference" classification must agree with that of both of its
// inputs, for every parameterized machine type.
TARGET_TEST_P(InstructionSelectorPhiTest, Referenceness) {
  const MachineType type = GetParam();
  StreamBuilder m(this, type, type, type);
  Node* param0 = m.Parameter(0);
  Node* param1 = m.Parameter(1);
  // Diamond control flow so the two parameters merge into a phi at c.
  RawMachineLabel a, b, c;
  m.Branch(m.Int32Constant(1), &a, &b);
  m.Bind(&a);
  m.Goto(&c);
  m.Bind(&b);
  m.Goto(&c);
  m.Bind(&c);
  Node* phi = m.Phi(type.representation(), param0, param1);
  m.Return(phi);
  Stream s = m.Build(kAllInstructions);
  EXPECT_EQ(s.IsReference(phi), s.IsReference(param0));
  EXPECT_EQ(s.IsReference(phi), s.IsReference(param1));
}
|
|
|
|
|
2019-02-15 16:53:29 +00:00
|
|
|
// Instantiate the phi tests for a representative spread of machine types:
// floating point, all integer widths, pointer and tagged.
INSTANTIATE_TEST_SUITE_P(
    InstructionSelectorTest, InstructionSelectorPhiTest,
    ::testing::Values(MachineType::Float64(), MachineType::Int8(),
                      MachineType::Uint8(), MachineType::Int16(),
                      MachineType::Uint16(), MachineType::Int32(),
                      MachineType::Uint32(), MachineType::Int64(),
                      MachineType::Uint64(), MachineType::Pointer(),
                      MachineType::AnyTagged()));
|
2014-08-20 09:16:30 +00:00
|
|
|
|
|
|
|
// -----------------------------------------------------------------------------
|
|
|
|
// ValueEffect.
|
|
|
|
|
|
|
|
// A plain Load and an explicitly region-wrapped Load (BeginRegion feeding the
// load's effect input) must select to instruction streams of identical shape:
// same opcodes, same input and output counts, position by position.
TARGET_TEST_F(InstructionSelectorTest, ValueEffect) {
  StreamBuilder m1(this, MachineType::Int32(), MachineType::Pointer());
  Node* p1 = m1.Parameter(0);
  m1.Return(m1.Load(MachineType::Int32(), p1, m1.Int32Constant(0)));
  Stream s1 = m1.Build(kAllInstructions);
  StreamBuilder m2(this, MachineType::Int32(), MachineType::Pointer());
  Node* p2 = m2.Parameter(0);
  // Same load, but built with an explicit observable region as the effect
  // dependency.
  m2.Return(m2.AddNode(
      m2.machine()->Load(MachineType::Int32()), p2, m2.Int32Constant(0),
      m2.AddNode(m2.common()->BeginRegion(RegionObservability::kObservable),
                 m2.graph()->start())));
  Stream s2 = m2.Build(kAllInstructions);
  EXPECT_LE(3U, s1.size());
  ASSERT_EQ(s1.size(), s2.size());
  TRACED_FORRANGE(size_t, i, 0, s1.size() - 1) {
    const Instruction* i1 = s1[i];
    const Instruction* i2 = s2[i];
    EXPECT_EQ(i1->arch_opcode(), i2->arch_opcode());
    EXPECT_EQ(i1->InputCount(), i2->InputCount());
    EXPECT_EQ(i1->OutputCount(), i2->OutputCount());
  }
}
|
|
|
|
|
2014-08-27 15:56:11 +00:00
|
|
|
// -----------------------------------------------------------------------------
|
|
|
|
// Calls with deoptimization.
|
2014-09-25 08:56:02 +00:00
|
|
|
|
2014-08-28 11:06:26 +00:00
|
|
|
// A JS call carrying a frame state selects down to exactly
// kArchCallJSFunction followed by kArchRet (after skipping the argument
// setup instructions).
TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
  StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
                  MachineType::AnyTagged(), MachineType::AnyTagged());

  BytecodeOffset bailout_id(42);

  Node* function_node = m.Parameter(0);
  Node* receiver = m.Parameter(1);
  Node* context = m.Parameter(2);

  ZoneVector<MachineType> int32_type(1, MachineType::Int32(), zone());
  ZoneVector<MachineType> tagged_type(1, MachineType::AnyTagged(), zone());
  ZoneVector<MachineType> empty_type(zone());

  auto call_descriptor = Linkage::GetJSCallDescriptor(
      zone(), false, 1,
      CallDescriptor::kNeedsFrameState | CallDescriptor::kCanUseRoots);

  // Build frame state for the state before the call: one int32 parameter,
  // no locals, one tagged stack slot.
  Node* parameters = m.AddNode(
      m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
      m.Int32Constant(1));
  Node* locals = m.AddNode(
      m.common()->TypedStateValues(&empty_type, SparseInputMask::Dense()));
  Node* stack = m.AddNode(
      m.common()->TypedStateValues(&tagged_type, SparseInputMask::Dense()),
      m.UndefinedConstant());
  Node* context_sentinel = m.Int32Constant(0);
  Node* state_node = m.AddNode(
      m.common()->FrameState(bailout_id, OutputFrameStateCombine::PokeAt(0),
                             m.GetFrameStateFunctionInfo(1, 0)),
      parameters, locals, stack, context_sentinel, function_node,
      m.UndefinedConstant());

  // Build the call.
  Node* nodes[] = {function_node, receiver, m.UndefinedConstant(),
                   m.Int32Constant(1), context, state_node};
  Node* call = m.CallNWithFrameState(call_descriptor, arraysize(nodes), nodes);
  m.Return(call);

  Stream s = m.Build(kAllExceptNopInstructions);

  // Skip until kArchCallJSFunction.
  size_t index = 0;
  for (; index < s.size() && s[index]->arch_opcode() != kArchCallJSFunction;
       index++) {
  }
  // Now we should have two instructions: call and return.
  ASSERT_EQ(index + 2, s.size());

  EXPECT_EQ(kArchCallJSFunction, s[index++]->arch_opcode());
  EXPECT_EQ(kArchRet, s[index++]->arch_opcode());

  // TODO(jarin) Check deoptimization table.
}
|
|
|
|
|
2015-11-04 17:17:06 +00:00
|
|
|
TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
|
2015-12-10 09:03:30 +00:00
|
|
|
StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
|
|
|
|
MachineType::AnyTagged(), MachineType::AnyTagged());
|
2014-08-27 15:56:11 +00:00
|
|
|
|
2021-01-20 12:07:43 +00:00
|
|
|
BytecodeOffset bailout_id_before(42);
|
2014-08-27 15:56:11 +00:00
|
|
|
|
|
|
|
// Some arguments for the call node.
|
|
|
|
Node* function_node = m.Parameter(0);
|
|
|
|
Node* receiver = m.Parameter(1);
|
|
|
|
Node* context = m.Int32Constant(1); // Context is ignored.
|
|
|
|
|
2015-12-10 09:03:30 +00:00
|
|
|
ZoneVector<MachineType> int32_type(1, MachineType::Int32(), zone());
|
|
|
|
ZoneVector<MachineType> float64_type(1, MachineType::Float64(), zone());
|
|
|
|
ZoneVector<MachineType> tagged_type(1, MachineType::AnyTagged(), zone());
|
2015-03-19 14:00:28 +00:00
|
|
|
|
2017-05-31 12:40:13 +00:00
|
|
|
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
|
2018-02-09 19:19:25 +00:00
|
|
|
auto call_descriptor = Linkage::GetStubCallDescriptor(
|
2018-06-18 15:14:29 +00:00
|
|
|
zone(), callable.descriptor(), 1, CallDescriptor::kNeedsFrameState,
|
|
|
|
Operator::kNoProperties);
|
2015-11-04 17:17:06 +00:00
|
|
|
|
2014-08-27 15:56:11 +00:00
|
|
|
// Build frame state for the state before the call.
|
2017-01-05 10:44:44 +00:00
|
|
|
Node* parameters = m.AddNode(
|
|
|
|
m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
|
|
|
|
m.Int32Constant(43));
|
|
|
|
Node* locals = m.AddNode(
|
|
|
|
m.common()->TypedStateValues(&float64_type, SparseInputMask::Dense()),
|
|
|
|
m.Float64Constant(0.5));
|
|
|
|
Node* stack = m.AddNode(
|
|
|
|
m.common()->TypedStateValues(&tagged_type, SparseInputMask::Dense()),
|
|
|
|
m.UndefinedConstant());
|
2014-09-01 09:31:14 +00:00
|
|
|
Node* context_sentinel = m.Int32Constant(0);
|
2017-06-30 12:11:44 +00:00
|
|
|
Node* state_node =
|
|
|
|
m.AddNode(m.common()->FrameState(bailout_id_before,
|
|
|
|
OutputFrameStateCombine::PokeAt(0),
|
|
|
|
m.GetFrameStateFunctionInfo(1, 1)),
|
|
|
|
parameters, locals, stack, context_sentinel, function_node,
|
|
|
|
m.UndefinedConstant());
|
2014-08-27 15:56:11 +00:00
|
|
|
|
|
|
|
// Build the call.
|
2015-11-04 17:17:06 +00:00
|
|
|
Node* stub_code = m.HeapConstant(callable.code());
|
2016-12-19 11:35:42 +00:00
|
|
|
Node* nodes[] = {stub_code, function_node, receiver, context, state_node};
|
2018-02-09 19:19:25 +00:00
|
|
|
Node* call = m.CallNWithFrameState(call_descriptor, arraysize(nodes), nodes);
|
2014-08-27 15:56:11 +00:00
|
|
|
m.Return(call);
|
|
|
|
|
|
|
|
Stream s = m.Build(kAllExceptNopInstructions);
|
|
|
|
|
|
|
|
// Skip until kArchCallJSFunction.
|
|
|
|
size_t index = 0;
|
|
|
|
for (; index < s.size() && s[index]->arch_opcode() != kArchCallCodeObject;
|
|
|
|
index++) {
|
|
|
|
}
|
2014-09-01 09:31:14 +00:00
|
|
|
// Now we should have two instructions: call, return.
|
|
|
|
ASSERT_EQ(index + 2, s.size());
|
2014-08-27 15:56:11 +00:00
|
|
|
|
|
|
|
// Check the call instruction
|
|
|
|
const Instruction* call_instr = s[index++];
|
|
|
|
EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode());
|
|
|
|
size_t num_operands =
|
|
|
|
1 + // Code object.
|
[turbofan] IA32 port of branch load poisoning.
The tricky part here is to take away one register from register
allocation for the mask. The only problem is with calls that need
an input operand to be passed in the poison register. For such calls,
we change the register constraint in the instruction selector
to pass the value in whatever place the register allocator sees fit.
During code generation, we then copy the value from that place
to the poison register. By that time, the mask is not necessary
(once we bake the mask into the target, it should be done before
this move).
For the branches, the mask update does not use cmov (unlike x64)
because cmov does not take an immediate and we do not have
a scratch register. Instead we use bit-twiddling tricks
(suggested by @tebbi). For example, here is the code for masking
register update after a bailout on non-zero:
jnz deopt_bailout ;; Bailout branch
setnz bl ;; These three instructions update the mask
add ebx, 255
sar ebx, 31
(On x64, the sequence is:
jnz deopt_bailout
mov r10, 0 ;; We have a scratch register for zero
cmovnz r9, r10 ;; Set to zero if we execute this branch
;; in branch mis-speculation
)
This CL also fixes a bug in register configuration, where we used
to wrongly restrict the array of register name.
Change-Id: I5fceff2faf8bdc527d9934afc284b749574ab69e
Bug: chromium:798964
Reviewed-on: https://chromium-review.googlesource.com/946251
Commit-Queue: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51798}
2018-03-07 06:35:45 +00:00
|
|
|
1 + // Poison index
|
|
|
|
6 + // Frame state deopt id + one input for each value in frame state.
|
2014-08-27 15:56:11 +00:00
|
|
|
1 + // Function.
|
2014-09-01 09:31:14 +00:00
|
|
|
1; // Context.
|
2014-08-27 15:56:11 +00:00
|
|
|
ASSERT_EQ(num_operands, call_instr->InputCount());
|
|
|
|
|
|
|
|
// Code object.
|
|
|
|
EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate());
|
|
|
|
|
|
|
|
// Deoptimization id.
|
[turbofan] IA32 port of branch load poisoning.
The tricky part here is to take away one register from register
allocation for the mask. The only problem is with calls that need
an input operand to be passed in the poison register. For such calls,
we change the register constraint in the instruction selector
to pass the value in whatever place the register allocator sees fit.
During code generation, we then copy the value from that place
to the poison register. By that time, the mask is not necessary
(once we bake the mask into the target, it should be done before
this move).
For the branches, the mask update does not use cmov (unlike x64)
because cmov does not take an immediate and we do not have
a scratch register. Instead we use bit-twiddling tricks
(suggested by @tebbi). For example, here is the code for masking
register update after a bailout on non-zero:
jnz deopt_bailout ;; Bailout branch
setnz bl ;; These three instructions update the mask
add ebx, 255
sar ebx, 31
(On x64, the sequence is:
jnz deopt_bailout
mov r10, 0 ;; We have a scratch register for zero
cmovnz r9, r10 ;; Set to zero if we execute this branch
;; in branch mis-speculation
)
This CL also fixes a bug in register configuration, where we used
to wrongly restrict the array of register name.
Change-Id: I5fceff2faf8bdc527d9934afc284b749574ab69e
Bug: chromium:798964
Reviewed-on: https://chromium-review.googlesource.com/946251
Commit-Queue: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51798}
2018-03-07 06:35:45 +00:00
|
|
|
int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(2));
|
2014-09-01 09:31:14 +00:00
|
|
|
FrameStateDescriptor* desc_before =
|
|
|
|
s.GetFrameStateDescriptor(deopt_id_before);
|
2014-08-27 15:56:11 +00:00
|
|
|
EXPECT_EQ(bailout_id_before, desc_before->bailout_id());
|
2014-09-18 08:56:52 +00:00
|
|
|
EXPECT_EQ(1u, desc_before->parameters_count());
|
|
|
|
EXPECT_EQ(1u, desc_before->locals_count());
|
|
|
|
EXPECT_EQ(1u, desc_before->stack_count());
|
[turbofan] IA32 port of branch load poisoning.
The tricky part here is to take away one register from register
allocation for the mask. The only problem is with calls that need
an input operand to be passed in the poison register. For such calls,
we change the register constraint in the instruction selector
to pass the value in whatever place the register allocator sees fit.
During code generation, we then copy the value from that place
to the poison register. By that time, the mask is not necessary
(once we bake the mask into the target, it should be done before
this move).
For the branches, the mask update does not use cmov (unlike x64)
because cmov does not take an immediate and we do not have
a scratch register. Instead we use bit-twiddling tricks
(suggested by @tebbi). For example, here is the code for masking
register update after a bailout on non-zero:
jnz deopt_bailout ;; Bailout branch
setnz bl ;; These three instructions update the mask
add ebx, 255
sar ebx, 31
(On x64, the sequence is:
jnz deopt_bailout
mov r10, 0 ;; We have a scratch register for zero
cmovnz r9, r10 ;; Set to zero if we execute this branch
;; in branch mis-speculation
)
This CL also fixes a bug in register configuration, where we used
to wrongly restrict the array of register name.
Change-Id: I5fceff2faf8bdc527d9934afc284b749574ab69e
Bug: chromium:798964
Reviewed-on: https://chromium-review.googlesource.com/946251
Commit-Queue: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51798}
2018-03-07 06:35:45 +00:00
|
|
|
EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(4)));
|
|
|
|
EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(5))); // This should be a context.
|
2014-10-08 08:47:29 +00:00
|
|
|
// We inserted 0 here.
|
[turbofan] IA32 port of branch load poisoning.
The tricky part here is to take away one register from register
allocation for the mask. The only problem is with calls that need
an input operand to be passed in the poison register. For such calls,
we change the register constraint in the instruction selector
to pass the value in whatever place the register allocator sees fit.
During code generation, we then copy the value from that place
to the poison register. By that time, the mask is not necessary
(once we bake the mask into the target, it should be done before
this move).
For the branches, the mask update does not use cmov (unlike x64)
because cmov does not take an immediate and we do not have
a scratch register. Instead we use bit-twiddling tricks
(suggested by @tebbi). For example, here is the code for masking
register update after a bailout on non-zero:
jnz deopt_bailout ;; Bailout branch
setnz bl ;; These three instructions update the mask
add ebx, 255
sar ebx, 31
(On x64, the sequence is:
jnz deopt_bailout
mov r10, 0 ;; We have a scratch register for zero
cmovnz r9, r10 ;; Set to zero if we execute this branch
;; in branch mis-speculation
)
This CL also fixes a bug in register configuration, where we used
to wrongly restrict the array of register name.
Change-Id: I5fceff2faf8bdc527d9934afc284b749574ab69e
Bug: chromium:798964
Reviewed-on: https://chromium-review.googlesource.com/946251
Commit-Queue: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51798}
2018-03-07 06:35:45 +00:00
|
|
|
EXPECT_EQ(0.5, s.ToFloat64(call_instr->InputAt(6)));
|
|
|
|
EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(7))->IsUndefined(isolate()));
|
2014-08-27 15:56:11 +00:00
|
|
|
|
|
|
|
// Function.
|
[turbofan] IA32 port of branch load poisoning.
The tricky part here is to take away one register from register
allocation for the mask. The only problem is with calls that need
an input operand to be passed in the poison register. For such calls,
we change the register constraint in the instruction selector
to pass the value in whatever place the register allocator sees fit.
During code generation, we then copy the value from that place
to the poison register. By that time, the mask is not necessary
(once we bake the mask into the target, it should be done before
this move).
For the branches, the mask update does not use cmov (unlike x64)
because cmov does not take an immediate and we do not have
a scratch register. Instead we use bit-twiddling tricks
(suggested by @tebbi). For example, here is the code for masking
register update after a bailout on non-zero:
jnz deopt_bailout ;; Bailout branch
setnz bl ;; These three instructions update the mask
add ebx, 255
sar ebx, 31
(On x64, the sequence is:
jnz deopt_bailout
mov r10, 0 ;; We have a scratch register for zero
cmovnz r9, r10 ;; Set to zero if we execute this branch
;; in branch mis-speculation
)
This CL also fixes a bug in register configuration, where we used
to wrongly restrict the array of register name.
Change-Id: I5fceff2faf8bdc527d9934afc284b749574ab69e
Bug: chromium:798964
Reviewed-on: https://chromium-review.googlesource.com/946251
Commit-Queue: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51798}
2018-03-07 06:35:45 +00:00
|
|
|
EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(8)));
|
2014-08-27 15:56:11 +00:00
|
|
|
// Context.
|
[turbofan] IA32 port of branch load poisoning.
The tricky part here is to take away one register from register
allocation for the mask. The only problem is with calls that need
an input operand to be passed in the poison register. For such calls,
we change the register constraint in the instruction selector
to pass the value in whatever place the register allocator sees fit.
During code generation, we then copy the value from that place
to the poison register. By that time, the mask is not necessary
(once we bake the mask into the target, it should be done before
this move).
For the branches, the mask update does not use cmov (unlike x64)
because cmov does not take an immediate and we do not have
a scratch register. Instead we use bit-twiddling tricks
(suggested by @tebbi). For example, here is the code for masking
register update after a bailout on non-zero:
jnz deopt_bailout ;; Bailout branch
setnz bl ;; These three instructions update the mask
add ebx, 255
sar ebx, 31
(On x64, the sequence is:
jnz deopt_bailout
mov r10, 0 ;; We have a scratch register for zero
cmovnz r9, r10 ;; Set to zero if we execute this branch
;; in branch mis-speculation
)
This CL also fixes a bug in register configuration, where we used
to wrongly restrict the array of register name.
Change-Id: I5fceff2faf8bdc527d9934afc284b749574ab69e
Bug: chromium:798964
Reviewed-on: https://chromium-review.googlesource.com/946251
Commit-Queue: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51798}
2018-03-07 06:35:45 +00:00
|
|
|
EXPECT_EQ(s.ToVreg(context), s.ToVreg(call_instr->InputAt(9)));
|
2014-08-27 15:56:11 +00:00
|
|
|
|
|
|
|
EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
|
|
|
|
|
|
|
|
EXPECT_EQ(index, s.size());
|
|
|
|
}
|
|
|
|
|
2015-11-04 17:17:06 +00:00
|
|
|
TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
|
2015-12-10 09:03:30 +00:00
|
|
|
StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
|
|
|
|
MachineType::AnyTagged(), MachineType::AnyTagged());
|
2014-09-03 14:10:20 +00:00
|
|
|
|
2021-01-20 12:07:43 +00:00
|
|
|
BytecodeOffset bailout_id_before(42);
|
|
|
|
BytecodeOffset bailout_id_parent(62);
|
2014-09-03 14:10:20 +00:00
|
|
|
|
|
|
|
// Some arguments for the call node.
|
|
|
|
Node* function_node = m.Parameter(0);
|
|
|
|
Node* receiver = m.Parameter(1);
|
|
|
|
Node* context = m.Int32Constant(66);
|
2015-11-04 17:17:06 +00:00
|
|
|
Node* context2 = m.Int32Constant(46);
|
2014-09-03 14:10:20 +00:00
|
|
|
|
2015-12-10 09:03:30 +00:00
|
|
|
ZoneVector<MachineType> int32_type(1, MachineType::Int32(), zone());
|
|
|
|
ZoneVector<MachineType> float64_type(1, MachineType::Float64(), zone());
|
2015-03-19 14:00:28 +00:00
|
|
|
|
2017-05-31 12:40:13 +00:00
|
|
|
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
|
2018-02-09 19:19:25 +00:00
|
|
|
auto call_descriptor = Linkage::GetStubCallDescriptor(
|
2018-06-18 15:14:29 +00:00
|
|
|
zone(), callable.descriptor(), 1, CallDescriptor::kNeedsFrameState,
|
|
|
|
Operator::kNoProperties);
|
2015-11-04 17:17:06 +00:00
|
|
|
|
2014-09-03 14:10:20 +00:00
|
|
|
// Build frame state for the state before the call.
|
2017-01-05 10:44:44 +00:00
|
|
|
Node* parameters = m.AddNode(
|
|
|
|
m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
|
|
|
|
m.Int32Constant(63));
|
|
|
|
Node* locals = m.AddNode(
|
|
|
|
m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
|
|
|
|
m.Int32Constant(64));
|
|
|
|
Node* stack = m.AddNode(
|
|
|
|
m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
|
|
|
|
m.Int32Constant(65));
|
2015-09-23 09:08:15 +00:00
|
|
|
Node* frame_state_parent = m.AddNode(
|
2015-06-23 07:17:07 +00:00
|
|
|
m.common()->FrameState(bailout_id_parent,
|
|
|
|
OutputFrameStateCombine::Ignore(),
|
|
|
|
m.GetFrameStateFunctionInfo(1, 1)),
|
2015-05-15 12:17:15 +00:00
|
|
|
parameters, locals, stack, context, function_node, m.UndefinedConstant());
|
2014-09-03 14:10:20 +00:00
|
|
|
|
2017-01-05 10:44:44 +00:00
|
|
|
Node* parameters2 = m.AddNode(
|
|
|
|
m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
|
|
|
|
m.Int32Constant(43));
|
|
|
|
Node* locals2 = m.AddNode(
|
|
|
|
m.common()->TypedStateValues(&float64_type, SparseInputMask::Dense()),
|
|
|
|
m.Float64Constant(0.25));
|
|
|
|
Node* stack2 = m.AddNode(
|
2020-02-12 21:38:18 +00:00
|
|
|
m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
|
|
|
|
m.Int32Constant(44));
|
2017-06-30 12:11:44 +00:00
|
|
|
Node* state_node =
|
|
|
|
m.AddNode(m.common()->FrameState(bailout_id_before,
|
|
|
|
OutputFrameStateCombine::PokeAt(0),
|
|
|
|
m.GetFrameStateFunctionInfo(1, 1)),
|
|
|
|
parameters2, locals2, stack2, context2, function_node,
|
|
|
|
frame_state_parent);
|
2014-09-03 14:10:20 +00:00
|
|
|
|
|
|
|
// Build the call.
|
2015-11-04 17:17:06 +00:00
|
|
|
Node* stub_code = m.HeapConstant(callable.code());
|
2016-12-19 11:35:42 +00:00
|
|
|
Node* nodes[] = {stub_code, function_node, receiver, context2, state_node};
|
2018-02-09 19:19:25 +00:00
|
|
|
Node* call = m.CallNWithFrameState(call_descriptor, arraysize(nodes), nodes);
|
2014-09-03 14:10:20 +00:00
|
|
|
m.Return(call);
|
|
|
|
|
|
|
|
Stream s = m.Build(kAllExceptNopInstructions);
|
|
|
|
|
|
|
|
// Skip until kArchCallJSFunction.
|
|
|
|
size_t index = 0;
|
|
|
|
for (; index < s.size() && s[index]->arch_opcode() != kArchCallCodeObject;
|
|
|
|
index++) {
|
|
|
|
}
|
|
|
|
// Now we should have three instructions: call, return.
|
|
|
|
EXPECT_EQ(index + 2, s.size());
|
|
|
|
|
|
|
|
// Check the call instruction
|
|
|
|
const Instruction* call_instr = s[index++];
|
|
|
|
EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode());
|
|
|
|
size_t num_operands =
|
|
|
|
1 + // Code object.
|
[turbofan] IA32 port of branch load poisoning.
The tricky part here is to take away one register from register
allocation for the mask. The only problem is with calls that need
an input operand to be passed in the poison register. For such calls,
we change the register constraint in the instruction selector
to pass the value in whatever place the register allocator sees fit.
During code generation, we then copy the value from that place
to the poison register. By that time, the mask is not necessary
(once we bake the mask into the target, it should be done before
this move).
For the branches, the mask update does not use cmov (unlike x64)
because cmov does not take an immediate and we do not have
a scratch register. Instead we use bit-twiddling tricks
(suggested by @tebbi). For example, here is the code for masking
register update after a bailout on non-zero:
jnz deopt_bailout ;; Bailout branch
setnz bl ;; These three instructions update the mask
add ebx, 255
sar ebx, 31
(On x64, the sequence is:
jnz deopt_bailout
mov r10, 0 ;; We have a scratch register for zero
cmovnz r9, r10 ;; Set to zero if we execute this branch
;; in branch mis-speculation
)
This CL also fixes a bug in register configuration, where we used
to wrongly restrict the array of register name.
Change-Id: I5fceff2faf8bdc527d9934afc284b749574ab69e
Bug: chromium:798964
Reviewed-on: https://chromium-review.googlesource.com/946251
Commit-Queue: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51798}
2018-03-07 06:35:45 +00:00
|
|
|
1 + // Poison index.
|
2014-09-03 14:10:20 +00:00
|
|
|
1 + // Frame state deopt id
|
2020-02-12 21:38:18 +00:00
|
|
|
5 + // One input for each value in frame state + context.
|
2015-05-15 12:17:15 +00:00
|
|
|
5 + // One input for each value in the parent frame state + context.
|
2014-09-03 14:10:20 +00:00
|
|
|
1 + // Function.
|
|
|
|
1; // Context.
|
|
|
|
EXPECT_EQ(num_operands, call_instr->InputCount());
|
|
|
|
// Code object.
|
|
|
|
EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate());
|
|
|
|
|
|
|
|
// Deoptimization id.
|
[turbofan] IA32 port of branch load poisoning.
The tricky part here is to take away one register from register
allocation for the mask. The only problem is with calls that need
an input operand to be passed in the poison register. For such calls,
we change the register constraint in the instruction selector
to pass the value in whatever place the register allocator sees fit.
During code generation, we then copy the value from that place
to the poison register. By that time, the mask is not necessary
(once we bake the mask into the target, it should be done before
this move).
For the branches, the mask update does not use cmov (unlike x64)
because cmov does not take an immediate and we do not have
a scratch register. Instead we use bit-twiddling tricks
(suggested by @tebbi). For example, here is the code for masking
register update after a bailout on non-zero:
jnz deopt_bailout ;; Bailout branch
setnz bl ;; These three instructions update the mask
add ebx, 255
sar ebx, 31
(On x64, the sequence is:
jnz deopt_bailout
mov r10, 0 ;; We have a scratch register for zero
cmovnz r9, r10 ;; Set to zero if we execute this branch
;; in branch mis-speculation
)
This CL also fixes a bug in register configuration, where we used
to wrongly restrict the array of register name.
Change-Id: I5fceff2faf8bdc527d9934afc284b749574ab69e
Bug: chromium:798964
Reviewed-on: https://chromium-review.googlesource.com/946251
Commit-Queue: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51798}
2018-03-07 06:35:45 +00:00
|
|
|
int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(2));
|
2014-09-03 14:10:20 +00:00
|
|
|
FrameStateDescriptor* desc_before =
|
|
|
|
s.GetFrameStateDescriptor(deopt_id_before);
|
2014-10-08 08:47:29 +00:00
|
|
|
FrameStateDescriptor* desc_before_outer = desc_before->outer_state();
|
2014-09-03 14:10:20 +00:00
|
|
|
EXPECT_EQ(bailout_id_before, desc_before->bailout_id());
|
2014-10-08 08:47:29 +00:00
|
|
|
EXPECT_EQ(1u, desc_before_outer->parameters_count());
|
|
|
|
EXPECT_EQ(1u, desc_before_outer->locals_count());
|
|
|
|
EXPECT_EQ(1u, desc_before_outer->stack_count());
|
|
|
|
// Values from parent environment.
|
[turbofan] IA32 port of branch load poisoning.
The tricky part here is to take away one register from register
allocation for the mask. The only problem is with calls that need
an input operand to be passed in the poison register. For such calls,
we change the register constraint in the instruction selector
to pass the value in whatever place the register allocator sees fit.
During code generation, we then copy the value from that place
to the poison register. By that time, the mask is not necessary
(once we bake the mask into the target, it should be done before
this move).
For the branches, the mask update does not use cmov (unlike x64)
because cmov does not take an immediate and we do not have
a scratch register. Instead we use bit-twiddling tricks
(suggested by @tebbi). For example, here is the code for masking
register update after a bailout on non-zero:
jnz deopt_bailout ;; Bailout branch
setnz bl ;; These three instructions update the mask
add ebx, 255
sar ebx, 31
(On x64, the sequence is:
jnz deopt_bailout
mov r10, 0 ;; We have a scratch register for zero
cmovnz r9, r10 ;; Set to zero if we execute this branch
;; in branch mis-speculation
)
This CL also fixes a bug in register configuration, where we used
to wrongly restrict the array of register name.
Change-Id: I5fceff2faf8bdc527d9934afc284b749574ab69e
Bug: chromium:798964
Reviewed-on: https://chromium-review.googlesource.com/946251
Commit-Queue: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51798}
2018-03-07 06:35:45 +00:00
|
|
|
EXPECT_EQ(63, s.ToInt32(call_instr->InputAt(4)));
|
2014-09-03 14:10:20 +00:00
|
|
|
// Context:
|
[turbofan] IA32 port of branch load poisoning.
The tricky part here is to take away one register from register
allocation for the mask. The only problem is with calls that need
an input operand to be passed in the poison register. For such calls,
we change the register constraint in the instruction selector
to pass the value in whatever place the register allocator sees fit.
During code generation, we then copy the value from that place
to the poison register. By that time, the mask is not necessary
(once we bake the mask into the target, it should be done before
this move).
For the branches, the mask update does not use cmov (unlike x64)
because cmov does not take an immediate and we do not have
a scratch register. Instead we use bit-twiddling tricks
(suggested by @tebbi). For example, here is the code for masking
register update after a bailout on non-zero:
jnz deopt_bailout ;; Bailout branch
setnz bl ;; These three instructions update the mask
add ebx, 255
sar ebx, 31
(On x64, the sequence is:
jnz deopt_bailout
mov r10, 0 ;; We have a scratch register for zero
cmovnz r9, r10 ;; Set to zero if we execute this branch
;; in branch mis-speculation
)
This CL also fixes a bug in register configuration, where we used
to wrongly restrict the array of register name.
Change-Id: I5fceff2faf8bdc527d9934afc284b749574ab69e
Bug: chromium:798964
Reviewed-on: https://chromium-review.googlesource.com/946251
Commit-Queue: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51798}
2018-03-07 06:35:45 +00:00
|
|
|
EXPECT_EQ(66, s.ToInt32(call_instr->InputAt(5)));
|
|
|
|
EXPECT_EQ(64, s.ToInt32(call_instr->InputAt(6)));
|
|
|
|
EXPECT_EQ(65, s.ToInt32(call_instr->InputAt(7)));
|
2014-10-08 08:47:29 +00:00
|
|
|
// Values from the nested frame.
|
|
|
|
EXPECT_EQ(1u, desc_before->parameters_count());
|
|
|
|
EXPECT_EQ(1u, desc_before->locals_count());
|
2020-02-12 21:38:18 +00:00
|
|
|
EXPECT_EQ(1u, desc_before->stack_count());
|
[turbofan] IA32 port of branch load poisoning.
The tricky part here is to take away one register from register
allocation for the mask. The only problem is with calls that need
an input operand to be passed in the poison register. For such calls,
we change the register constraint in the instruction selector
to pass the value in whatever place the register allocator sees fit.
During code generation, we then copy the value from that place
to the poison register. By that time, the mask is not necessary
(once we bake the mask into the target, it should be done before
this move).
For the branches, the mask update does not use cmov (unlike x64)
because cmov does not take an immediate and we do not have
a scratch register. Instead we use bit-twiddling tricks
(suggested by @tebbi). For example, here is the code for masking
register update after a bailout on non-zero:
jnz deopt_bailout ;; Bailout branch
setnz bl ;; These three instructions update the mask
add ebx, 255
sar ebx, 31
(On x64, the sequence is:
jnz deopt_bailout
mov r10, 0 ;; We have a scratch register for zero
cmovnz r9, r10 ;; Set to zero if we execute this branch
;; in branch mis-speculation
)
This CL also fixes a bug in register configuration, where we used
to wrongly restrict the array of register name.
Change-Id: I5fceff2faf8bdc527d9934afc284b749574ab69e
Bug: chromium:798964
Reviewed-on: https://chromium-review.googlesource.com/946251
Commit-Queue: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51798}
2018-03-07 06:35:45 +00:00
|
|
|
EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(9)));
|
|
|
|
EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(10)));
|
|
|
|
EXPECT_EQ(0.25, s.ToFloat64(call_instr->InputAt(11)));
|
|
|
|
EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(12)));
|
2014-09-03 14:10:20 +00:00
|
|
|
|
|
|
|
// Function.
|
2020-02-12 21:38:18 +00:00
|
|
|
EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(13)));
|
2014-09-03 14:10:20 +00:00
|
|
|
// Context.
|
2020-02-12 21:38:18 +00:00
|
|
|
EXPECT_EQ(s.ToVreg(context2), s.ToVreg(call_instr->InputAt(14)));
|
2014-09-03 14:10:20 +00:00
|
|
|
// Continuation.
|
|
|
|
|
|
|
|
EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
|
|
|
|
EXPECT_EQ(index, s.size());
|
|
|
|
}
|
|
|
|
|
2014-08-08 07:04:07 +00:00
|
|
|
} // namespace compiler
|
|
|
|
} // namespace internal
|
|
|
|
} // namespace v8
|