// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "test/unittests/compiler/backend/instruction-selector-unittest.h"

#include "src/objects/objects-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

namespace {
// Immediates (random subset).
const int32_t kImmediates[] = {kMinInt, -42, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8,
                               16, 42, 0xFF, 0xFFFF, 0x0F0F0F0F, kMaxInt};
}  // namespace

TEST_F(InstructionSelectorTest, Int32AddWithParameter) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
}

TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
  TRACED_FOREACH(int32_t, imm, kImmediates) {
    {
      StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
      m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
      Stream s = m.Build();
      ASSERT_EQ(1U, s.size());
      EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
      if (imm == 0) {
        ASSERT_EQ(1U, s[0]->InputCount());
      } else {
        ASSERT_EQ(2U, s[0]->InputCount());
        EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
      }
    }
    {
      StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
      m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
      Stream s = m.Build();
      ASSERT_EQ(1U, s.size());
      EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
      if (imm == 0) {
        ASSERT_EQ(1U, s[0]->InputCount());
      } else {
        ASSERT_EQ(2U, s[0]->InputCount());
        EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
      }
    }
  }
}

TEST_F(InstructionSelectorTest, Int32SubWithParameter) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  m.Return(m.Int32Sub(m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
  EXPECT_EQ(1U, s[0]->OutputCount());
}

TEST_F(InstructionSelectorTest, Int32SubWithImmediate) {
  TRACED_FOREACH(int32_t, imm, kImmediates) {
    StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
    m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
    ASSERT_EQ(2U, s[0]->InputCount());
    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
  }
}

// -----------------------------------------------------------------------------
// Conversions.
TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
  StreamBuilder m(this, MachineType::Float32(), MachineType::Float64());
  m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32Float32ToFloat64, s[0]->arch_opcode());
  EXPECT_EQ(1U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}

TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
  StreamBuilder m(this, MachineType::Float64(), MachineType::Float32());
  m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32Float64ToFloat32, s[0]->arch_opcode());
  EXPECT_EQ(1U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}

// -----------------------------------------------------------------------------
// Better left operand for commutative binops

TEST_F(InstructionSelectorTest, BetterLeftOperandTestAddBinop) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* param1 = m.Parameter(0);
  Node* param2 = m.Parameter(1);
  Node* add = m.Int32Add(param1, param2);
  m.Return(m.Int32Add(add, param1));
  Stream s = m.Build();
  ASSERT_EQ(2U, s.size());
  EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
  ASSERT_EQ(2U, s[0]->InputCount());
  ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
  EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(1)));
  ASSERT_EQ(2U, s[1]->InputCount());
  EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[0]->InputAt(0)));
}

TEST_F(InstructionSelectorTest, BetterLeftOperandTestMulBinop) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* param1 = m.Parameter(0);
  Node* param2 = m.Parameter(1);
  Node* mul = m.Int32Mul(param1, param2);
  m.Return(m.Int32Mul(mul, param1));
  Stream s = m.Build();
  ASSERT_EQ(2U, s.size());
  EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
  ASSERT_EQ(2U, s[0]->InputCount());
  ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
  EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[0]->InputAt(1)));
}

// -----------------------------------------------------------------------------
// Conversions.
TEST_F(InstructionSelectorTest, ChangeUint32ToFloat64WithParameter) {
  StreamBuilder m(this, MachineType::Float64(), MachineType::Uint32());
  m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32Uint32ToFloat64, s[0]->arch_opcode());
}

// -----------------------------------------------------------------------------
// Loads and stores

namespace {

struct MemoryAccess {
  MachineType type;
  ArchOpcode load_opcode;
  ArchOpcode store_opcode;
};

std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
  return os << memacc.type;
}

static const MemoryAccess kMemoryAccesses[] = {
    {MachineType::Int8(), kIA32Movsxbl, kIA32Movb},
    {MachineType::Uint8(), kIA32Movzxbl, kIA32Movb},
    {MachineType::Int16(), kIA32Movsxwl, kIA32Movw},
    {MachineType::Uint16(), kIA32Movzxwl, kIA32Movw},
    {MachineType::Int32(), kIA32Movl, kIA32Movl},
    {MachineType::Uint32(), kIA32Movl, kIA32Movl},
    {MachineType::Float32(), kIA32Movss, kIA32Movss},
    {MachineType::Float64(), kIA32Movsd, kIA32Movsd}};

}  // namespace

using InstructionSelectorMemoryAccessTest =
    InstructionSelectorTestWithParam<MemoryAccess>;

TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
  const MemoryAccess memacc = GetParam();
  StreamBuilder m(this, memacc.type, MachineType::Pointer(),
                  MachineType::Int32());
  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
  EXPECT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}

TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateBase) {
  const MemoryAccess memacc = GetParam();
  TRACED_FOREACH(int32_t, base, kImmediates) {
    StreamBuilder m(this, memacc.type, MachineType::Pointer());
    m.Return(m.Load(memacc.type, m.Int32Constant(base), m.Parameter(0)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
    if (base == 0) {
      ASSERT_EQ(1U, s[0]->InputCount());
    } else {
      ASSERT_EQ(2U, s[0]->InputCount());
      ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
      EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
    }
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
}

TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
  const MemoryAccess memacc = GetParam();
  TRACED_FOREACH(int32_t, index, kImmediates) {
    StreamBuilder m(this, memacc.type, MachineType::Pointer());
    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
    if (index == 0) {
      ASSERT_EQ(1U, s[0]->InputCount());
    } else {
      ASSERT_EQ(2U, s[0]->InputCount());
      ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
      EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
    }
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
}

TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
  const MemoryAccess memacc = GetParam();
  StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
                  MachineType::Int32(), memacc.type);
  m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
          m.Parameter(2), kNoWriteBarrier);
  m.Return(m.Int32Constant(0));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
  EXPECT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(0U, s[0]->OutputCount());
}

TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateBase) {
  const MemoryAccess memacc = GetParam();
  TRACED_FOREACH(int32_t, base, kImmediates) {
    StreamBuilder m(this, MachineType::Int32(),
                    MachineType::Int32(), memacc.type);
    m.Store(memacc.type.representation(), m.Int32Constant(base),
            m.Parameter(0), m.Parameter(1), kNoWriteBarrier);
    m.Return(m.Int32Constant(0));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
    if (base == 0) {
      ASSERT_EQ(2U, s[0]->InputCount());
    } else {
      ASSERT_EQ(3U, s[0]->InputCount());
      ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
      EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
    }
    EXPECT_EQ(0U, s[0]->OutputCount());
  }
}

TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
  const MemoryAccess memacc = GetParam();
  TRACED_FOREACH(int32_t, index, kImmediates) {
    StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
                    memacc.type);
    m.Store(memacc.type.representation(), m.Parameter(0),
            m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
    m.Return(m.Int32Constant(0));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
    if (index == 0) {
      ASSERT_EQ(2U, s[0]->InputCount());
    } else {
      ASSERT_EQ(3U, s[0]->InputCount());
      ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
      EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
    }
    EXPECT_EQ(0U, s[0]->OutputCount());
  }
}

INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
                         InstructionSelectorMemoryAccessTest,
                         ::testing::ValuesIn(kMemoryAccesses));

// -----------------------------------------------------------------------------
// AddressingMode for loads and stores.

// Builds a load and a store that share the same base/index nodes and checks
// that both instructions are selected with the expected addressing mode.
class AddressingModeUnitTest : public InstructionSelectorTest {
 public:
  AddressingModeUnitTest() : m(nullptr) { Reset(); }
  ~AddressingModeUnitTest() override { delete m; }

  void Run(Node* base, Node* load_index, Node* store_index,
           AddressingMode mode) {
    Node* load = m->Load(MachineType::Int32(), base, load_index);
    m->Store(MachineRepresentation::kWord32, base, store_index, load,
             kNoWriteBarrier);
    m->Return(m->Int32Constant(0));
    Stream s = m->Build();
    ASSERT_EQ(2U, s.size());
    EXPECT_EQ(mode, s[0]->addressing_mode());
    EXPECT_EQ(mode, s[1]->addressing_mode());
  }

  Node* zero;
  Node* null_ptr;
  Node* non_zero;
  Node* base_reg;   // opaque value to generate base as register
  Node* index_reg;  // opaque value to generate index as register
  Node* scales[4];
  StreamBuilder* m;

  void Reset() {
    delete m;
    m = new StreamBuilder(this, MachineType::Int32(), MachineType::Int32(),
                          MachineType::Int32());
    zero = m->Int32Constant(0);
    null_ptr = m->Int32Constant(0);
    non_zero = m->Int32Constant(127);
    base_reg = m->Parameter(0);
    index_reg = m->Parameter(0);
    scales[0] = m->Int32Constant(1);
    scales[1] = m->Int32Constant(2);
    scales[2] = m->Int32Constant(4);
    scales[3] = m->Int32Constant(8);
  }
};

TEST_F(AddressingModeUnitTest, AddressingMode_MR) {
  Node* base = base_reg;
  Node* index = zero;
  Run(base, index, index, kMode_MR);
}

TEST_F(AddressingModeUnitTest, AddressingMode_MRI) {
  Node* base = base_reg;
  Node* index = non_zero;
  Run(base, index, index, kMode_MRI);
}

TEST_F(AddressingModeUnitTest, AddressingMode_MR1) {
  Node* base = base_reg;
  Node* index = index_reg;
  Run(base, index, index, kMode_MR1);
}

TEST_F(AddressingModeUnitTest, AddressingMode_MRN) {
  AddressingMode expected[] = {kMode_MR1, kMode_MR2, kMode_MR4, kMode_MR8};
  for (size_t i = 0; i < arraysize(scales); ++i) {
    Reset();
    Node* base = base_reg;
    Node* load_index = m->Int32Mul(index_reg, scales[i]);
    Node* store_index = m->Int32Mul(index_reg, scales[i]);
    Run(base, load_index, store_index, expected[i]);
  }
}

TEST_F(AddressingModeUnitTest, AddressingMode_MR1I) {
  Node* base = base_reg;
  Node* load_index = m->Int32Add(index_reg, non_zero);
  Node* store_index = m->Int32Add(index_reg, non_zero);
  Run(base, load_index, store_index, kMode_MR1I);
}

TEST_F(AddressingModeUnitTest, AddressingMode_MRNI) {
  AddressingMode expected[] = {kMode_MR1I, kMode_MR2I, kMode_MR4I, kMode_MR8I};
  for (size_t i = 0; i < arraysize(scales); ++i) {
    Reset();
    Node* base = base_reg;
    Node* load_index =
        m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
    Node* store_index =
        m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
    Run(base, load_index, store_index, expected[i]);
  }
}

TEST_F(AddressingModeUnitTest, AddressingMode_M1ToMR) {
  Node* base = null_ptr;
  Node* index = index_reg;
  // M1 maps to MR
  Run(base, index, index, kMode_MR);
}

TEST_F(AddressingModeUnitTest, AddressingMode_MN) {
  AddressingMode expected[] = {kMode_MR, kMode_M2, kMode_M4, kMode_M8};
  for (size_t i = 0; i < arraysize(scales); ++i) {
    Reset();
    Node* base = null_ptr;
    Node* load_index = m->Int32Mul(index_reg, scales[i]);
    Node* store_index = m->Int32Mul(index_reg, scales[i]);
    Run(base, load_index, store_index, expected[i]);
  }
}

TEST_F(AddressingModeUnitTest, AddressingMode_M1IToMRI) {
  Node* base = null_ptr;
  Node* load_index = m->Int32Add(index_reg, non_zero);
  Node* store_index = m->Int32Add(index_reg, non_zero);
  // M1I maps to MRI
  Run(base, load_index, store_index, kMode_MRI);
}

TEST_F(AddressingModeUnitTest, AddressingMode_MNI) {
  AddressingMode expected[] = {kMode_MRI, kMode_M2I, kMode_M4I, kMode_M8I};
  for (size_t i = 0; i < arraysize(scales); ++i) {
    Reset();
    Node* base = null_ptr;
    Node* load_index =
        m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
    Node* store_index =
        m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
    Run(base, load_index, store_index, expected[i]);
  }
}

TEST_F(AddressingModeUnitTest, AddressingMode_MI) {
  Node* bases[] = {null_ptr, non_zero};
  Node* indices[] = {zero, non_zero};
  for (size_t i = 0; i < arraysize(bases); ++i) {
    for (size_t j = 0; j < arraysize(indices); ++j) {
      Reset();
      Node* base = bases[i];
      Node* index = indices[j];
      Run(base, index, index, kMode_MI);
    }
  }
}

// -----------------------------------------------------------------------------
// Multiplication.

namespace {

struct MultParam {
  int value;
  bool lea_expected;
  AddressingMode addressing_mode;
};

std::ostream& operator<<(std::ostream& os, const MultParam& m) {
  return os << m.value << "." << m.lea_expected << "."
            << m.addressing_mode;
}

const MultParam kMultParams[] = {
    {-1, false, kMode_None}, {0, false, kMode_None}, {1, true, kMode_MR},
    {2, true, kMode_M2},     {3, true, kMode_MR2},   {4, true, kMode_M4},
    {5, true, kMode_MR4},    {6, false, kMode_None}, {7, false, kMode_None},
    {8, true, kMode_M8},     {9, true, kMode_MR8},   {10, false, kMode_None},
    {11, false, kMode_None}};

}  // namespace

using InstructionSelectorMultTest =
    InstructionSelectorTestWithParam<MultParam>;

// Number of instruction inputs a lea with the given addressing mode takes.
static unsigned InputCountForLea(AddressingMode mode) {
  switch (mode) {
    case kMode_MR1I:
    case kMode_MR2I:
    case kMode_MR4I:
    case kMode_MR8I:
      return 3U;
    case kMode_M1I:
    case kMode_M2I:
    case kMode_M4I:
    case kMode_M8I:
      return 2U;
    case kMode_MR1:
    case kMode_MR2:
    case kMode_MR4:
    case kMode_MR8:
    case kMode_MRI:
      return 2U;
    case kMode_M1:
    case kMode_M2:
    case kMode_M4:
    case kMode_M8:
    case kMode_MI:
    case kMode_MR:
      return 1U;
    default:
      UNREACHABLE();
  }
}

// Addressing mode expected when the immediate |imm| is added to a
// multiplication that matched |m.addressing_mode|.
static AddressingMode AddressingModeForAddMult(int32_t imm,
                                               const MultParam& m) {
  if (imm == 0) return m.addressing_mode;
  switch (m.addressing_mode) {
    case kMode_MR1:
      return kMode_MR1I;
    case kMode_MR2:
      return kMode_MR2I;
    case kMode_MR4:
      return kMode_MR4I;
    case kMode_MR8:
      return kMode_MR8I;
    case kMode_M1:
      return kMode_M1I;
    case kMode_M2:
      return kMode_M2I;
    case kMode_M4:
      return kMode_M4I;
    case kMode_M8:
      return kMode_M8I;
    case kMode_MR:
      return kMode_MRI;
    default:
      UNREACHABLE();
  }
}

TEST_P(InstructionSelectorMultTest, Mult32) {
  const MultParam m_param = GetParam();
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
  Node* param = m.Parameter(0);
  Node* mult = m.Int32Mul(param, m.Int32Constant(m_param.value));
  m.Return(mult);
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(m_param.addressing_mode, s[0]->addressing_mode());
  if (m_param.lea_expected) {
    EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
    ASSERT_EQ(InputCountForLea(s[0]->addressing_mode()), s[0]->InputCount());
  } else {
    EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
    ASSERT_EQ(2U, s[0]->InputCount());
  }
  EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->InputAt(0)));
}

TEST_P(InstructionSelectorMultTest, MultAdd32) {
  TRACED_FOREACH(int32_t, imm, kImmediates) {
    const MultParam m_param = GetParam();
    StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
    Node* param = m.Parameter(0);
    Node* mult = m.Int32Add(m.Int32Mul(param, m.Int32Constant(m_param.value)),
                            m.Int32Constant(imm));
    m.Return(mult);
    Stream s = m.Build();
    if (m_param.lea_expected) {
      ASSERT_EQ(1U, s.size());
      EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
      EXPECT_EQ(AddressingModeForAddMult(imm, m_param),
                s[0]->addressing_mode());
      unsigned input_count = InputCountForLea(s[0]->addressing_mode());
      ASSERT_EQ(input_count, s[0]->InputCount());
      if (imm != 0) {
        ASSERT_EQ(InstructionOperand::IMMEDIATE,
                  s[0]->InputAt(input_count - 1)->kind());
        EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
      }
    } else {
      ASSERT_EQ(2U, s.size());
      EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
      EXPECT_EQ(kIA32Lea, s[1]->arch_opcode());
    }
  }
}

INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorMultTest,
                         ::testing::ValuesIn(kMultParams));

TEST_F(InstructionSelectorTest, Int32MulHigh) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  Node* const n = m.Int32MulHigh(p0, p1);
  m.Return(n);
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32ImulHigh, s[0]->arch_opcode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_TRUE(s.IsFixed(s[0]->InputAt(0), eax));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
  EXPECT_TRUE(!s.IsUsedAtStart(s[0]->InputAt(1)));
  ASSERT_EQ(1U, s[0]->OutputCount());
  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
  EXPECT_TRUE(s.IsFixed(s[0]->OutputAt(0), edx));
}

// -----------------------------------------------------------------------------
// Binops with a memory operand.

TEST_F(InstructionSelectorTest, LoadAnd32) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  m.Return(
      m.Word32And(p0, m.Load(MachineType::Int32(), p1, m.Int32Constant(127))));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32And, s[0]->arch_opcode());
  ASSERT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
}

TEST_F(InstructionSelectorTest, LoadImmutableAnd32) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  m.Return(m.Word32And(
      p0, m.LoadImmutable(MachineType::Int32(), p1, m.Int32Constant(127))));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32And, s[0]->arch_opcode());
  ASSERT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
}

TEST_F(InstructionSelectorTest, LoadOr32) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  m.Return(
      m.Word32Or(p0, m.Load(MachineType::Int32(), p1, m.Int32Constant(127))));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32Or, s[0]->arch_opcode());
  ASSERT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
}

TEST_F(InstructionSelectorTest, LoadXor32) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  m.Return(
      m.Word32Xor(p0, m.Load(MachineType::Int32(), p1, m.Int32Constant(127))));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32Xor, s[0]->arch_opcode());
  ASSERT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
}

TEST_F(InstructionSelectorTest, LoadAdd32) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  m.Return(
      m.Int32Add(p0, m.Load(MachineType::Int32(), p1, m.Int32Constant(127))));
  Stream s = m.Build();
  // Use lea instead of add, so memory operand is invalid.
  ASSERT_EQ(2U, s.size());
  EXPECT_EQ(kIA32Movl, s[0]->arch_opcode());
  EXPECT_EQ(kIA32Lea, s[1]->arch_opcode());
}

TEST_F(InstructionSelectorTest, LoadSub32) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  m.Return(
      m.Int32Sub(p0, m.Load(MachineType::Int32(), p1, m.Int32Constant(127))));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
  ASSERT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
}

// -----------------------------------------------------------------------------
// Floating point operations.
TEST_F(InstructionSelectorTest, Float32Abs) {
  {
    StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Float32Abs(p0);
    m.Return(n);
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kFloat32Abs, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
  }
  {
    StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Float32Abs(p0);
    m.Return(n);
    Stream s = m.Build(AVX);
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kFloat32Abs, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
  }
}

TEST_F(InstructionSelectorTest, Float64Abs) {
  {
    StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Float64Abs(p0);
    m.Return(n);
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kFloat64Abs, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
  }
  {
    StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Float64Abs(p0);
    m.Return(n);
    Stream s = m.Build(AVX);
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kFloat64Abs, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
  }
}

TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
  {
    StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
                    MachineType::Float64());
    Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
    Node* mul = m.Float64Mul(add, m.Parameter(1));
    Node* sub = m.Float64Sub(mul, add);
    Node* ret = m.Float64Div(mul, sub);
    m.Return(ret);
    Stream s = m.Build(AVX);
    ASSERT_EQ(4U, s.size());
    EXPECT_EQ(kFloat64Add, s[0]->arch_opcode());
    EXPECT_EQ(kFloat64Mul, s[1]->arch_opcode());
    EXPECT_EQ(kFloat64Sub, s[2]->arch_opcode());
    EXPECT_EQ(kFloat64Div, s[3]->arch_opcode());
  }
  {
    StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
                    MachineType::Float64());
    Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
    Node* mul = m.Float64Mul(add, m.Parameter(1));
    Node* sub = m.Float64Sub(mul, add);
    Node* ret = m.Float64Div(mul, sub);
    m.Return(ret);
    Stream s = m.Build();
    ASSERT_EQ(4U, s.size());
    EXPECT_EQ(kFloat64Add, s[0]->arch_opcode());
    EXPECT_EQ(kFloat64Mul, s[1]->arch_opcode());
    EXPECT_EQ(kFloat64Sub, s[2]->arch_opcode());
    EXPECT_EQ(kFloat64Div, s[3]->arch_opcode());
  }
}

// -----------------------------------------------------------------------------
// Miscellaneous.
TEST_F(InstructionSelectorTest, Word32Clz) {
  StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
  Node* const p0 = m.Parameter(0);
  Node* const n = m.Word32Clz(p0);
  m.Return(n);
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32Lzcnt, s[0]->arch_opcode());
  ASSERT_EQ(1U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  ASSERT_EQ(1U, s[0]->OutputCount());
  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}

// SIMD.

TEST_F(InstructionSelectorTest, SIMDSplatZero) {
  // Test optimization for splat of constant 0.
  // {i8x16,i16x8,i32x4,i64x2}.splat(const(0)) -> v128.zero().
  // Optimizations for f32x4.splat and f64x2.splat are not implemented since
  // they do not improve the codegen as much (same number of instructions).
  {
    StreamBuilder m(this, MachineType::Simd128());
    Node* const splat =
        m.I64x2SplatI32Pair(m.Int32Constant(0), m.Int32Constant(0));
    m.Return(splat);
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kIA32S128Zero, s[0]->arch_opcode());
    ASSERT_EQ(0U, s[0]->InputCount());
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
  {
    StreamBuilder m(this, MachineType::Simd128());
    Node* const splat = m.I32x4Splat(m.Int32Constant(0));
    m.Return(splat);
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kIA32S128Zero, s[0]->arch_opcode());
    ASSERT_EQ(0U, s[0]->InputCount());
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
  {
    StreamBuilder m(this, MachineType::Simd128());
    Node* const splat = m.I16x8Splat(m.Int32Constant(0));
    m.Return(splat);
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kIA32S128Zero, s[0]->arch_opcode());
    ASSERT_EQ(0U, s[0]->InputCount());
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
  {
    StreamBuilder m(this, MachineType::Simd128());
    Node* const splat = m.I8x16Splat(m.Int32Constant(0));
    m.Return(splat);
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kIA32S128Zero, s[0]->arch_opcode());
    ASSERT_EQ(0U, s[0]->InputCount());
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
}

struct SwizzleConstants {
  uint8_t shuffle[kSimd128Size];
  bool omit_add;
};

static constexpr SwizzleConstants kSwizzleConstants[] = {
    {
        // all lanes < kSimd128Size
        {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
        true,
    },
    {
        // lanes that are >= kSimd128Size have top bit set
        {12, 13, 14, 15, 0x90, 0x91, 0x92, 0x93, 0xA0, 0xA1, 0xA2, 0xA3, 0xFC,
         0xFD, 0xFE, 0xFF},
        true,
    },
    {
        {12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27},
        false,
    },
};

using InstructionSelectorSIMDSwizzleConstantTest =
    InstructionSelectorTestWithParam<SwizzleConstants>;

TEST_P(InstructionSelectorSIMDSwizzleConstantTest, SimdSwizzleConstant) {
  // Test optimization of swizzle with constant indices.
  auto param = GetParam();
  StreamBuilder m(this, MachineType::Simd128(), MachineType::Simd128());
  Node* const c = m.S128Const(param.shuffle);
  Node* swizzle = m.AddNode(m.machine()->I8x16Swizzle(), m.Parameter(0), c);
  m.Return(swizzle);
  Stream s = m.Build();
  ASSERT_EQ(2U, s.size());
  ASSERT_EQ(kIA32I8x16Swizzle, s[1]->arch_opcode());
  ASSERT_EQ(param.omit_add, s[1]->misc());
  ASSERT_EQ(1U, s[0]->OutputCount());
}

INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
                         InstructionSelectorSIMDSwizzleConstantTest,
                         ::testing::ValuesIn(kSwizzleConstants));

}  // namespace compiler
}  // namespace internal
}  // namespace v8