[turbofan] First step towards correctified 64-bit addressing.

Also remove the LEA matching from x64, since it was never really
effective. We'll optimize that once we're correct.

TEST=cctest,unittests
R=dcarney@chromium.org

Review URL: https://codereview.chromium.org/652363006

Cr-Commit-Position: refs/heads/master@{#25024}
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@25024 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
bmeurer@chromium.org 2014-10-31 06:41:07 +00:00
parent bd5c9834b6
commit 948ce2141e
17 changed files with 506 additions and 797 deletions


@@ -550,7 +550,6 @@ source_set("v8_base") {
"src/compiler/node-aux-data.h",
"src/compiler/node-cache.cc",
"src/compiler/node-cache.h",
"src/compiler/node-matchers.cc",
"src/compiler/node-matchers.h",
"src/compiler/node-properties-inl.h",
"src/compiler/node-properties.h",


@@ -46,14 +46,14 @@ Node* ChangeLowering::HeapNumberValueIndexConstant() {
STATIC_ASSERT(HeapNumber::kValueOffset % kPointerSize == 0);
const int heap_number_value_offset =
((HeapNumber::kValueOffset / kPointerSize) * (machine()->Is64() ? 8 : 4));
return jsgraph()->Int32Constant(heap_number_value_offset - kHeapObjectTag);
return jsgraph()->IntPtrConstant(heap_number_value_offset - kHeapObjectTag);
}
Node* ChangeLowering::SmiMaxValueConstant() {
const int smi_value_size = machine()->Is32() ? SmiTagging<4>::SmiValueSize()
: SmiTagging<8>::SmiValueSize();
return jsgraph()->Int32Constant(
return jsgraph()->IntPtrConstant(
-(static_cast<int>(0xffffffffu << (smi_value_size - 1)) + 1));
}
@@ -61,7 +61,7 @@ Node* ChangeLowering::SmiMaxValueConstant() {
Node* ChangeLowering::SmiShiftBitsConstant() {
const int smi_shift_size = machine()->Is32() ? SmiTagging<4>::SmiShiftSize()
: SmiTagging<8>::SmiShiftSize();
return jsgraph()->Int32Constant(smi_shift_size + kSmiTagSize);
return jsgraph()->IntPtrConstant(smi_shift_size + kSmiTagSize);
}
@@ -166,7 +166,7 @@ Reduction ChangeLowering::ChangeTaggedToUI32(Node* val, Node* control,
STATIC_ASSERT(kSmiTagMask == 1);
Node* tag = graph()->NewNode(machine()->WordAnd(), val,
jsgraph()->Int32Constant(kSmiTagMask));
jsgraph()->IntPtrConstant(kSmiTagMask));
Node* branch = graph()->NewNode(common()->Branch(), tag, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
@@ -192,7 +192,7 @@ Reduction ChangeLowering::ChangeTaggedToFloat64(Node* val, Node* control) {
STATIC_ASSERT(kSmiTagMask == 1);
Node* tag = graph()->NewNode(machine()->WordAnd(), val,
jsgraph()->Int32Constant(kSmiTagMask));
jsgraph()->IntPtrConstant(kSmiTagMask));
Node* branch = graph()->NewNode(common()->Branch(), tag, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
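These constants feed pointer-width word operations (WordAnd, WordShl, WordSar), so on 64-bit targets they must be 64-bit values; an Int32Constant(32) would not be a legal shift operand for a Word64 operation. A minimal sketch of the two Smi layouts, assuming V8's standard tagging (shift size 0 plus tag size 1 on 32-bit, shift size 31 plus tag size 1 on 64-bit); the helper names are illustrative, not from the commit:

#include <cstdint>

int64_t TagSmi64(int32_t value) {
  // SmiTagging<8>::SmiShiftSize() (31) + kSmiTagSize (1) == 32:
  // the payload lives in the upper half of the 64-bit word.
  return static_cast<int64_t>(value) << 32;
}

int32_t TagSmi32(int32_t value) {
  // SmiTagging<4>::SmiShiftSize() (0) + kSmiTagSize (1) == 1:
  // 32-bit Smis carry a 31-bit payload, so |value| must be <= 2^30 - 1.
  return value << 1;
}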


@@ -53,6 +53,178 @@ static AddressingMode AdjustAddressingMode(AddressingMode base_mode,
}
// Fairly Intel-specific node matcher used for matching scale factors in
// addressing modes.
// Matches nodes of form [x * N] for N in {1,2,4,8}
class ScaleFactorMatcher : public NodeMatcher {
public:
static const int kMatchedFactors[4];
explicit ScaleFactorMatcher(Node* node);
bool Matches() const { return left_ != NULL; }
int Power() const {
DCHECK(Matches());
return power_;
}
Node* Left() const {
DCHECK(Matches());
return left_;
}
private:
Node* left_;
int power_;
};
// Fairly Intel-specific node matcher used for matching index and displacement
// operands in addressing modes.
// Matches nodes of form:
// [x * N]
// [x * N + K]
// [x + K]
// [x] -- fallback case
// for N in {1,2,4,8} and K int32_t
class IndexAndDisplacementMatcher : public NodeMatcher {
public:
explicit IndexAndDisplacementMatcher(Node* node);
Node* index_node() const { return index_node_; }
int displacement() const { return displacement_; }
int power() const { return power_; }
private:
Node* index_node_;
int displacement_;
int power_;
};
// Fairly Intel-specific node matcher used for matching multiplies that can be
// transformed to lea instructions.
// Matches nodes of form:
// [x * N]
// for N in {1,2,3,4,5,8,9}
class LeaMultiplyMatcher : public NodeMatcher {
public:
static const int kMatchedFactors[7];
explicit LeaMultiplyMatcher(Node* node);
bool Matches() const { return left_ != NULL; }
int Power() const {
DCHECK(Matches());
return power_;
}
Node* Left() const {
DCHECK(Matches());
return left_;
}
// Displacement will be either 0 or 1.
int32_t Displacement() const {
DCHECK(Matches());
return displacement_;
}
private:
Node* left_;
int power_;
int displacement_;
};
const int ScaleFactorMatcher::kMatchedFactors[] = {1, 2, 4, 8};
ScaleFactorMatcher::ScaleFactorMatcher(Node* node)
: NodeMatcher(node), left_(NULL), power_(0) {
if (opcode() != IrOpcode::kInt32Mul) return;
// TODO(dcarney): should test 64 bit ints as well.
Int32BinopMatcher m(this->node());
if (!m.right().HasValue()) return;
int32_t value = m.right().Value();
switch (value) {
case 8:
power_++; // Fall through.
case 4:
power_++; // Fall through.
case 2:
power_++; // Fall through.
case 1:
break;
default:
return;
}
left_ = m.left().node();
}
IndexAndDisplacementMatcher::IndexAndDisplacementMatcher(Node* node)
: NodeMatcher(node), index_node_(node), displacement_(0), power_(0) {
if (opcode() == IrOpcode::kInt32Add) {
Int32BinopMatcher m(this->node());
if (m.right().HasValue()) {
displacement_ = m.right().Value();
index_node_ = m.left().node();
}
}
// Test scale factor.
ScaleFactorMatcher scale_matcher(index_node_);
if (scale_matcher.Matches()) {
index_node_ = scale_matcher.Left();
power_ = scale_matcher.Power();
}
}
const int LeaMultiplyMatcher::kMatchedFactors[7] = {1, 2, 3, 4, 5, 8, 9};
LeaMultiplyMatcher::LeaMultiplyMatcher(Node* node)
: NodeMatcher(node), left_(NULL), power_(0), displacement_(0) {
if (opcode() != IrOpcode::kInt32Mul && opcode() != IrOpcode::kInt64Mul) {
return;
}
int64_t value;
Node* left = NULL;
{
Int32BinopMatcher m(this->node());
if (m.right().HasValue()) {
value = m.right().Value();
left = m.left().node();
} else {
Int64BinopMatcher m(this->node());
if (m.right().HasValue()) {
value = m.right().Value();
left = m.left().node();
} else {
return;
}
}
}
switch (value) {
case 9:
case 8:
power_++; // Fall through.
case 5:
case 4:
power_++; // Fall through.
case 3:
case 2:
power_++; // Fall through.
case 1:
break;
default:
return;
}
if (!base::bits::IsPowerOfTwo64(value)) {
displacement_ = 1;
}
left_ = left;
}
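The cascading fall-throughs count how many times the factor halves down to 1, i.e. they compute log2 of the scale; the LEA table additionally admits 3, 5 and 9 because x*9 == x + x*8 fits the [base + index*scale] form with displacement 1. A standalone sketch of the same trick (illustrative, not part of the commit):

int PowerOfScaleFactor(int n) {  // n in {1, 2, 4, 8}
  int power = 0;
  switch (n) {
    case 8:
      power++;  // Fall through.
    case 4:
      power++;  // Fall through.
    case 2:
      power++;  // Fall through.
    case 1:
      break;
    default:
      return -1;  // not a matchable scale factor
  }
  return power;
}
// PowerOfScaleFactor(8) == 3, so [x * 8] selects index x with scale 2**3.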
class AddressingModeMatcher {
public:
AddressingModeMatcher(IA32OperandGenerator* g, Node* base, Node* index)


@@ -675,8 +675,10 @@ class Constant FINAL {
Type type() const { return type_; }
int32_t ToInt32() const {
DCHECK_EQ(kInt32, type());
return static_cast<int32_t>(value_);
DCHECK(type() == kInt32 || type() == kInt64);
const int32_t value = static_cast<int32_t>(value_);
DCHECK_EQ(value_, static_cast<int64_t>(value));
return value;
}
int64_t ToInt64() const {
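The relaxed DCHECK lets a kInt64 constant be consumed as a 32-bit immediate whenever its value round-trips through int32_t; the same predicate guards kInt64Constant in the x64 CanBeImmediate() change below. A sketch of that check (FitsInInt32 is an illustrative name):

#include <cstdint>

bool FitsInInt32(int64_t v) {
  return v == static_cast<int64_t>(static_cast<int32_t>(v));
}
// FitsInInt32(-1)               == true   (the sign bit round-trips)
// FitsInInt32(INT64_C(1) << 31) == false  (2147483648 overflows int32_t)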


@@ -81,26 +81,34 @@ inline MachineType RepresentationOf(MachineType machine_type) {
return static_cast<MachineType>(result);
}
// Gets the element size in bytes of the machine type.
inline int ElementSizeOf(MachineType machine_type) {
// Gets the log2 of the element size in bytes of the machine type.
inline int ElementSizeLog2Of(MachineType machine_type) {
switch (RepresentationOf(machine_type)) {
case kRepBit:
case kRepWord8:
return 1;
return 0;
case kRepWord16:
return 2;
return 1;
case kRepWord32:
case kRepFloat32:
return 4;
return 2;
case kRepWord64:
case kRepFloat64:
return 8;
return 3;
case kRepTagged:
return kPointerSize;
return kPointerSizeLog2;
default:
UNREACHABLE();
return kPointerSize;
break;
}
UNREACHABLE();
return -1;
}
// Gets the element size in bytes of the machine type.
inline int ElementSizeOf(MachineType machine_type) {
const int shift = ElementSizeLog2Of(machine_type);
DCHECK_NE(-1, shift);
return 1 << shift;
}
// Describes the inputs and outputs of a function or call.
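Returning the log2 lets SimplifiedLowering::ComputeIndex (below) emit a shift instead of a multiply. A quick sanity sketch of the mapping, assuming the standard machine types; illustrative only:

#include <cassert>

void CheckElementSizes() {
  assert(ElementSizeLog2Of(kMachInt8) == 0);     // 1-byte elements
  assert(ElementSizeLog2Of(kMachFloat32) == 2);  // 4-byte elements
  assert(ElementSizeLog2Of(kMachFloat64) == 3);  // 8-byte elements
  assert(ElementSizeOf(kMachInt64) == 1 << ElementSizeLog2Of(kMachInt64));
}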


@@ -1,104 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/generic-node-inl.h"
#include "src/compiler/node-matchers.h"
namespace v8 {
namespace internal {
namespace compiler {
const int ScaleFactorMatcher::kMatchedFactors[] = {1, 2, 4, 8};
ScaleFactorMatcher::ScaleFactorMatcher(Node* node)
: NodeMatcher(node), left_(NULL), power_(0) {
if (opcode() != IrOpcode::kInt32Mul) return;
// TODO(dcarney): should test 64 bit ints as well.
Int32BinopMatcher m(this->node());
if (!m.right().HasValue()) return;
int32_t value = m.right().Value();
switch (value) {
case 8:
power_++; // Fall through.
case 4:
power_++; // Fall through.
case 2:
power_++; // Fall through.
case 1:
break;
default:
return;
}
left_ = m.left().node();
}
IndexAndDisplacementMatcher::IndexAndDisplacementMatcher(Node* node)
: NodeMatcher(node), index_node_(node), displacement_(0), power_(0) {
if (opcode() == IrOpcode::kInt32Add) {
Int32BinopMatcher m(this->node());
if (m.right().HasValue()) {
displacement_ = m.right().Value();
index_node_ = m.left().node();
}
}
// Test scale factor.
ScaleFactorMatcher scale_matcher(index_node_);
if (scale_matcher.Matches()) {
index_node_ = scale_matcher.Left();
power_ = scale_matcher.Power();
}
}
const int LeaMultiplyMatcher::kMatchedFactors[7] = {1, 2, 3, 4, 5, 8, 9};
LeaMultiplyMatcher::LeaMultiplyMatcher(Node* node)
: NodeMatcher(node), left_(NULL), power_(0), displacement_(0) {
if (opcode() != IrOpcode::kInt32Mul && opcode() != IrOpcode::kInt64Mul) {
return;
}
int64_t value;
Node* left = NULL;
{
Int32BinopMatcher m(this->node());
if (m.right().HasValue()) {
value = m.right().Value();
left = m.left().node();
} else {
Int64BinopMatcher m(this->node());
if (m.right().HasValue()) {
value = m.right().Value();
left = m.left().node();
} else {
return;
}
}
}
switch (value) {
case 9:
case 8:
power_++; // Fall through.
case 5:
case 4:
power_++; // Fall through.
case 3:
case 2:
power_++; // Fall through.
case 1:
break;
default:
return;
}
if (!base::bits::IsPowerOfTwo64(value)) {
displacement_ = 1;
}
left_ = left;
}
} // namespace compiler
} // namespace internal
} // namespace v8


@@ -150,88 +150,6 @@ typedef BinopMatcher<UintPtrMatcher, UintPtrMatcher> UintPtrBinopMatcher;
typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher;
// Fairly Intel-specific node matcher used for matching scale factors in
// addressing modes.
// Matches nodes of form [x * N] for N in {1,2,4,8}
class ScaleFactorMatcher : public NodeMatcher {
public:
static const int kMatchedFactors[4];
explicit ScaleFactorMatcher(Node* node);
bool Matches() const { return left_ != NULL; }
int Power() const {
DCHECK(Matches());
return power_;
}
Node* Left() const {
DCHECK(Matches());
return left_;
}
private:
Node* left_;
int power_;
};
// Fairly Intel-specific node matcher used for matching index and displacement
// operands in addressing modes.
// Matches nodes of form:
// [x * N]
// [x * N + K]
// [x + K]
// [x] -- fallback case
// for N in {1,2,4,8} and K int32_t
class IndexAndDisplacementMatcher : public NodeMatcher {
public:
explicit IndexAndDisplacementMatcher(Node* node);
Node* index_node() const { return index_node_; }
int displacement() const { return displacement_; }
int power() const { return power_; }
private:
Node* index_node_;
int displacement_;
int power_;
};
// Fairly Intel-specific node matcher used for matching multiplies that can be
// transformed to lea instructions.
// Matches nodes of form:
// [x * N]
// for N in {1,2,3,4,5,8,9}
class LeaMultiplyMatcher : public NodeMatcher {
public:
static const int kMatchedFactors[7];
explicit LeaMultiplyMatcher(Node* node);
bool Matches() const { return left_ != NULL; }
int Power() const {
DCHECK(Matches());
return power_;
}
Node* Left() const {
DCHECK(Matches());
return left_;
}
// Displacement will be either 0 or 1.
int32_t Displacement() const {
DCHECK(Matches());
return displacement_;
}
private:
Node* left_;
int power_;
int displacement_;
};
} // namespace compiler
} // namespace internal
} // namespace v8


@@ -1118,7 +1118,7 @@ static WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
void SimplifiedLowering::DoLoadField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
node->set_op(machine()->Load(access.machine_type));
Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
}
@@ -1129,7 +1129,7 @@ void SimplifiedLowering::DoStoreField(Node* node) {
access.base_is_tagged, access.machine_type, access.type);
node->set_op(
machine()->Store(StoreRepresentation(access.machine_type, kind)));
Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
}
@@ -1137,20 +1137,22 @@ void SimplifiedLowering::DoStoreField(Node* node) {
Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
Node* const key) {
Node* index = key;
const int element_size = ElementSizeOf(access.machine_type);
if (element_size != 1) {
index = graph()->NewNode(machine()->Int32Mul(), index,
jsgraph()->Int32Constant(element_size));
const int element_size_shift = ElementSizeLog2Of(access.machine_type);
if (element_size_shift) {
index = graph()->NewNode(machine()->Word32Shl(), index,
jsgraph()->Int32Constant(element_size_shift));
}
const int fixed_offset = access.header_size - access.tag();
if (fixed_offset != 0) {
if (fixed_offset) {
index = graph()->NewNode(machine()->Int32Add(), index,
jsgraph()->Int32Constant(fixed_offset));
}
// TODO(bmeurer): 64-Bit
// if (machine()->Is64()) {
// index = graph()->NewNode(machine()->ChangeInt32ToInt64(), index);
// }
if (machine()->Is64()) {
// TODO(turbofan): This is probably only correct for typed arrays, and only
// if the typed arrays are at most 2GiB in size, which happens to match
// exactly our current situation.
index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
}
return index;
}
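A worked instance of the new lowering, assuming kMachFloat64 elements (shift 3), a 16-byte tagged header and kHeapObjectTag == 1; the concrete numbers are illustrative. The final ChangeUint32ToUint64 zero-extends rather than sign-extends, which is why the TODO above restricts correctness to indices below 2GiB:

#include <cstdint>

uint64_t LoweredElementOffset(uint32_t key) {
  const int element_size_shift = 3;     // ElementSizeLog2Of(kMachFloat64)
  const int fixed_offset = 16 - 1;      // header_size - tag()
  const uint32_t index = (key << element_size_shift) + fixed_offset;
  return static_cast<uint64_t>(index);  // ChangeUint32ToUint64 on 64-bit
}
// e.g. key == 5 yields (5 << 3) + 15 == 55 as the byte offset.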


@@ -34,12 +34,7 @@ class X64OperandConverter : public InstructionOperandConverter {
Operand OutputOperand() { return ToOperand(instr_->Output()); }
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
if (constant.type() == Constant::kInt32) {
return Immediate(constant.ToInt32());
}
UNREACHABLE();
return Immediate(-1);
return Immediate(ToConstant(operand).ToInt32());
}
Operand ToOperand(InstructionOperand* op, int extra = 0) {


@@ -88,7 +88,7 @@ namespace compiler {
// M = memory operand
// R = base register
// N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (int32_t)
// I = immediate displacement (32-bit signed integer)
#define TARGET_ADDRESSING_MODE_LIST(V) \
V(MR) /* [%r1 ] */ \


@@ -25,6 +25,10 @@ class X64OperandGenerator FINAL : public OperandGenerator {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
return true;
case IrOpcode::kInt64Constant: {
const int64_t value = OpParameter<int64_t>(node);
return value == static_cast<int64_t>(static_cast<int32_t>(value));
}
default:
return false;
}
@@ -36,106 +40,14 @@ class X64OperandGenerator FINAL : public OperandGenerator {
};
// Get the AddressingMode of scale factor N from the AddressingMode of scale
// factor 1.
static AddressingMode AdjustAddressingMode(AddressingMode base_mode,
int power) {
DCHECK(0 <= power && power < 4);
return static_cast<AddressingMode>(static_cast<int>(base_mode) + power);
}
class AddressingModeMatcher {
public:
AddressingModeMatcher(X64OperandGenerator* g, Node* base, Node* index)
: base_operand_(NULL),
index_operand_(NULL),
displacement_operand_(NULL),
mode_(kMode_None) {
Int32Matcher index_imm(index);
if (index_imm.HasValue()) {
int32_t value = index_imm.Value();
if (value == 0) {
mode_ = kMode_MR;
} else {
mode_ = kMode_MRI;
index_operand_ = g->UseImmediate(index);
}
base_operand_ = g->UseRegister(base);
} else {
// Compute base operand.
Int64Matcher base_imm(base);
if (!base_imm.HasValue() || base_imm.Value() != 0) {
base_operand_ = g->UseRegister(base);
}
// Compute index and displacement.
IndexAndDisplacementMatcher matcher(index);
index_operand_ = g->UseRegister(matcher.index_node());
if (matcher.displacement() != 0) {
displacement_operand_ = g->TempImmediate(matcher.displacement());
}
// Compute mode with scale factor one.
if (base_operand_ == NULL) {
if (displacement_operand_ == NULL) {
mode_ = kMode_M1;
} else {
mode_ = kMode_M1I;
}
} else {
if (displacement_operand_ == NULL) {
mode_ = kMode_MR1;
} else {
mode_ = kMode_MR1I;
}
}
// Adjust mode to actual scale factor.
mode_ = AdjustAddressingMode(mode_, matcher.power());
}
DCHECK_NE(kMode_None, mode_);
}
size_t SetInputs(InstructionOperand** inputs) {
size_t input_count = 0;
// Compute inputs_ and input_count.
if (base_operand_ != NULL) {
inputs[input_count++] = base_operand_;
}
if (index_operand_ != NULL) {
inputs[input_count++] = index_operand_;
}
if (displacement_operand_ != NULL) {
// Pure displacement mode not supported by x64.
DCHECK_NE(static_cast<int>(input_count), 0);
inputs[input_count++] = displacement_operand_;
}
DCHECK_NE(static_cast<int>(input_count), 0);
return input_count;
}
static const int kMaxInputCount = 3;
InstructionOperand* base_operand_;
InstructionOperand* index_operand_;
InstructionOperand* displacement_operand_;
AddressingMode mode_;
};
static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
X64OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
X64OperandGenerator g(this);
Node* const base = node->InputAt(0);
Node* const index = node->InputAt(1);
ArchOpcode opcode;
// TODO(titzer): signed/unsigned small loads
switch (rep) {
case kRepFloat32:
opcode = kX64Movss;
@@ -161,14 +73,19 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
X64OperandGenerator g(this);
AddressingModeMatcher matcher(&g, base, index);
InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount];
size_t input_count = matcher.SetInputs(inputs);
Emit(code, 1, outputs, input_count, inputs);
if (g.CanBeImmediate(base)) {
// load [#base + %index]
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
} else if (g.CanBeImmediate(index)) {
// load [%base + #index]
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else {
// load [%base + %index*1]
Emit(opcode | AddressingModeField::encode(kMode_MR1),
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
}
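With the matcher gone, operand selection collapses to three shapes; a condensed sketch of the choice (illustrative only, mode names from the x64 addressing-mode list above). Scaled modes stay unused for now, per the commit message:

AddressingMode ChooseMode(bool base_is_imm, bool index_is_imm) {
  if (base_is_imm || index_is_imm) return kMode_MRI;  // [%reg + #imm32]
  return kMode_MR1;                                   // [%base + %index*1]
}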
@@ -218,20 +135,21 @@ void InstructionSelector::VisitStore(Node* node) {
UNREACHABLE();
return;
}
InstructionOperand* val;
if (g.CanBeImmediate(value)) {
val = g.UseImmediate(value);
InstructionOperand* value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
if (g.CanBeImmediate(base)) {
// store [#base + %index], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
g.UseRegister(index), g.UseImmediate(base), value_operand);
} else if (g.CanBeImmediate(index)) {
// store [%base + #index], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
g.UseRegister(base), g.UseImmediate(index), value_operand);
} else {
val = g.UseRegister(value);
// store [%base + %index*1], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
g.UseRegister(base), g.UseRegister(index), value_operand);
}
AddressingModeMatcher matcher(&g, base, index);
InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount + 1];
size_t input_count = matcher.SetInputs(inputs);
inputs[input_count++] = val;
Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
}
@@ -342,19 +260,21 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
}
namespace {
// Shared routine for multiple 32-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
static void VisitWord32Shift(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
void VisitWord32Shift(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
X64OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
Int32BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
if (g.CanBeImmediate(right)) {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
g.UseImmediate(right));
} else {
Int32BinopMatcher m(node);
if (m.right().IsWord32And()) {
Int32BinopMatcher mright(right);
if (mright.right().Is(0x1F)) {
@@ -369,17 +289,17 @@ static void VisitWord32Shift(InstructionSelector* selector, Node* node,
// Shared routine for multiple 64-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
static void VisitWord64Shift(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
void VisitWord64Shift(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
X64OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
Int64BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
if (g.CanBeImmediate(right)) {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
g.UseImmediate(right));
} else {
Int64BinopMatcher m(node);
if (m.right().IsWord64And()) {
Int64BinopMatcher mright(right);
if (mright.right().Is(0x3F)) {
@@ -391,6 +311,8 @@ static void VisitWord64Shift(InstructionSelector* selector, Node* node,
}
}
} // namespace
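Both helpers can drop an explicit mask of the shift count (0x1F for the 32-bit case, 0x3F for the 64-bit case) because x86 shift instructions already reduce the count modulo the operand width; a small sketch, assuming x86-64 semantics:

#include <cstdint>

uint64_t ShiftLeft64(uint64_t x, unsigned n) {
  // shlq only consumes the low 6 bits of the count, so an explicit
  // Word64And(n, 0x3F) in the graph is redundant and is matched away.
  return x << (n & 0x3F);
}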
void InstructionSelector::VisitWord32Shl(Node* node) {
VisitWord32Shift(this, node, kX64Shl32);
@@ -432,57 +354,12 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
}
static bool TryEmitLeaMultAdd(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
int32_t displacement_value;
Node* left;
{
Int32BinopMatcher m32(node);
left = m32.left().node();
if (m32.right().HasValue()) {
displacement_value = m32.right().Value();
} else {
Int64BinopMatcher m64(node);
if (!m64.right().HasValue()) {
return false;
}
int64_t value_64 = m64.right().Value();
displacement_value = static_cast<int32_t>(value_64);
if (displacement_value != value_64) return false;
}
}
LeaMultiplyMatcher lmm(left);
if (!lmm.Matches()) return false;
AddressingMode mode;
size_t input_count;
X64OperandGenerator g(selector);
InstructionOperand* index = g.UseRegister(lmm.Left());
InstructionOperand* displacement = g.TempImmediate(displacement_value);
InstructionOperand* inputs[] = {index, displacement, displacement};
if (lmm.Displacement() != 0) {
input_count = 3;
inputs[1] = index;
mode = kMode_MR1I;
} else {
input_count = 2;
mode = kMode_M1I;
}
mode = AdjustAddressingMode(mode, lmm.Power());
InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
selector->Emit(opcode | AddressingModeField::encode(mode), 1, outputs,
input_count, inputs);
return true;
}
void InstructionSelector::VisitInt32Add(Node* node) {
if (TryEmitLeaMultAdd(this, node, kX64Lea32)) return;
VisitBinop(this, node, kX64Add32);
}
void InstructionSelector::VisitInt64Add(Node* node) {
if (TryEmitLeaMultAdd(this, node, kX64Lea)) return;
VisitBinop(this, node, kX64Add);
}
@@ -509,33 +386,9 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
}
static bool TryEmitLeaMult(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
LeaMultiplyMatcher lea(node);
// Try to match lea.
if (!lea.Matches()) return false;
AddressingMode mode;
size_t input_count;
X64OperandGenerator g(selector);
InstructionOperand* left = g.UseRegister(lea.Left());
InstructionOperand* inputs[] = {left, left};
if (lea.Displacement() != 0) {
input_count = 2;
mode = kMode_MR1;
} else {
input_count = 1;
mode = kMode_M1;
}
mode = AdjustAddressingMode(mode, lea.Power());
InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
selector->Emit(opcode | AddressingModeField::encode(mode), 1, outputs,
input_count, inputs);
return true;
}
namespace {
static void VisitMul(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
X64OperandGenerator g(selector);
Int32BinopMatcher m(node);
Node* left = m.left().node();
@@ -552,15 +405,15 @@ static void VisitMul(InstructionSelector* selector, Node* node,
}
}
} // namespace
void InstructionSelector::VisitInt32Mul(Node* node) {
if (TryEmitLeaMult(this, node, kX64Lea32)) return;
VisitMul(this, node, kX64Imul32);
}
void InstructionSelector::VisitInt64Mul(Node* node) {
if (TryEmitLeaMult(this, node, kX64Lea)) return;
VisitMul(this, node, kX64Imul);
}
@@ -672,7 +525,37 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
X64OperandGenerator g(this);
Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
Node* value = node->InputAt(0);
switch (value->opcode()) {
case IrOpcode::kWord32And:
case IrOpcode::kWord32Or:
case IrOpcode::kWord32Xor:
case IrOpcode::kWord32Shl:
case IrOpcode::kWord32Shr:
case IrOpcode::kWord32Sar:
case IrOpcode::kWord32Ror:
case IrOpcode::kWord32Equal:
case IrOpcode::kInt32Add:
case IrOpcode::kInt32Sub:
case IrOpcode::kInt32Mul:
case IrOpcode::kInt32MulHigh:
case IrOpcode::kInt32Div:
case IrOpcode::kInt32LessThan:
case IrOpcode::kInt32LessThanOrEqual:
case IrOpcode::kInt32Mod:
case IrOpcode::kUint32Div:
case IrOpcode::kUint32LessThan:
case IrOpcode::kUint32LessThanOrEqual:
case IrOpcode::kUint32Mod: {
// These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
// zero-extension is a no-op.
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
return;
}
default:
break;
}
Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
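This works because every operation in the list writes its result through a 32-bit register, and on x86-64 any write to a 32-bit register clears the upper 32 bits. A compiler-level analogue (illustrative):

#include <cstdint>

uint64_t ZeroExtendAfterAdd(uint32_t a, uint32_t b) {
  const uint32_t sum = a + b;  // a 32-bit addl/leal already zero-extends,
  return sum;                  // so no extra movl is needed for the widening
}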
@@ -684,7 +567,24 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
X64OperandGenerator g(this);
Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
Node* value = node->InputAt(0);
if (CanCover(node, value)) {
switch (value->opcode()) {
case IrOpcode::kWord64Sar:
case IrOpcode::kWord64Shr: {
Int64BinopMatcher m(value);
if (m.right().Is(32)) {
Emit(kX64Shr, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.TempImmediate(32));
return;
}
break;
}
default:
break;
}
}
Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
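Truncation keeps only the low 32 bits of the shifted value, and for a shift amount of 32 those bits are the original high word regardless of whether the shift was arithmetic or logical; hence one logical kX64Shr covers both Word64Sar and Word64Shr. A sketch of the equivalence:

#include <cstdint>

int32_t TruncatedHighWord(int64_t x) {
  // (x >> 32) and (uint64_t(x) >> 32) agree in their low 32 bits,
  // so a single shrq by 32 serves both matched patterns.
  return static_cast<int32_t>(static_cast<uint64_t>(x) >> 32);
}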
@@ -731,6 +631,18 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
namespace {
void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
X64OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
} // namespace
void InstructionSelector::VisitFloat64Floor(Node* node) {
DCHECK(CpuFeatures::IsSupported(SSE4_1));
VisitRRFloat64(this, kSSEFloat64Floor, node);


@@ -1324,45 +1324,54 @@ TEST(InsertChangesAroundFloat64Cmp) {
}
namespace {
void CheckFieldAccessArithmetic(FieldAccess access, Node* load_or_store) {
Int32Matcher index = Int32Matcher(load_or_store->InputAt(1));
CHECK(index.Is(access.offset - access.tag()));
IntPtrMatcher mindex(load_or_store->InputAt(1));
CHECK(mindex.Is(access.offset - access.tag()));
}
Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
Int32BinopMatcher index(load_or_store->InputAt(1));
CHECK_EQ(IrOpcode::kInt32Add, index.node()->opcode());
CHECK(index.right().Is(access.header_size - access.tag()));
Node* index = load_or_store->InputAt(1);
if (kPointerSize == 8) {
CHECK_EQ(IrOpcode::kChangeUint32ToUint64, index->opcode());
index = index->InputAt(0);
}
int element_size = ElementSizeOf(access.machine_type);
Int32BinopMatcher mindex(index);
CHECK_EQ(IrOpcode::kInt32Add, mindex.node()->opcode());
CHECK(mindex.right().Is(access.header_size - access.tag()));
if (element_size != 1) {
Int32BinopMatcher mul(index.left().node());
CHECK_EQ(IrOpcode::kInt32Mul, mul.node()->opcode());
CHECK(mul.right().Is(element_size));
return mul.left().node();
const int element_size_shift = ElementSizeLog2Of(access.machine_type);
if (element_size_shift) {
Int32BinopMatcher shl(mindex.left().node());
CHECK_EQ(IrOpcode::kWord32Shl, shl.node()->opcode());
CHECK(shl.right().Is(element_size_shift));
return shl.left().node();
} else {
return index.left().node();
return mindex.left().node();
}
}
static const MachineType machine_reps[] = {
kRepBit, kMachInt8, kMachInt16, kMachInt32,
kMachInt64, kMachFloat64, kMachAnyTagged};
const MachineType kMachineReps[] = {kRepBit, kMachInt8, kMachInt16,
kMachInt32, kMachInt64, kMachFloat64,
kMachAnyTagged};
} // namespace
TEST(LowerLoadField_to_load) {
TestingGraph t(Type::Any(), Type::Signed32());
for (size_t i = 0; i < arraysize(machine_reps); i++) {
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Handle<Name>::null(), Type::Any(), machine_reps[i]};
Handle<Name>::null(), Type::Any(), kMachineReps[i]};
Node* load =
t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
Node* use = t.Use(load, machine_reps[i]);
Node* use = t.Use(load, kMachineReps[i]);
t.Return(use);
t.Lower();
CHECK_EQ(IrOpcode::kLoad, load->opcode());
@@ -1370,7 +1379,7 @@ TEST(LowerLoadField_to_load) {
CheckFieldAccessArithmetic(access, load);
MachineType rep = OpParameter<MachineType>(load);
CHECK_EQ(machine_reps[i], rep);
CHECK_EQ(kMachineReps[i], rep);
}
}
@@ -1378,12 +1387,12 @@ TEST(LowerLoadField_to_load) {
TEST(LowerStoreField_to_store) {
TestingGraph t(Type::Any(), Type::Signed32());
for (size_t i = 0; i < arraysize(machine_reps); i++) {
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Handle<Name>::null(), Type::Any(), machine_reps[i]};
Handle<Name>::null(), Type::Any(), kMachineReps[i]};
Node* val = t.ExampleWithOutput(machine_reps[i]);
Node* val = t.ExampleWithOutput(kMachineReps[i]);
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
val, t.start, t.start);
t.Effect(store);
@@ -1393,10 +1402,10 @@ TEST(LowerStoreField_to_store) {
CheckFieldAccessArithmetic(access, store);
StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
if (machine_reps[i] & kRepTagged) {
if (kMachineReps[i] & kRepTagged) {
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
CHECK_EQ(machine_reps[i], rep.machine_type());
CHECK_EQ(kMachineReps[i], rep.machine_type());
}
}
@@ -1404,15 +1413,15 @@ TEST(LowerStoreField_to_store) {
TEST(LowerLoadElement_to_load) {
TestingGraph t(Type::Any(), Type::Signed32());
for (size_t i = 0; i < arraysize(machine_reps); i++) {
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
ElementAccess access = {kNoBoundsCheck, kTaggedBase,
FixedArrayBase::kHeaderSize, Type::Any(),
machine_reps[i]};
kMachineReps[i]};
Node* load =
t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0, t.p1,
t.jsgraph.Int32Constant(1024), t.start, t.start);
Node* use = t.Use(load, machine_reps[i]);
Node* use = t.Use(load, kMachineReps[i]);
t.Return(use);
t.Lower();
CHECK_EQ(IrOpcode::kLoad, load->opcode());
@@ -1420,7 +1429,7 @@ TEST(LowerLoadElement_to_load) {
CheckElementAccessArithmetic(access, load);
MachineType rep = OpParameter<MachineType>(load);
CHECK_EQ(machine_reps[i], rep);
CHECK_EQ(kMachineReps[i], rep);
}
}
@@ -1428,12 +1437,12 @@ TEST(LowerLoadElement_to_load) {
TEST(LowerStoreElement_to_store) {
TestingGraph t(Type::Any(), Type::Signed32());
for (size_t i = 0; i < arraysize(machine_reps); i++) {
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
ElementAccess access = {kNoBoundsCheck, kTaggedBase,
FixedArrayBase::kHeaderSize, Type::Any(),
machine_reps[i]};
kMachineReps[i]};
Node* val = t.ExampleWithOutput(machine_reps[i]);
Node* val = t.ExampleWithOutput(kMachineReps[i]);
Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
t.p1, t.jsgraph.Int32Constant(1024), val,
t.start, t.start);
@@ -1444,10 +1453,10 @@ TEST(LowerStoreElement_to_store) {
CheckElementAccessArithmetic(access, store);
StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
if (machine_reps[i] & kRepTagged) {
if (kMachineReps[i] & kRepTagged) {
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
CHECK_EQ(machine_reps[i], rep.machine_type());
CHECK_EQ(kMachineReps[i], rep.machine_type());
}
}


@@ -90,9 +90,12 @@ class ChangeLoweringTest : public GraphTest {
Matcher<Node*> IsLoadHeapNumber(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher) {
return IsLoad(kMachFloat64, value_matcher,
IsInt32Constant(HeapNumberValueOffset()), graph()->start(),
IsIntPtrConstant(HeapNumberValueOffset()), graph()->start(),
control_matcher);
}
Matcher<Node*> IsIntPtrConstant(int value) {
return Is32() ? IsInt32Constant(value) : IsInt64Constant(value);
}
Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher)
@@ -162,7 +165,7 @@ TARGET_TEST_P(ChangeLoweringCommonTest, ChangeFloat64ToTagged) {
IsAllocateHeapNumber(IsValueEffect(val), graph()->start())),
IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
CaptureEq(&heap_number),
IsInt32Constant(HeapNumberValueOffset()), val,
IsIntPtrConstant(HeapNumberValueOffset()), val,
CaptureEq(&heap_number), graph()->start())));
}
@@ -207,7 +210,7 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
IsAllocateHeapNumber(_, CaptureEq(&if_true))),
IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
CaptureEq(&heap_number),
IsInt32Constant(HeapNumberValueOffset()),
IsIntPtrConstant(HeapNumberValueOffset()),
IsChangeInt32ToFloat64(val),
CaptureEq(&heap_number), CaptureEq(&if_true))),
IsProjection(
@@ -346,7 +349,7 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeInt32ToTagged) {
EXPECT_THAT(reduction.replacement(),
IsWord64Shl(IsChangeInt32ToInt64(val),
IsInt32Constant(SmiShiftAmount())));
IsInt64Constant(SmiShiftAmount())));
}
@@ -366,12 +369,12 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToFloat64) {
IsPhi(
kMachFloat64, IsLoadHeapNumber(val, CaptureEq(&if_true)),
IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(
IsWord64Sar(val, IsInt32Constant(SmiShiftAmount())))),
IsWord64Sar(val, IsInt64Constant(SmiShiftAmount())))),
IsMerge(
AllOf(CaptureEq(&if_true),
IsIfTrue(AllOf(
CaptureEq(&branch),
IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
graph()->start())))),
IsIfFalse(CaptureEq(&branch)))));
}
@@ -393,11 +396,11 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToInt32) {
IsPhi(kMachInt32,
IsChangeFloat64ToInt32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
IsTruncateInt64ToInt32(
IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))),
IsWord64Sar(val, IsInt64Constant(SmiShiftAmount()))),
IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
IsIfFalse(AllOf(
CaptureEq(&branch),
IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
graph()->start()))))));
}
@@ -418,11 +421,11 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToUint32) {
IsPhi(kMachUint32,
IsChangeFloat64ToUint32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
IsTruncateInt64ToInt32(
IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))),
IsWord64Sar(val, IsInt64Constant(SmiShiftAmount()))),
IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
IsIfFalse(AllOf(
CaptureEq(&branch),
IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
graph()->start()))))));
}
@@ -442,18 +445,18 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeUint32ToTagged) {
phi,
IsPhi(
kMachAnyTagged, IsWord64Shl(IsChangeUint32ToUint64(val),
IsInt32Constant(SmiShiftAmount())),
IsInt64Constant(SmiShiftAmount())),
IsFinish(AllOf(CaptureEq(&heap_number),
IsAllocateHeapNumber(_, CaptureEq(&if_false))),
IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
CaptureEq(&heap_number),
IsInt32Constant(HeapNumberValueOffset()),
IsInt64Constant(HeapNumberValueOffset()),
IsChangeUint32ToFloat64(val),
CaptureEq(&heap_number), CaptureEq(&if_false))),
IsMerge(
IsIfTrue(AllOf(CaptureEq(&branch),
IsBranch(IsUint32LessThanOrEqual(
val, IsInt32Constant(SmiMaxValue())),
val, IsInt64Constant(SmiMaxValue())),
graph()->start()))),
AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
}


@@ -138,6 +138,14 @@ bool InstructionSelectorTest::Stream::IsFixed(const InstructionOperand* operand,
}
bool InstructionSelectorTest::Stream::IsSameAsFirst(
const InstructionOperand* operand) const {
if (!operand->IsUnallocated()) return false;
const UnallocatedOperand* unallocated = UnallocatedOperand::cast(operand);
return unallocated->HasSameAsInputPolicy();
}
bool InstructionSelectorTest::Stream::IsUsedAtStart(
const InstructionOperand* operand) const {
if (!operand->IsUnallocated()) return false;


@@ -172,6 +172,7 @@ class InstructionSelectorTest : public TestWithContext, public TestWithZone {
int ToVreg(const Node* node) const;
bool IsFixed(const InstructionOperand* operand, Register reg) const;
bool IsSameAsFirst(const InstructionOperand* operand) const;
bool IsUsedAtStart(const InstructionOperand* operand) const;
FrameStateDescriptor* GetFrameStateDescriptor(int deoptimization_id) {


@@ -10,16 +10,6 @@ namespace v8 {
namespace internal {
namespace compiler {
namespace {
// Immediates (random subset).
static const int32_t kImmediates[] = {
kMinInt, -42, -1, 0, 1, 2, 3, 4, 5,
6, 7, 8, 16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
} // namespace
// -----------------------------------------------------------------------------
// Conversions.
@@ -82,39 +72,6 @@ TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
}
// -----------------------------------------------------------------------------
// Better left operand for commutative binops
TEST_F(InstructionSelectorTest, BetterLeftOperandTestAddBinop) {
StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
Node* param1 = m.Parameter(0);
Node* param2 = m.Parameter(1);
Node* add = m.Int32Add(param1, param2);
m.Return(m.Int32Add(add, param1));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kX64Add32, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(0)));
}
TEST_F(InstructionSelectorTest, BetterLeftOperandTestMulBinop) {
StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
Node* param1 = m.Parameter(0);
Node* param2 = m.Parameter(1);
Node* mul = m.Int32Mul(param1, param2);
m.Return(m.Int32Mul(mul, param1));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kX64Imul32, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(0)));
}
// -----------------------------------------------------------------------------
// Loads and stores
@@ -181,324 +138,152 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
::testing::ValuesIn(kMemoryAccesses));
// -----------------------------------------------------------------------------
// AddressingMode for loads and stores.
// ChangeUint32ToUint64.
class AddressingModeUnitTest : public InstructionSelectorTest {
public:
AddressingModeUnitTest() : m(NULL) { Reset(); }
~AddressingModeUnitTest() { delete m; }
void Run(Node* base, Node* index, AddressingMode mode) {
Node* load = m->Load(kMachInt32, base, index);
m->Store(kMachInt32, base, index, load);
m->Return(m->Int32Constant(0));
Stream s = m->Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(mode, s[0]->addressing_mode());
EXPECT_EQ(mode, s[1]->addressing_mode());
}
namespace {
Node* zero;
Node* null_ptr;
Node* non_zero;
Node* base_reg; // opaque value to generate base as register
Node* index_reg; // opaque value to generate index as register
Node* scales[arraysize(ScaleFactorMatcher::kMatchedFactors)];
StreamBuilder* m;
typedef Node* (RawMachineAssembler::*Constructor)(Node*, Node*);
void Reset() {
delete m;
m = new StreamBuilder(this, kMachInt32, kMachInt32, kMachInt32);
zero = m->Int32Constant(0);
null_ptr = m->Int64Constant(0);
non_zero = m->Int32Constant(127);
base_reg = m->Parameter(0);
index_reg = m->Parameter(0);
for (size_t i = 0; i < arraysize(ScaleFactorMatcher::kMatchedFactors);
++i) {
scales[i] = m->Int32Constant(ScaleFactorMatcher::kMatchedFactors[i]);
}
}
struct BinaryOperation {
Constructor constructor;
const char* constructor_name;
};
TEST_F(AddressingModeUnitTest, AddressingMode_MR) {
Node* base = base_reg;
Node* index = zero;
Run(base, index, kMode_MR);
std::ostream& operator<<(std::ostream& os, const BinaryOperation& bop) {
return os << bop.constructor_name;
}
TEST_F(AddressingModeUnitTest, AddressingMode_MRI) {
Node* base = base_reg;
Node* index = non_zero;
Run(base, index, kMode_MRI);
const BinaryOperation kWord32BinaryOperations[] = {
{&RawMachineAssembler::Word32And, "Word32And"},
{&RawMachineAssembler::Word32Or, "Word32Or"},
{&RawMachineAssembler::Word32Xor, "Word32Xor"},
{&RawMachineAssembler::Word32Shl, "Word32Shl"},
{&RawMachineAssembler::Word32Shr, "Word32Shr"},
{&RawMachineAssembler::Word32Sar, "Word32Sar"},
{&RawMachineAssembler::Word32Ror, "Word32Ror"},
{&RawMachineAssembler::Word32Equal, "Word32Equal"},
{&RawMachineAssembler::Int32Add, "Int32Add"},
{&RawMachineAssembler::Int32Sub, "Int32Sub"},
{&RawMachineAssembler::Int32Mul, "Int32Mul"},
{&RawMachineAssembler::Int32MulHigh, "Int32MulHigh"},
{&RawMachineAssembler::Int32Div, "Int32Div"},
{&RawMachineAssembler::Int32LessThan, "Int32LessThan"},
{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual"},
{&RawMachineAssembler::Int32Mod, "Int32Mod"},
{&RawMachineAssembler::Uint32Div, "Uint32Div"},
{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan"},
{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual"},
{&RawMachineAssembler::Uint32Mod, "Uint32Mod"}};
} // namespace
typedef InstructionSelectorTestWithParam<BinaryOperation>
InstructionSelectorChangeUint32ToUint64Test;
TEST_P(InstructionSelectorChangeUint32ToUint64Test, ChangeUint32ToUint64) {
const BinaryOperation& bop = GetParam();
StreamBuilder m(this, kMachUint64, kMachInt32, kMachInt32);
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
m.Return(m.ChangeUint32ToUint64((m.*bop.constructor)(p0, p1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
}
TEST_F(AddressingModeUnitTest, AddressingMode_MR1) {
Node* base = base_reg;
Node* index = index_reg;
Run(base, index, kMode_MR1);
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorChangeUint32ToUint64Test,
::testing::ValuesIn(kWord32BinaryOperations));
// -----------------------------------------------------------------------------
// TruncateInt64ToInt32.
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
StreamBuilder m(this, kMachInt32, kMachInt64);
Node* const p = m.Parameter(0);
Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
m.Return(t);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
}
TEST_F(AddressingModeUnitTest, AddressingMode_MRN) {
AddressingMode expected[] = {kMode_MR1, kMode_MR2, kMode_MR4, kMode_MR8};
for (size_t i = 0; i < arraysize(scales); ++i) {
Reset();
Node* base = base_reg;
Node* index = m->Int32Mul(index_reg, scales[i]);
Run(base, index, expected[i]);
}
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
StreamBuilder m(this, kMachInt32, kMachInt64);
Node* const p = m.Parameter(0);
Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(32)));
m.Return(t);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
}
TEST_F(AddressingModeUnitTest, AddressingMode_MR1I) {
Node* base = base_reg;
Node* index = m->Int32Add(index_reg, non_zero);
Run(base, index, kMode_MR1I);
}
// -----------------------------------------------------------------------------
// Addition.
TEST_F(AddressingModeUnitTest, AddressingMode_MRNI) {
AddressingMode expected[] = {kMode_MR1I, kMode_MR2I, kMode_MR4I, kMode_MR8I};
for (size_t i = 0; i < arraysize(scales); ++i) {
Reset();
Node* base = base_reg;
Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
Run(base, index, expected[i]);
}
}
TEST_F(AddressingModeUnitTest, AddressingMode_M1) {
Node* base = null_ptr;
Node* index = index_reg;
Run(base, index, kMode_M1);
}
TEST_F(AddressingModeUnitTest, AddressingMode_MN) {
AddressingMode expected[] = {kMode_M1, kMode_M2, kMode_M4, kMode_M8};
for (size_t i = 0; i < arraysize(scales); ++i) {
Reset();
Node* base = null_ptr;
Node* index = m->Int32Mul(index_reg, scales[i]);
Run(base, index, expected[i]);
}
}
TEST_F(AddressingModeUnitTest, AddressingMode_M1I) {
Node* base = null_ptr;
Node* index = m->Int32Add(index_reg, non_zero);
Run(base, index, kMode_M1I);
}
TEST_F(AddressingModeUnitTest, AddressingMode_MNI) {
AddressingMode expected[] = {kMode_M1I, kMode_M2I, kMode_M4I, kMode_M8I};
for (size_t i = 0; i < arraysize(scales); ++i) {
Reset();
Node* base = null_ptr;
Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
Run(base, index, expected[i]);
}
TEST_F(InstructionSelectorTest, Int32AddWithInt32AddWithParameters) {
StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const a0 = m.Int32Add(p0, p1);
m.Return(m.Int32Add(a0, p0));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kX64Add32, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
}
// -----------------------------------------------------------------------------
// Multiplication.
namespace {
struct MultParam {
int value;
bool lea_expected;
AddressingMode addressing_mode;
};
std::ostream& operator<<(std::ostream& os, const MultParam& m) {
return os << m.value << "." << m.lea_expected << "." << m.addressing_mode;
}
const MultParam kMultParams[] = {{-1, false, kMode_None},
{0, false, kMode_None},
{1, true, kMode_M1},
{2, true, kMode_M2},
{3, true, kMode_MR2},
{4, true, kMode_M4},
{5, true, kMode_MR4},
{6, false, kMode_None},
{7, false, kMode_None},
{8, true, kMode_M8},
{9, true, kMode_MR8},
{10, false, kMode_None},
{11, false, kMode_None}};
} // namespace
typedef InstructionSelectorTestWithParam<MultParam> InstructionSelectorMultTest;
static unsigned InputCountForLea(AddressingMode mode) {
switch (mode) {
case kMode_MR1I:
case kMode_MR2I:
case kMode_MR4I:
case kMode_MR8I:
return 3U;
case kMode_M1I:
case kMode_M2I:
case kMode_M4I:
case kMode_M8I:
return 2U;
case kMode_MR1:
case kMode_MR2:
case kMode_MR4:
case kMode_MR8:
return 2U;
case kMode_M1:
case kMode_M2:
case kMode_M4:
case kMode_M8:
return 1U;
default:
UNREACHABLE();
return 0U;
}
}
static AddressingMode AddressingModeForAddMult(const MultParam& m) {
switch (m.addressing_mode) {
case kMode_MR1:
return kMode_MR1I;
case kMode_MR2:
return kMode_MR2I;
case kMode_MR4:
return kMode_MR4I;
case kMode_MR8:
return kMode_MR8I;
case kMode_M1:
return kMode_M1I;
case kMode_M2:
return kMode_M2I;
case kMode_M4:
return kMode_M4I;
case kMode_M8:
return kMode_M8I;
default:
UNREACHABLE();
return kMode_None;
}
}
TEST_P(InstructionSelectorMultTest, Mult32) {
const MultParam m_param = GetParam();
StreamBuilder m(this, kMachInt32, kMachInt32);
Node* param = m.Parameter(0);
Node* mult = m.Int32Mul(param, m.Int32Constant(m_param.value));
m.Return(mult);
TEST_F(InstructionSelectorTest, Int32MulWithInt32MulWithParameters) {
StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const m0 = m.Int32Mul(p0, p1);
m.Return(m.Int32Mul(m0, p0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(m_param.addressing_mode, s[0]->addressing_mode());
if (m_param.lea_expected) {
EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
ASSERT_EQ(InputCountForLea(s[0]->addressing_mode()), s[0]->InputCount());
} else {
EXPECT_EQ(kX64Imul32, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
}
EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kX64Imul32, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(m0), s.ToVreg(s[0]->OutputAt(0)));
EXPECT_EQ(kX64Imul32, s[1]->arch_opcode());
ASSERT_EQ(2U, s[1]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[1]->InputAt(0)));
EXPECT_EQ(s.ToVreg(m0), s.ToVreg(s[1]->InputAt(1)));
}
TEST_P(InstructionSelectorMultTest, Mult64) {
const MultParam m_param = GetParam();
StreamBuilder m(this, kMachInt64, kMachInt64);
Node* param = m.Parameter(0);
Node* mult = m.Int64Mul(param, m.Int64Constant(m_param.value));
m.Return(mult);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(m_param.addressing_mode, s[0]->addressing_mode());
if (m_param.lea_expected) {
EXPECT_EQ(kX64Lea, s[0]->arch_opcode());
ASSERT_EQ(InputCountForLea(s[0]->addressing_mode()), s[0]->InputCount());
EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->InputAt(0)));
} else {
EXPECT_EQ(kX64Imul, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
// TODO(dcarney): why is this happening?
EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->InputAt(1)));
}
}
TEST_P(InstructionSelectorMultTest, MultAdd32) {
TRACED_FOREACH(int32_t, imm, kImmediates) {
const MultParam m_param = GetParam();
StreamBuilder m(this, kMachInt32, kMachInt32);
Node* param = m.Parameter(0);
Node* mult = m.Int32Add(m.Int32Mul(param, m.Int32Constant(m_param.value)),
m.Int32Constant(imm));
m.Return(mult);
Stream s = m.Build();
if (m_param.lea_expected) {
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
EXPECT_EQ(AddressingModeForAddMult(m_param), s[0]->addressing_mode());
unsigned input_count = InputCountForLea(s[0]->addressing_mode());
ASSERT_EQ(input_count, s[0]->InputCount());
ASSERT_EQ(InstructionOperand::IMMEDIATE,
s[0]->InputAt(input_count - 1)->kind());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
} else {
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kX64Imul32, s[0]->arch_opcode());
EXPECT_EQ(kX64Add32, s[1]->arch_opcode());
}
}
}
TEST_P(InstructionSelectorMultTest, MultAdd64) {
TRACED_FOREACH(int32_t, imm, kImmediates) {
const MultParam m_param = GetParam();
StreamBuilder m(this, kMachInt64, kMachInt64);
Node* param = m.Parameter(0);
Node* mult = m.Int64Add(m.Int64Mul(param, m.Int64Constant(m_param.value)),
m.Int64Constant(imm));
m.Return(mult);
Stream s = m.Build();
if (m_param.lea_expected) {
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kX64Lea, s[0]->arch_opcode());
EXPECT_EQ(AddressingModeForAddMult(m_param), s[0]->addressing_mode());
unsigned input_count = InputCountForLea(s[0]->addressing_mode());
ASSERT_EQ(input_count, s[0]->InputCount());
ASSERT_EQ(InstructionOperand::IMMEDIATE,
s[0]->InputAt(input_count - 1)->kind());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
} else {
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kX64Imul, s[0]->arch_opcode());
EXPECT_EQ(kX64Add, s[1]->arch_opcode());
}
}
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMultTest,
::testing::ValuesIn(kMultParams));
TEST_F(InstructionSelectorTest, Int32MulHigh) {
StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
Node* const p0 = m.Parameter(0);


@@ -463,7 +463,6 @@
'../../src/compiler/node-aux-data.h',
'../../src/compiler/node-cache.cc',
'../../src/compiler/node-cache.h',
'../../src/compiler/node-matchers.cc',
'../../src/compiler/node-matchers.h',
'../../src/compiler/node-properties-inl.h',
'../../src/compiler/node-properties.h',