[turbofan] Pierce TypeGuards and FoldConstants in ValueMatcher
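
ValueMatcher previously matched only on the node itself, so a constant
hidden behind a TypeGuard or FoldConstant was not recognized. HasValue()
and Value() are renamed to HasResolvedValue() and ResolvedValue(), which
look through these value identities before matching; the call sites in
the instruction selectors and reducers are updated mechanically. A
minimal sketch of the unwrapping step (assuming TypeGuard carries the
guarded value as value input 0 and FoldConstant its constant as input 1,
as the old call sites in the diff suggest; not the literal V8 code):

    // Walk through value identities until a potentially constant
    // node is reached; matchers then inspect the unwrapped node.
    Node* SkipValueIdentities(Node* node) {
      while (true) {
        switch (node->opcode()) {
          case IrOpcode::kTypeGuard:
            node = NodeProperties::GetValueInput(node, 0);
            break;
          case IrOpcode::kFoldConstant:
            node = node->InputAt(1);
            break;
          default:
            return node;
        }
      }
    }

DecideCondition and EscapeAnalysisReducer, which had its own
SkipTypeGuards helper, now go through the same unwrapping.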

Change-Id: I4ab54dac771bb551c2435a98f9e53194a6f27853
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2495494
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70851}
Author: Shu-yu Guo <syg@chromium.org>
Date: 2020-10-28 08:48:08 -07:00 (committed by Commit Bot)
parent 88ae150dc7
commit 34610db878
36 changed files with 806 additions and 684 deletions


@@ -29,8 +29,8 @@ class ArmOperandGenerator : public OperandGenerator {
bool CanBeImmediate(Node* node, InstructionCode opcode) {
Int32Matcher m(node);
if (!m.HasValue()) return false;
int32_t value = m.Value();
if (!m.HasResolvedValue()) return false;
int32_t value = m.ResolvedValue();
switch (ArchOpcodeField::decode(opcode)) {
case kArmAnd:
case kArmMov:
@@ -95,7 +95,7 @@ void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node, int width) {
ArmOperandGenerator g(selector);
Int32Matcher m(node->InputAt(1));
if (m.HasValue()) {
if (m.HasResolvedValue()) {
if (m.IsMultipleOf(width)) {
selector->EmitIdentity(node);
} else {
@@ -389,13 +389,14 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
size_t input_count = 2;
ExternalReferenceMatcher m(base);
if (m.HasValue() && selector->CanAddressRelativeToRootsRegister(m.Value())) {
if (m.HasResolvedValue() &&
selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
Int32Matcher int_matcher(index);
if (int_matcher.HasValue()) {
if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
int_matcher.Value() +
int_matcher.ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
selector->isolate(), m.Value());
selector->isolate(), m.ResolvedValue());
input_count = 1;
inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
opcode |= AddressingModeField::encode(kMode_Root);
@@ -675,13 +676,14 @@ void InstructionSelector::VisitStore(Node* node) {
}
ExternalReferenceMatcher m(base);
if (m.HasValue() && CanAddressRelativeToRootsRegister(m.Value())) {
if (m.HasResolvedValue() &&
CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
Int32Matcher int_matcher(index);
if (int_matcher.HasValue()) {
if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
int_matcher.Value() +
int_matcher.ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
isolate(), m.Value());
isolate(), m.ResolvedValue());
int input_count = 2;
InstructionOperand inputs[2];
inputs[0] = g.UseRegister(value);
@@ -903,16 +905,16 @@ void InstructionSelector::VisitWord32And(Node* node) {
return;
}
}
if (m.right().HasValue()) {
uint32_t const value = m.right().Value();
if (m.right().HasResolvedValue()) {
uint32_t const value = m.right().ResolvedValue();
uint32_t width = base::bits::CountPopulation(value);
uint32_t leading_zeros = base::bits::CountLeadingZeros32(value);
// Try to merge SHR operations on the left hand input into this AND.
if (m.left().IsWord32Shr()) {
Int32BinopMatcher mshr(m.left().node());
if (mshr.right().HasValue()) {
uint32_t const shift = mshr.right().Value();
if (mshr.right().HasResolvedValue()) {
uint32_t const shift = mshr.right().ResolvedValue();
if (((shift == 8) || (shift == 16) || (shift == 24)) &&
(value == 0xFF)) {
@@ -920,14 +922,14 @@ void InstructionSelector::VisitWord32And(Node* node) {
// bytewise rotation.
Emit(kArmUxtb, g.DefineAsRegister(m.node()),
g.UseRegister(mshr.left().node()),
g.TempImmediate(mshr.right().Value()));
g.TempImmediate(mshr.right().ResolvedValue()));
return;
} else if (((shift == 8) || (shift == 16)) && (value == 0xFFFF)) {
// Merge SHR into AND by emitting a UXTH instruction with a
// bytewise rotation.
Emit(kArmUxth, g.DefineAsRegister(m.node()),
g.UseRegister(mshr.left().node()),
g.TempImmediate(mshr.right().Value()));
g.TempImmediate(mshr.right().ResolvedValue()));
return;
} else if (IsSupported(ARMv7) && (width != 0) &&
((leading_zeros + width) == 32)) {
@@ -1079,11 +1081,11 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (IsSupported(ARMv7) && m.left().IsWord32And() &&
m.right().IsInRange(0, 31)) {
uint32_t lsb = m.right().Value();
uint32_t lsb = m.right().ResolvedValue();
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
uint32_t value = static_cast<uint32_t>(mleft.right().Value() >> lsb)
<< lsb;
if (mleft.right().HasResolvedValue()) {
uint32_t value =
static_cast<uint32_t>(mleft.right().ResolvedValue() >> lsb) << lsb;
uint32_t width = base::bits::CountPopulation(value);
uint32_t msb = base::bits::CountLeadingZeros32(value);
if ((width != 0) && (msb + width + lsb == 32)) {
@@ -1100,9 +1102,9 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
Int32BinopMatcher m(node);
if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
Int32BinopMatcher mleft(m.left().node());
if (m.right().HasValue() && mleft.right().HasValue()) {
uint32_t sar = m.right().Value();
uint32_t shl = mleft.right().Value();
if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
uint32_t sar = m.right().ResolvedValue();
uint32_t shl = mleft.right().ResolvedValue();
if ((sar == shl) && (sar == 16)) {
Emit(kArmSxth, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
@@ -1204,7 +1206,7 @@ void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
// no register aliasing of input registers with output registers.
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
if (m.HasValue()) {
if (m.HasResolvedValue()) {
shift_operand = g.UseImmediate(m.node());
} else {
shift_operand = g.UseUniqueRegister(m.node());
@@ -1425,8 +1427,8 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitInt32Mul(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.right().HasValue() && m.right().Value() > 0) {
int32_t value = m.right().Value();
if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
int32_t value = m.right().ResolvedValue();
if (base::bits::IsPowerOfTwo(value - 1)) {
Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),


@@ -326,7 +326,7 @@ bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
if (nm.IsWord32And()) {
Int32BinopMatcher mright(right_node);
if (mright.right().Is(0xFF) || mright.right().Is(0xFFFF)) {
int32_t mask = mright.right().Value();
int32_t mask = mright.right().ResolvedValue();
*left_op = g->UseRegister(left_node);
*right_op = g->UseRegister(mright.left().node());
*opcode |= AddressingModeField::encode(
@@ -340,7 +340,7 @@ bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
Int32BinopMatcher mleft_of_right(mright.left().node());
if ((mright.right().Is(16) && mleft_of_right.right().Is(16)) ||
(mright.right().Is(24) && mleft_of_right.right().Is(24))) {
int32_t shift = mright.right().Value();
int32_t shift = mright.right().ResolvedValue();
*left_op = g->UseRegister(left_node);
*right_op = g->UseRegister(mleft_of_right.left().node());
*opcode |= AddressingModeField::encode(
@@ -481,8 +481,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
// We only need at most the last 6 bits of the shift.
inputs[input_count++] =
g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
inputs[input_count++] = g.UseImmediate(
static_cast<int>(m_shift.right().ResolvedValue() & 0x3F));
} else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
!is_add_sub)) {
if (must_commute_cond) cont->Commute();
@@ -490,8 +490,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
// We only need at most the last 6 bits of the shift.
inputs[input_count++] =
g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
inputs[input_count++] = g.UseImmediate(
static_cast<int>(m_shift.right().ResolvedValue() & 0x3F));
} else {
inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(right_node);
@@ -523,12 +523,12 @@ void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
ArchOpcode negate_opcode) {
Arm64OperandGenerator g(selector);
Matcher m(node);
if (m.right().HasValue() && (m.right().Value() < 0) &&
(m.right().Value() > std::numeric_limits<int>::min()) &&
g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
selector->Emit(negate_opcode, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
if (m.right().HasResolvedValue() && (m.right().ResolvedValue() < 0) &&
(m.right().ResolvedValue() > std::numeric_limits<int>::min()) &&
g.CanBeImmediate(-m.right().ResolvedValue(), kArithmeticImm)) {
selector->Emit(
negate_opcode, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(static_cast<int32_t>(-m.right().ResolvedValue())));
} else {
VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
}
@@ -540,8 +540,8 @@ void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
template <typename Matcher>
int32_t LeftShiftForReducedMultiply(Matcher* m) {
DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
if (m->right().HasValue() && m->right().Value() >= 3) {
uint64_t value_minus_one = m->right().Value() - 1;
if (m->right().HasResolvedValue() && m->right().ResolvedValue() >= 3) {
uint64_t value_minus_one = m->right().ResolvedValue() - 1;
if (base::bits::IsPowerOfTwo(value_minus_one)) {
return base::bits::WhichPowerOfTwo(value_minus_one);
}
@@ -580,12 +580,12 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
outputs[0] = g.DefineAsRegister(output == nullptr ? node : output);
ExternalReferenceMatcher m(base);
if (m.HasValue() && g.IsIntegerConstant(index) &&
selector->CanAddressRelativeToRootsRegister(m.Value())) {
if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
selector->isolate(), m.Value());
selector->isolate(), m.ResolvedValue());
input_count = 1;
// Check that the delta is a 32-bit integer due to the limitations of
// immediate operands.
@@ -872,12 +872,12 @@ void InstructionSelector::VisitStore(Node* node) {
}
ExternalReferenceMatcher m(base);
if (m.HasValue() && g.IsIntegerConstant(index) &&
CanAddressRelativeToRootsRegister(m.Value())) {
if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(isolate(),
m.Value());
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
isolate(), m.ResolvedValue());
if (is_int32(delta)) {
input_count = 2;
InstructionOperand inputs[2];
@@ -996,8 +996,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
m.right().HasValue()) {
uint32_t mask = m.right().Value();
m.right().HasResolvedValue()) {
uint32_t mask = m.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_width != 32) &&
@@ -1008,9 +1008,9 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
uint32_t lsb = mleft.right().Value() & 0x1F;
uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
// Ubfx cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -1036,8 +1036,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
m.right().HasValue()) {
uint64_t mask = m.right().Value();
m.right().HasResolvedValue()) {
uint64_t mask = m.right().ResolvedValue();
uint64_t mask_width = base::bits::CountPopulation(mask);
uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_width != 0) && (mask_width != 64) &&
@@ -1048,9 +1048,10 @@ void InstructionSelector::VisitWord64And(Node* node) {
// Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int64 shifts use `value % 64`.
uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
uint32_t lsb =
static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);
// Ubfx cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -1106,12 +1107,12 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
m.right().IsInRange(1, 31)) {
Arm64OperandGenerator g(this);
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
uint32_t mask = mleft.right().Value();
if (mleft.right().HasResolvedValue()) {
uint32_t mask = mleft.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
uint32_t shift = m.right().Value();
uint32_t shift = m.right().ResolvedValue();
DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
DCHECK_NE(0u, shift);
@@ -1189,13 +1190,14 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
// Select Ubfx or Sbfx for (x << (K & 0x1F)) OP (K & 0x1F), where
// OP is >>> or >> and (K & 0x1F) != 0.
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && m.right().HasValue() &&
(mleft.right().Value() & 0x1F) != 0 &&
(mleft.right().Value() & 0x1F) == (m.right().Value() & 0x1F)) {
if (mleft.right().HasResolvedValue() && m.right().HasResolvedValue() &&
(mleft.right().ResolvedValue() & 0x1F) != 0 &&
(mleft.right().ResolvedValue() & 0x1F) ==
(m.right().ResolvedValue() & 0x1F)) {
DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
int right_val = m.right().Value() & 0x1F;
int right_val = m.right().ResolvedValue() & 0x1F;
DCHECK_NE(right_val, 0);
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -1211,14 +1213,15 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
uint32_t lsb = m.right().Value() & 0x1F;
if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
uint32_t lsb = m.right().ResolvedValue() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
if (mleft.right().HasResolvedValue() &&
mleft.right().ResolvedValue() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint32_t mask = static_cast<uint32_t>(mleft.right().Value() >> lsb)
<< lsb;
uint32_t mask =
static_cast<uint32_t>(mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
@@ -1235,13 +1238,13 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
return;
}
if (m.left().IsUint32MulHigh() && m.right().HasValue() &&
if (m.left().IsUint32MulHigh() && m.right().HasResolvedValue() &&
CanCover(node, node->InputAt(0))) {
// Combine this shift with the multiply and shift that would be generated
// by Uint32MulHigh.
Arm64OperandGenerator g(this);
Node* left = m.left().node();
int shift = m.right().Value() & 0x1F;
int shift = m.right().ResolvedValue() & 0x1F;
InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
g.UseRegister(left->InputAt(1)));
@@ -1255,14 +1258,15 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().HasValue()) {
uint32_t lsb = m.right().Value() & 0x3F;
if (m.left().IsWord64And() && m.right().HasResolvedValue()) {
uint32_t lsb = m.right().ResolvedValue() & 0x3F;
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
if (mleft.right().HasResolvedValue() &&
mleft.right().ResolvedValue() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint64_t mask = static_cast<uint64_t>(mleft.right().Value() >> lsb)
<< lsb;
uint64_t mask =
static_cast<uint64_t>(mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_msb + mask_width + lsb) == 64) {
@@ -1285,13 +1289,13 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
}
Int32BinopMatcher m(node);
if (m.left().IsInt32MulHigh() && m.right().HasValue() &&
if (m.left().IsInt32MulHigh() && m.right().HasResolvedValue() &&
CanCover(node, node->InputAt(0))) {
// Combine this shift with the multiply and shift that would be generated
// by Int32MulHigh.
Arm64OperandGenerator g(this);
Node* left = m.left().node();
int shift = m.right().Value() & 0x1F;
int shift = m.right().ResolvedValue() & 0x1F;
InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
g.UseRegister(left->InputAt(1)));
@@ -1300,7 +1304,7 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
return;
}
if (m.left().IsInt32Add() && m.right().HasValue() &&
if (m.left().IsInt32Add() && m.right().HasResolvedValue() &&
CanCover(node, node->InputAt(0))) {
Node* add_node = m.left().node();
Int32BinopMatcher madd_node(add_node);
@@ -1843,10 +1847,10 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
if (value->opcode() == IrOpcode::kWord32Sar && CanCover(node, value)) {
Int32BinopMatcher m(value);
if (m.right().HasValue()) {
if (m.right().HasResolvedValue()) {
Arm64OperandGenerator g(this);
// Mask the shift amount, to keep the same semantics as Word32Sar.
int right = m.right().Value() & 0x1F;
int right = m.right().ResolvedValue() & 0x1F;
Emit(kArm64Sbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(right), g.TempImmediate(32 - right));
return;
@@ -2282,8 +2286,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
if (opcode == kArm64Cmp && !cont->IsPoisoned()) {
Int64Matcher m(right);
if (m.HasValue()) {
if (TryEmitCbzOrTbz<64>(selector, left, m.Value(), node,
if (m.HasResolvedValue()) {
if (TryEmitCbzOrTbz<64>(selector, left, m.ResolvedValue(), node,
cont->condition(), cont)) {
return;
}
@@ -2299,15 +2303,16 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
Int32BinopMatcher m(node);
FlagsCondition cond = cont->condition();
if (!cont->IsPoisoned()) {
if (m.right().HasValue()) {
if (TryEmitCbzOrTbz<32>(selector, m.left().node(), m.right().Value(),
node, cond, cont)) {
if (m.right().HasResolvedValue()) {
if (TryEmitCbzOrTbz<32>(selector, m.left().node(),
m.right().ResolvedValue(), node, cond, cont)) {
return;
}
} else if (m.left().HasValue()) {
} else if (m.left().HasResolvedValue()) {
FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
if (TryEmitCbzOrTbz<32>(selector, m.right().node(), m.left().Value(),
node, commuted_cond, cont)) {
if (TryEmitCbzOrTbz<32>(selector, m.right().node(),
m.left().ResolvedValue(), node, commuted_cond,
cont)) {
return;
}
}
@@ -2384,7 +2389,7 @@ struct TestAndBranchMatcher {
unsigned bit() const {
DCHECK(Matches());
return base::bits::CountTrailingZeros(matcher_.right().Value());
return base::bits::CountTrailingZeros(matcher_.right().ResolvedValue());
}
Node* input() const {
@@ -2399,8 +2404,8 @@ struct TestAndBranchMatcher {
void Initialize() {
if (cont_->IsBranch() && !cont_->IsPoisoned() &&
matcher_.right().HasValue() &&
base::bits::IsPowerOfTwo(matcher_.right().Value())) {
matcher_.right().HasResolvedValue() &&
base::bits::IsPowerOfTwo(matcher_.right().ResolvedValue())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont_->condition() == kEqual) ||
(cont_->condition() == kNotEqual));


@@ -162,12 +162,13 @@ class IA32OperandGenerator final : public OperandGenerator {
RegisterMode register_mode = kRegister) {
{
LoadMatcher<ExternalReferenceMatcher> m(node);
if (m.index().HasValue() && m.object().HasValue() &&
selector()->CanAddressRelativeToRootsRegister(m.object().Value())) {
if (m.index().HasResolvedValue() && m.object().HasResolvedValue() &&
selector()->CanAddressRelativeToRootsRegister(
m.object().ResolvedValue())) {
ptrdiff_t const delta =
m.index().Value() +
m.index().ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
selector()->isolate(), m.object().Value());
selector()->isolate(), m.object().ResolvedValue());
if (is_int32(delta)) {
inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
return kMode_Root;
@@ -1784,7 +1785,8 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
Float64Matcher mleft(left);
if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
if (mleft.HasResolvedValue() &&
(bit_cast<uint64_t>(mleft.ResolvedValue()) >> 32) == 0u) {
Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
return;
}


@@ -64,8 +64,8 @@ class MipsOperandGenerator final : public OperandGenerator {
bool CanBeImmediate(Node* node, InstructionCode opcode) {
Int32Matcher m(node);
if (!m.HasValue()) return false;
int32_t value = m.Value();
if (!m.HasResolvedValue()) return false;
int32_t value = m.ResolvedValue();
switch (ArchOpcodeField::decode(opcode)) {
case kMipsShl:
case kMipsSar:
@@ -486,8 +486,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
m.right().HasValue()) {
uint32_t mask = m.right().Value();
m.right().HasResolvedValue()) {
uint32_t mask = m.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
@@ -497,9 +497,9 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Select Ext for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
uint32_t lsb = mleft.right().Value() & 0x1F;
uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
// Ext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -519,8 +519,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Other cases fall through to the normal And operation.
}
}
if (m.right().HasValue()) {
uint32_t mask = m.right().Value();
if (m.right().HasResolvedValue()) {
uint32_t mask = m.right().ResolvedValue();
uint32_t shift = base::bits::CountPopulation(~mask);
uint32_t msb = base::bits::CountLeadingZeros32(~mask);
if (shift != 0 && shift != 32 && msb + shift == 32) {
@@ -543,7 +543,7 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
m.right().Is(-1)) {
Int32BinopMatcher mleft(m.left().node());
if (!mleft.right().HasValue()) {
if (!mleft.right().HasResolvedValue()) {
MipsOperandGenerator g(this);
Emit(kMipsNor, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
@@ -569,12 +569,12 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32BinopMatcher mleft(m.left().node());
// Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
// contiguous, and the shift immediate non-zero.
if (mleft.right().HasValue()) {
uint32_t mask = mleft.right().Value();
if (mleft.right().HasResolvedValue()) {
uint32_t mask = mleft.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
uint32_t shift = m.right().Value();
uint32_t shift = m.right().ResolvedValue();
DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
DCHECK_NE(0u, shift);
if ((shift + mask_width) >= 32) {
@@ -593,13 +593,14 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
uint32_t lsb = m.right().Value() & 0x1F;
if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
uint32_t lsb = m.right().ResolvedValue() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
if (mleft.right().HasResolvedValue() &&
mleft.right().ResolvedValue() != 0) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
@@ -620,10 +621,10 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
if (m.right().HasValue() && mleft.right().HasValue()) {
if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
MipsOperandGenerator g(this);
uint32_t sar = m.right().Value();
uint32_t shl = mleft.right().Value();
uint32_t sar = m.right().ResolvedValue();
uint32_t shl = mleft.right().ResolvedValue();
if ((sar == shl) && (sar == 16)) {
Emit(kMipsSeh, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()));
@@ -684,7 +685,7 @@ static void VisitWord32PairShift(InstructionSelector* selector,
MipsOperandGenerator g(selector);
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
if (m.HasValue()) {
if (m.HasResolvedValue()) {
shift_operand = g.UseImmediate(m.node());
} else {
shift_operand = g.UseUniqueRegister(m.node());
@@ -868,8 +869,9 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
if (mright.right().HasValue() && !m.left().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mright.right().Value());
if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
int32_t shift_value =
static_cast<int32_t>(mright.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMipsLsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
@@ -884,8 +886,9 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && !m.right().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
int32_t shift_value =
static_cast<int32_t>(mleft.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMipsLsa, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
@@ -907,8 +910,8 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
void InstructionSelector::VisitInt32Mul(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.right().HasValue() && m.right().Value() > 0) {
uint32_t value = static_cast<uint32_t>(m.right().Value());
if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
if (base::bits::IsPowerOfTwo(value)) {
Emit(kMipsShl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),


@@ -561,8 +561,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
m.right().HasValue()) {
uint32_t mask = m.right().Value();
m.right().HasResolvedValue()) {
uint32_t mask = m.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
@@ -572,9 +572,9 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Select Ext for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
uint32_t lsb = mleft.right().Value() & 0x1F;
uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
// Ext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -590,8 +590,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Other cases fall through to the normal And operation.
}
}
if (m.right().HasValue()) {
uint32_t mask = m.right().Value();
if (m.right().HasResolvedValue()) {
uint32_t mask = m.right().ResolvedValue();
uint32_t shift = base::bits::CountPopulation(~mask);
uint32_t msb = base::bits::CountLeadingZeros32(~mask);
if (shift != 0 && shift != 32 && msb + shift == 32) {
@@ -610,8 +610,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
m.right().HasValue()) {
uint64_t mask = m.right().Value();
m.right().HasResolvedValue()) {
uint64_t mask = m.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
@@ -621,9 +621,10 @@ void InstructionSelector::VisitWord64And(Node* node) {
// Select Dext for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int64 shifts use `value % 64`.
uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
uint32_t lsb =
static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);
// Dext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -643,8 +644,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
// Other cases fall through to the normal And operation.
}
}
if (m.right().HasValue()) {
uint64_t mask = m.right().Value();
if (m.right().HasResolvedValue()) {
uint64_t mask = m.right().ResolvedValue();
uint32_t shift = base::bits::CountPopulation(~mask);
uint32_t msb = base::bits::CountLeadingZeros64(~mask);
if (shift != 0 && shift < 32 && msb + shift == 64) {
@@ -673,7 +674,7 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
m.right().Is(-1)) {
Int32BinopMatcher mleft(m.left().node());
if (!mleft.right().HasValue()) {
if (!mleft.right().HasResolvedValue()) {
Mips64OperandGenerator g(this);
Emit(kMips64Nor32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
@@ -696,7 +697,7 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
m.right().Is(-1)) {
Int64BinopMatcher mleft(m.left().node());
if (!mleft.right().HasValue()) {
if (!mleft.right().HasResolvedValue()) {
Mips64OperandGenerator g(this);
Emit(kMips64Nor, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
@@ -722,12 +723,12 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32BinopMatcher mleft(m.left().node());
// Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
// contiguous, and the shift immediate non-zero.
if (mleft.right().HasValue()) {
uint32_t mask = mleft.right().Value();
if (mleft.right().HasResolvedValue()) {
uint32_t mask = mleft.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
uint32_t shift = m.right().Value();
uint32_t shift = m.right().ResolvedValue();
DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
DCHECK_NE(0u, shift);
if ((shift + mask_width) >= 32) {
@@ -746,13 +747,14 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
uint32_t lsb = m.right().Value() & 0x1F;
if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
uint32_t lsb = m.right().ResolvedValue() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
if (mleft.right().HasResolvedValue() &&
mleft.right().ResolvedValue() != 0) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
@@ -772,10 +774,10 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
if (m.right().HasValue() && mleft.right().HasValue()) {
if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
Mips64OperandGenerator g(this);
uint32_t sar = m.right().Value();
uint32_t shl = mleft.right().Value();
uint32_t sar = m.right().ResolvedValue();
uint32_t shl = mleft.right().ResolvedValue();
if ((sar == shl) && (sar == 16)) {
Emit(kMips64Seh, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()));
@@ -811,12 +813,12 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
// Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
// contiguous, and the shift immediate non-zero.
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
uint64_t mask = mleft.right().Value();
if (mleft.right().HasResolvedValue()) {
uint64_t mask = mleft.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
uint64_t shift = m.right().Value();
uint64_t shift = m.right().ResolvedValue();
DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
DCHECK_NE(0u, shift);
@@ -836,13 +838,14 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().HasValue()) {
uint32_t lsb = m.right().Value() & 0x3F;
if (m.left().IsWord64And() && m.right().HasResolvedValue()) {
uint32_t lsb = m.right().ResolvedValue() & 0x3F;
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
if (mleft.right().HasResolvedValue() &&
mleft.right().ResolvedValue() != 0) {
// Select Dext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
uint64_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_msb + mask_width + lsb) == 64) {
@@ -934,8 +937,9 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
if (mright.right().HasValue() && !m.left().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mright.right().Value());
if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
int32_t shift_value =
static_cast<int32_t>(mright.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMips64Lsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
@@ -950,8 +954,9 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && !m.right().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
int32_t shift_value =
static_cast<int32_t>(mleft.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMips64Lsa, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
@@ -975,8 +980,9 @@ void InstructionSelector::VisitInt64Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord64Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int64BinopMatcher mright(m.right().node());
if (mright.right().HasValue() && !m.left().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mright.right().Value());
if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
int32_t shift_value =
static_cast<int32_t>(mright.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMips64Dlsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
@@ -991,8 +997,9 @@ void InstructionSelector::VisitInt64Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord64Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && !m.right().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
int32_t shift_value =
static_cast<int32_t>(mleft.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMips64Dlsa, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
@@ -1018,8 +1025,8 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
void InstructionSelector::VisitInt32Mul(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.right().HasValue() && m.right().Value() > 0) {
uint32_t value = static_cast<uint32_t>(m.right().Value());
if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
if (base::bits::IsPowerOfTwo(value)) {
Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
@@ -1073,8 +1080,8 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
// TODO(dusmil): Add optimization for shifts larger than 32.
if (m.right().HasValue() && m.right().Value() > 0) {
uint32_t value = static_cast<uint32_t>(m.right().Value());
if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
if (base::bits::IsPowerOfTwo(value)) {
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),


@@ -464,7 +464,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher m(node);
int mb = 0;
int me = 0;
if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
if (m.right().HasResolvedValue() &&
IsContiguousMask32(m.right().ResolvedValue(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
@@ -473,7 +474,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 31)) {
left = mleft.left().node();
sh = mleft.right().Value();
sh = mleft.right().ResolvedValue();
if (m.left().IsWord32Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
@@ -502,7 +503,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher m(node);
int mb = 0;
int me = 0;
if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
if (m.right().HasResolvedValue() &&
IsContiguousMask64(m.right().ResolvedValue(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
@@ -511,7 +513,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 63)) {
left = mleft.left().node();
sh = mleft.right().Value();
sh = mleft.right().ResolvedValue();
if (m.left().IsWord64Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
@@ -625,11 +627,11 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
// Try to absorb logical-and into rlwinm
Int32BinopMatcher mleft(m.left().node());
int sh = m.right().Value();
int sh = m.right().ResolvedValue();
int mb;
int me;
if (mleft.right().HasValue() &&
IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
if (mleft.right().HasResolvedValue() &&
IsContiguousMask32(mleft.right().ResolvedValue() << sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
if (mb >= me) {
@@ -651,11 +653,11 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
// Try to absorb logical-and into rldic
Int64BinopMatcher mleft(m.left().node());
int sh = m.right().Value();
int sh = m.right().ResolvedValue();
int mb;
int me;
if (mleft.right().HasValue() &&
IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
if (mleft.right().HasResolvedValue() &&
IsContiguousMask64(mleft.right().ResolvedValue() << sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
if (mb >= me) {
@@ -694,11 +696,12 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
// Try to absorb logical-and into rlwinm
Int32BinopMatcher mleft(m.left().node());
int sh = m.right().Value();
int sh = m.right().ResolvedValue();
int mb;
int me;
if (mleft.right().HasValue() &&
IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
if (mleft.right().HasResolvedValue() &&
IsContiguousMask32((uint32_t)(mleft.right().ResolvedValue()) >> sh, &mb,
&me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
sh = (32 - sh) & 0x1F;
@@ -720,11 +723,12 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
// Try to absorb logical-and into rldic
Int64BinopMatcher mleft(m.left().node());
int sh = m.right().Value();
int sh = m.right().ResolvedValue();
int mb;
int me;
if (mleft.right().HasValue() &&
IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
if (mleft.right().HasResolvedValue() &&
IsContiguousMask64((uint64_t)(mleft.right().ResolvedValue()) >> sh, &mb,
&me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
sh = (64 - sh) & 0x3F;
@@ -841,7 +845,7 @@ void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
// no register aliasing of input registers with output registers.
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
if (m.HasValue()) {
if (m.HasResolvedValue()) {
shift_operand = g.UseImmediate(m.node());
} else {
shift_operand = g.UseUniqueRegister(m.node());
@@ -897,8 +901,8 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
Node* displacement = mleft.displacement();
if (displacement != nullptr) {
Int64Matcher mdisplacement(displacement);
DCHECK(mdisplacement.HasValue());
offset = mdisplacement.Value();
DCHECK(mdisplacement.HasResolvedValue());
offset = mdisplacement.ResolvedValue();
}
offset = SmiWordOffset(offset);
if (g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) {


@@ -898,7 +898,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher m(node);
int mb = 0;
int me = 0;
if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
if (m.right().HasResolvedValue() &&
IsContiguousMask64(m.right().ResolvedValue(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
@@ -906,7 +907,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 63)) {
left = mleft.left().node();
sh = mleft.right().Value();
sh = mleft.right().ResolvedValue();
if (m.left().IsWord64Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
@@ -950,11 +951,11 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
// TODO(mbrandy): eliminate left sign extension if right >= 32
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
Int64BinopMatcher mleft(m.left().node());
int sh = m.right().Value();
int sh = m.right().ResolvedValue();
int mb;
int me;
if (mleft.right().HasValue() &&
IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
if (mleft.right().HasResolvedValue() &&
IsContiguousMask64(mleft.right().ResolvedValue() << sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
if (mb >= me) {
@@ -991,11 +992,12 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
Int64BinopMatcher mleft(m.left().node());
int sh = m.right().Value();
int sh = m.right().ResolvedValue();
int mb;
int me;
if (mleft.right().HasValue() &&
IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
if (mleft.right().HasResolvedValue() &&
IsContiguousMask64((uint64_t)(mleft.right().ResolvedValue()) >> sh, &mb,
&me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
sh = (64 - sh) & 0x3F;
@@ -1119,7 +1121,7 @@ void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
// no register aliasing of input registers with output registers.
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
if (m.HasValue()) {
if (m.HasResolvedValue()) {
shift_operand = g.UseImmediate(m.node());
} else {
shift_operand = g.UseUniqueRegister(m.node());


@@ -191,12 +191,13 @@ class X64OperandGenerator final : public OperandGenerator {
size_t* input_count) {
{
LoadMatcher<ExternalReferenceMatcher> m(operand);
if (m.index().HasValue() && m.object().HasValue() &&
selector()->CanAddressRelativeToRootsRegister(m.object().Value())) {
if (m.index().HasResolvedValue() && m.object().HasResolvedValue() &&
selector()->CanAddressRelativeToRootsRegister(
m.object().ResolvedValue())) {
ptrdiff_t const delta =
m.index().Value() +
m.index().ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
selector()->isolate(), m.object().Value());
selector()->isolate(), m.object().ResolvedValue());
if (is_int32(delta)) {
inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
return kMode_Root;
@@ -1122,12 +1123,14 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
// {EmitIdentity} reuses the virtual register of the first input
// for the output. This is exactly what we want here.
EmitIdentity(node);
} else if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
} else if (m.right().HasResolvedValue() &&
g.CanBeImmediate(m.right().node())) {
// Turn subtractions of constant values into immediate "leal" instructions
// by negating the value.
Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(base::NegateWithWraparound(m.right().Value())));
Emit(
kX64Lea32 | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(base::NegateWithWraparound(m.right().ResolvedValue())));
} else {
VisitBinop(this, node, kX64Sub32);
}
@@ -1139,12 +1142,12 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
if (m.left().Is(0)) {
Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
} else {
if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
if (m.right().HasResolvedValue() && g.CanBeImmediate(m.right().node())) {
// Turn subtractions of constant values into immediate "leaq" instructions
// by negating the value.
Emit(kX64Lea | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(-static_cast<int32_t>(m.right().Value())));
g.TempImmediate(-static_cast<int32_t>(m.right().ResolvedValue())));
return;
}
VisitBinop(this, node, kX64Sub);
@@ -2034,8 +2037,8 @@ void VisitWord64EqualImpl(InstructionSelector* selector, Node* node,
const RootsTable& roots_table = selector->isolate()->roots_table();
RootIndex root_index;
HeapObjectBinopMatcher m(node);
if (m.right().HasValue() &&
roots_table.IsRootHandle(m.right().Value(), &root_index)) {
if (m.right().HasResolvedValue() &&
roots_table.IsRootHandle(m.right().ResolvedValue(), &root_index)) {
InstructionCode opcode =
kX64Cmp | AddressingModeField::encode(kMode_Root);
return VisitCompare(
@@ -2061,14 +2064,14 @@ void VisitWord32EqualImpl(InstructionSelector* selector, Node* node,
// present.
{
CompressedHeapObjectBinopMatcher m(node);
if (m.right().HasValue()) {
if (m.right().HasResolvedValue()) {
left = m.left().node();
right = m.right().Value();
right = m.right().ResolvedValue();
} else {
HeapObjectBinopMatcher m2(node);
if (m2.right().HasValue()) {
if (m2.right().HasResolvedValue()) {
left = m2.left().node();
right = m2.right().Value();
right = m2.right().ResolvedValue();
}
}
}
@@ -2574,7 +2577,8 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
Float64Matcher mleft(left);
if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
if (mleft.HasResolvedValue() &&
(bit_cast<uint64_t>(mleft.ResolvedValue()) >> 32) == 0u) {
Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
return;
}


@@ -318,17 +318,18 @@ TNode<Float64T> CodeAssembler::Float64Constant(double value) {
bool CodeAssembler::ToInt32Constant(Node* node, int32_t* out_value) {
{
Int64Matcher m(node);
if (m.HasValue() && m.IsInRange(std::numeric_limits<int32_t>::min(),
std::numeric_limits<int32_t>::max())) {
*out_value = static_cast<int32_t>(m.Value());
if (m.HasResolvedValue() &&
m.IsInRange(std::numeric_limits<int32_t>::min(),
std::numeric_limits<int32_t>::max())) {
*out_value = static_cast<int32_t>(m.ResolvedValue());
return true;
}
}
{
Int32Matcher m(node);
if (m.HasValue()) {
*out_value = m.Value();
if (m.HasResolvedValue()) {
*out_value = m.ResolvedValue();
return true;
}
}
@@ -338,8 +339,8 @@ bool CodeAssembler::ToInt32Constant(Node* node, int32_t* out_value) {
bool CodeAssembler::ToInt64Constant(Node* node, int64_t* out_value) {
Int64Matcher m(node);
if (m.HasValue()) *out_value = m.Value();
return m.HasValue();
if (m.HasResolvedValue()) *out_value = m.ResolvedValue();
return m.HasResolvedValue();
}
bool CodeAssembler::ToSmiConstant(Node* node, Smi* out_value) {
@@ -347,8 +348,8 @@ bool CodeAssembler::ToSmiConstant(Node* node, Smi* out_value) {
node = node->InputAt(0);
}
IntPtrMatcher m(node);
if (m.HasValue()) {
intptr_t value = m.Value();
if (m.HasResolvedValue()) {
intptr_t value = m.ResolvedValue();
// Make sure that the value is actually a smi
CHECK_EQ(0, value & ((static_cast<intptr_t>(1) << kSmiShiftSize) - 1));
*out_value = Smi(static_cast<Address>(value));
@@ -363,8 +364,8 @@ bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t* out_value) {
node = node->InputAt(0);
}
IntPtrMatcher m(node);
if (m.HasValue()) *out_value = m.Value();
return m.HasValue();
if (m.HasResolvedValue()) *out_value = m.ResolvedValue();
return m.HasResolvedValue();
}
bool CodeAssembler::IsUndefinedConstant(TNode<Object> node) {


@@ -20,18 +20,15 @@ namespace compiler {
namespace {
Decision DecideCondition(JSHeapBroker* broker, Node* const cond) {
switch (cond->opcode()) {
case IrOpcode::kFoldConstant: {
return DecideCondition(broker, cond->InputAt(1));
}
Node* unwrapped = SkipValueIdentities(cond);
switch (unwrapped->opcode()) {
case IrOpcode::kInt32Constant: {
Int32Matcher mcond(cond);
return mcond.Value() ? Decision::kTrue : Decision::kFalse;
Int32Matcher m(unwrapped);
return m.ResolvedValue() ? Decision::kTrue : Decision::kFalse;
}
case IrOpcode::kHeapConstant: {
HeapObjectMatcher mcond(cond);
return mcond.Ref(broker).BooleanValue() ? Decision::kTrue
: Decision::kFalse;
HeapObjectMatcher m(unwrapped);
return m.Ref(broker).BooleanValue() ? Decision::kTrue : Decision::kFalse;
}
default:
return Decision::kUnknown;
@@ -436,7 +433,7 @@ Reduction CommonOperatorReducer::ReduceSwitch(Node* node) {
// non-matching cases as dead code (same for an unused IfDefault), because the
// Switch itself will be marked as dead code.
Int32Matcher mswitched(switched_value);
if (mswitched.HasValue()) {
if (mswitched.HasResolvedValue()) {
bool matched = false;
size_t const projection_count = node->op()->ControlOutputCount();
@@ -447,7 +444,7 @@ Reduction CommonOperatorReducer::ReduceSwitch(Node* node) {
Node* if_value = projections[i];
DCHECK_EQ(IrOpcode::kIfValue, if_value->opcode());
const IfValueParameters& p = IfValueParametersOf(if_value->op());
if (p.value() == mswitched.Value()) {
if (p.value() == mswitched.ResolvedValue()) {
matched = true;
Replace(if_value, control);
break;


@@ -79,8 +79,8 @@ bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
if (cond->opcode() != IrOpcode::kWord32Equal) return false;
Int32BinopMatcher m(cond);
Node* index = m.left().node();
if (!m.right().HasValue()) return false;
int32_t value = m.right().Value();
if (!m.right().HasResolvedValue()) return false;
int32_t value = m.right().ResolvedValue();
ZoneSet<int32_t> values(zone());
values.insert(value);
@@ -104,8 +104,8 @@ bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
if (cond1->opcode() != IrOpcode::kWord32Equal) break;
Int32BinopMatcher m1(cond1);
if (m1.left().node() != index) break;
if (!m1.right().HasValue()) break;
int32_t value1 = m1.right().Value();
if (!m1.right().HasResolvedValue()) break;
int32_t value1 = m1.right().ResolvedValue();
if (values.find(value1) != values.end()) break;
DCHECK_NE(value, value1);


@@ -94,13 +94,13 @@ bool OffsetMayAlias(Node* offset1, MachineRepresentation repr1, Node* offset2,
IntPtrMatcher matcher1(offset1);
IntPtrMatcher matcher2(offset2);
// If either of the offsets is variable, accesses may alias
if (!matcher1.HasValue() || !matcher2.HasValue()) {
if (!matcher1.HasResolvedValue() || !matcher2.HasResolvedValue()) {
return true;
}
// Otherwise, we return whether accesses overlap
intptr_t start1 = matcher1.Value();
intptr_t start1 = matcher1.ResolvedValue();
intptr_t end1 = start1 + ElementSizeInBytes(repr1);
intptr_t start2 = matcher2.Value();
intptr_t start2 = matcher2.ResolvedValue();
intptr_t end2 = start2 + ElementSizeInBytes(repr2);
return !(end1 <= start2 || end2 <= start1);
}


@@ -2252,7 +2252,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
// are all zero, and if so we know that we can perform a division
// safely (and fast by doing an arithmetic - aka sign preserving -
// right shift on {lhs}).
int32_t divisor = m.Value();
int32_t divisor = m.ResolvedValue();
Node* mask = __ Int32Constant(divisor - 1);
Node* shift = __ Int32Constant(base::bits::WhichPowerOfTwo(divisor));
Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero);
@ -2474,7 +2474,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
// are all zero, and if so we know that we can perform a division
// safely (and fast by doing a logical - aka zero extending - right
// shift on {lhs}).
uint32_t divisor = m.Value();
uint32_t divisor = m.ResolvedValue();
Node* mask = __ Uint32Constant(divisor - 1);
Node* shift = __ Uint32Constant(base::bits::WhichPowerOfTwo(divisor));
Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero);
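Both lowerings rest on the same fact: for a power-of-two divisor, the division is exact precisely when the low log2(divisor) bits of lhs are zero, and then a single right shift computes the quotient. A standalone sketch for the signed case (DivExactByPowerOfTwo is illustrative, not V8 API):

#include <cassert>
#include <cstdint>

// Exact division by a power-of-two divisor: when (lhs & (divisor - 1)) == 0,
// an arithmetic (sign-preserving) right shift equals lhs / divisor.
int32_t DivExactByPowerOfTwo(int32_t lhs, int32_t divisor) {
  assert(divisor > 0 && (divisor & (divisor - 1)) == 0);
  assert((lhs & (divisor - 1)) == 0);  // the lowering deopts on this check
  int shift = 0;
  while ((divisor >> shift) != 1) ++shift;  // log2(divisor)
  return lhs >> shift;
}

int main() {
  assert(DivExactByPowerOfTwo(-64, 16) == -4);
  assert(DivExactByPowerOfTwo(48, 8) == 6);
  return 0;
}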


@ -5,6 +5,7 @@
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/execution/frame-constants.h"
@ -68,17 +69,6 @@ Reduction EscapeAnalysisReducer::ReplaceNode(Node* original,
return NoChange();
}
namespace {
Node* SkipTypeGuards(Node* node) {
while (node->opcode() == IrOpcode::kTypeGuard) {
node = NodeProperties::GetValueInput(node, 0);
}
return node;
}
} // namespace
Node* EscapeAnalysisReducer::ObjectIdNode(const VirtualObject* vobject) {
VirtualObject::Id id = vobject->id();
if (id >= object_id_cache_.size()) object_id_cache_.resize(id + 1);
@ -185,8 +175,8 @@ Node* EscapeAnalysisReducer::ReduceDeoptState(Node* node, Node* effect,
i);
}
return new_node.Get();
} else if (const VirtualObject* vobject =
analysis_result().GetVirtualObject(SkipTypeGuards(node))) {
} else if (const VirtualObject* vobject = analysis_result().GetVirtualObject(
SkipValueIdentities(node))) {
if (vobject->HasEscaped()) return node;
if (deduplicator->SeenBefore(vobject)) {
return ObjectIdNode(vobject);


@ -559,9 +559,9 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
switch (op->opcode()) {
case IrOpcode::kAllocate: {
NumberMatcher size(current->ValueInput(0));
if (!size.HasValue()) break;
int size_int = static_cast<int>(size.Value());
if (size_int != size.Value()) break;
if (!size.HasResolvedValue()) break;
int size_int = static_cast<int>(size.ResolvedValue());
if (size_int != size.ResolvedValue()) break;
if (const VirtualObject* vobject = current->InitVirtualObject(size_int)) {
// Initialize with dead nodes as a sentinel for uninitialized memory.
for (Variable field : *vobject) {


@ -680,9 +680,9 @@ void Int64Lowering::LowerNode(Node* node) {
? GetReplacementLow(node->InputAt(1))
: node->InputAt(1);
Int32Matcher m(shift);
if (m.HasValue()) {
if (m.HasResolvedValue()) {
// Precondition: 0 <= shift < 64.
int32_t shift_value = m.Value() & 0x3F;
int32_t shift_value = m.ResolvedValue() & 0x3F;
if (shift_value == 0) {
ReplaceNode(node, GetReplacementLow(input),
GetReplacementHigh(input));


@ -2725,7 +2725,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
// to ensure any exception is thrown in the correct context.
Node* context;
HeapObjectMatcher m(target);
if (m.HasValue()) {
if (m.HasResolvedValue()) {
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
if (should_disallow_heap_access() && !function.serialized()) {
TRACE_BROKER_MISSING(broker(), "Serialize call on function " << function);
@ -3954,7 +3954,7 @@ namespace {
bool ShouldUseCallICFeedback(Node* node) {
HeapObjectMatcher m(node);
if (m.HasValue() || m.IsCheckClosure() || m.IsJSCreateClosure()) {
if (m.HasResolvedValue() || m.IsCheckClosure() || m.IsJSCreateClosure()) {
// Don't use CallIC feedback when we know the function
// being called, i.e. either know the closure itself or
// at least the SharedFunctionInfo.
@ -4001,7 +4001,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Try to specialize JSCall {node}s with constant {target}s.
HeapObjectMatcher m(target);
if (m.HasValue()) {
if (m.HasResolvedValue()) {
ObjectRef target_ref = m.Ref(broker());
if (target_ref.IsJSFunction()) {
JSFunctionRef function = target_ref.AsJSFunction();
@ -4600,7 +4600,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
arity, feedback_target->AsAllocationSite().object()));
return Changed(node);
} else if (feedback_target.has_value() &&
!HeapObjectMatcher(new_target).HasValue() &&
!HeapObjectMatcher(new_target).HasResolvedValue() &&
feedback_target->map().is_constructor()) {
Node* new_target_feedback = jsgraph()->Constant(*feedback_target);
@ -4625,7 +4625,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Try to specialize JSConstruct {node}s with constant {target}s.
HeapObjectMatcher m(target);
if (m.HasValue()) {
if (m.HasResolvedValue()) {
HeapObjectRef target_ref = m.Ref(broker());
// Raise a TypeError if the {target} is not a constructor.
@ -4681,7 +4681,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// constructor), {value} will be ignored and therefore we can lower
// to {JSCreate}. See https://tc39.es/ecma262/#sec-object-value.
HeapObjectMatcher mnew_target(new_target);
if (mnew_target.HasValue() &&
if (mnew_target.HasResolvedValue() &&
!mnew_target.Ref(broker()).equals(function)) {
// Drop the value inputs.
node->RemoveInput(n.FeedbackVectorIndex());
@ -6021,7 +6021,7 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
Node* position = n.ArgumentOr(1, jsgraph()->ZeroConstant());
HeapObjectMatcher m(search_string);
if (m.HasValue()) {
if (m.HasResolvedValue()) {
ObjectRef target_ref = m.Ref(broker());
if (target_ref.IsString()) {
StringRef str = target_ref.AsString();
@ -7345,7 +7345,7 @@ Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
// Check that the {offset} is within range for the {receiver}.
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
if (m.HasResolvedValue()) {
// We only deal with DataViews here whose [[ByteLength]] is at least
// {element_size}, as for all other DataViews it'll be out-of-bounds.
JSDataViewRef dataview = m.Ref(broker()).AsJSDataView();
@ -7710,7 +7710,7 @@ Reduction JSCallReducer::ReduceBigIntAsUintN(Node* node) {
NumberMatcher matcher(bits);
if (matcher.IsInteger() && matcher.IsInRange(0, 64)) {
const int bits_value = static_cast<int>(matcher.Value());
const int bits_value = static_cast<int>(matcher.ResolvedValue());
value = effect = graph()->NewNode(simplified()->CheckBigInt(p.feedback()),
value, effect, control);
value = graph()->NewNode(simplified()->BigIntAsUintN(bits_value), value);


@ -77,7 +77,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
out.node = node;
HeapObjectMatcher m(callee);
if (m.HasValue() && m.Ref(broker()).IsJSFunction()) {
if (m.HasResolvedValue() && m.Ref(broker()).IsJSFunction()) {
out.functions[0] = m.Ref(broker()).AsJSFunction();
JSFunctionRef function = out.functions[0].value();
if (CanConsiderForInlining(broker(), function)) {
@ -94,7 +94,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
}
for (int n = 0; n < value_input_count; ++n) {
HeapObjectMatcher m(callee->InputAt(n));
if (!m.HasValue() || !m.Ref(broker()).IsJSFunction()) {
if (!m.HasResolvedValue() || !m.Ref(broker()).IsJSFunction()) {
out.num_functions = 0;
return out;
}


@ -287,7 +287,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
// calls whenever the target is a constant function object, as follows:
// - JSCall(target:constant, receiver, args..., vector)
// - JSConstruct(target:constant, new.target, args..., vector)
if (match.HasValue() && match.Ref(broker()).IsJSFunction()) {
if (match.HasResolvedValue() && match.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// The function might have not been called yet.
@ -338,7 +338,7 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
Node* target = node->InputAt(JSCallOrConstructNode::TargetIndex());
HeapObjectMatcher match(target);
if (match.HasValue() && match.Ref(broker()).IsJSFunction()) {
if (match.HasResolvedValue() && match.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// This was already ensured by DetermineCallTarget
CHECK(function.has_feedback_vector());


@ -323,7 +323,7 @@ Reduction JSIntrinsicLowering::ReduceToObject(Node* node) {
Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
// ToString is unnecessary if the input is a string.
HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
if (m.HasValue() && m.Ref(broker()).IsString()) {
if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
ReplaceWithValue(node, m.node());
return Replace(m.node());
}


@ -136,13 +136,13 @@ base::Optional<size_t> JSNativeContextSpecialization::GetMaxStringLength(
}
HeapObjectMatcher matcher(node);
if (matcher.HasValue() && matcher.Ref(broker).IsString()) {
if (matcher.HasResolvedValue() && matcher.Ref(broker).IsString()) {
StringRef input = matcher.Ref(broker).AsString();
return input.length();
}
NumberMatcher number_matcher(node);
if (number_matcher.HasValue()) {
if (number_matcher.HasResolvedValue()) {
return kBase10MaximalLength + 1;
}
@ -157,7 +157,7 @@ Reduction JSNativeContextSpecialization::ReduceJSToString(Node* node) {
Reduction reduction;
HeapObjectMatcher matcher(input);
if (matcher.HasValue() && matcher.Ref(broker()).IsString()) {
if (matcher.HasResolvedValue() && matcher.Ref(broker()).IsString()) {
reduction = Changed(input); // JSToString(x:string) => x
ReplaceWithValue(node, reduction.replacement());
return reduction;
@ -168,9 +168,9 @@ Reduction JSNativeContextSpecialization::ReduceJSToString(Node* node) {
// so alternative approach should be designed if this causes performance
// regressions and the stronger optimization should be re-implemented.
NumberMatcher number_matcher(input);
if (number_matcher.HasValue()) {
const StringConstantBase* base =
shared_zone()->New<NumberToStringConstant>(number_matcher.Value());
if (number_matcher.HasResolvedValue()) {
const StringConstantBase* base = shared_zone()->New<NumberToStringConstant>(
number_matcher.ResolvedValue());
reduction =
Replace(graph()->NewNode(common()->DelayedStringConstant(base)));
ReplaceWithValue(node, reduction.replacement());
@ -186,11 +186,12 @@ JSNativeContextSpecialization::CreateDelayedStringConstant(Node* node) {
return StringConstantBaseOf(node->op());
} else {
NumberMatcher number_matcher(node);
if (number_matcher.HasValue()) {
return shared_zone()->New<NumberToStringConstant>(number_matcher.Value());
if (number_matcher.HasResolvedValue()) {
return shared_zone()->New<NumberToStringConstant>(
number_matcher.ResolvedValue());
} else {
HeapObjectMatcher matcher(node);
if (matcher.HasValue() && matcher.Ref(broker()).IsString()) {
if (matcher.HasResolvedValue() && matcher.Ref(broker()).IsString()) {
StringRef s = matcher.Ref(broker()).AsString();
return shared_zone()->New<StringLiteral>(
s.object(), static_cast<size_t>(s.length()));
@ -208,7 +209,7 @@ bool IsStringConstant(JSHeapBroker* broker, Node* node) {
}
HeapObjectMatcher matcher(node);
return matcher.HasValue() && matcher.Ref(broker).IsString();
return matcher.HasResolvedValue() && matcher.Ref(broker).IsString();
}
} // namespace
@ -352,7 +353,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
// Check if the input is a known JSFunction.
HeapObjectMatcher m(constructor);
if (!m.HasValue()) return NoChange();
if (!m.HasResolvedValue()) return NoChange();
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
MapRef function_map = function.map();
if (should_disallow_heap_access() && !function_map.serialized_prototype()) {
@ -389,7 +390,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// we have feedback from the InstanceOfIC.
Handle<JSObject> receiver;
HeapObjectMatcher m(constructor);
if (m.HasValue() && m.Ref(broker()).IsJSObject()) {
if (m.HasResolvedValue() && m.Ref(broker()).IsJSObject()) {
receiver = m.Ref(broker()).AsJSObject().object();
} else if (p.feedback().IsValid()) {
ProcessedFeedback const& feedback =
@ -594,7 +595,7 @@ Reduction JSNativeContextSpecialization::ReduceJSHasInPrototypeChain(
// Check if we can constant-fold the prototype chain walk
// for the given {value} and the {prototype}.
HeapObjectMatcher m(prototype);
if (m.HasValue()) {
if (m.HasResolvedValue()) {
InferHasInPrototypeChainResult result =
InferHasInPrototypeChain(value, effect, m.Ref(broker()));
if (result != kMayBeInPrototypeChain) {
@ -615,7 +616,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// Check if the {constructor} is known at compile time.
HeapObjectMatcher m(constructor);
if (!m.HasValue()) return NoChange();
if (!m.HasResolvedValue()) return NoChange();
if (m.Ref(broker()).IsJSBoundFunction()) {
// OrdinaryHasInstance on bound functions turns into a recursive invocation
@ -681,7 +682,7 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
// Check if the {constructor} is the %Promise% function.
HeapObjectMatcher m(constructor);
if (!m.HasValue() ||
if (!m.HasResolvedValue() ||
!m.Ref(broker()).equals(native_context().promise_function())) {
return NoChange();
}
@ -1387,7 +1388,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
// Check if we have a constant receiver.
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
if (m.HasResolvedValue()) {
ObjectRef object = m.Ref(broker());
if (object.IsJSFunction() &&
name.equals(ObjectRef(broker(), factory()->prototype_string()))) {
@ -1555,7 +1556,7 @@ namespace {
base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
Node* receiver) {
HeapObjectMatcher m(receiver);
if (!m.HasValue()) return base::nullopt;
if (!m.HasResolvedValue()) return base::nullopt;
ObjectRef object = m.Ref(broker);
if (!object.IsJSTypedArray()) return base::nullopt;
JSTypedArrayRef typed_array = object.AsJSTypedArray();
@ -1859,7 +1860,7 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
// constant-fold the load.
NumberMatcher mkey(key);
if (mkey.IsInteger() && mkey.IsInRange(0.0, kMaxUInt32 - 1.0)) {
uint32_t index = static_cast<uint32_t>(mkey.Value());
uint32_t index = static_cast<uint32_t>(mkey.ResolvedValue());
base::Optional<ObjectRef> element =
receiver_ref.GetOwnConstantElement(index);
if (!element.has_value() && receiver_ref.IsJSArray()) {
@ -2526,8 +2527,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
if (!p.feedback().IsValid()) return NoChange();
NumberMatcher mflags(n.flags());
CHECK(mflags.HasValue());
DataPropertyInLiteralFlags cflags(mflags.Value());
CHECK(mflags.HasResolvedValue());
DataPropertyInLiteralFlags cflags(mflags.ResolvedValue());
DCHECK(!(cflags & DataPropertyInLiteralFlag::kDontEnum));
if (cflags & DataPropertyInLiteralFlag::kSetFunctionName) return NoChange();
@ -3373,7 +3374,7 @@ bool JSNativeContextSpecialization::InferReceiverMaps(
base::Optional<MapRef> JSNativeContextSpecialization::InferReceiverRootMap(
Node* receiver) const {
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
if (m.HasResolvedValue()) {
MapRef map = m.Ref(broker()).map();
return map.FindRootMap();
} else if (m.IsJSCreate()) {


@ -40,8 +40,8 @@ TNode<Oddball> UndefinedConstant(JSGraph* jsgraph) {
FeedbackCellRef JSCreateClosureNode::GetFeedbackCellRefChecked(
JSHeapBroker* broker) const {
HeapObjectMatcher m(feedback_cell());
CHECK(m.HasValue());
return FeedbackCellRef(broker, m.Value());
CHECK(m.HasResolvedValue());
return FeedbackCellRef(broker, m.ResolvedValue());
}
std::ostream& operator<<(std::ostream& os, CallFrequency const& f) {


@ -107,11 +107,11 @@ class JSBinopReduction final {
GetBinaryOperationHint(node_) == BinaryOperationHint::kString) {
HeapObjectBinopMatcher m(node_);
JSHeapBroker* broker = lowering_->broker();
if (m.right().HasValue() && m.right().Ref(broker).IsString()) {
if (m.right().HasResolvedValue() && m.right().Ref(broker).IsString()) {
StringRef right_string = m.right().Ref(broker).AsString();
if (right_string.length() >= ConsString::kMinLength) return true;
}
if (m.left().HasValue() && m.left().Ref(broker).IsString()) {
if (m.left().HasResolvedValue() && m.left().Ref(broker).IsString()) {
StringRef left_string = m.left().Ref(broker).AsString();
if (left_string.length() >= ConsString::kMinLength) {
// The invariant for ConsString requires the left hand side to be
@ -989,7 +989,7 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
if (input_type.Is(Type::String())) {
HeapObjectMatcher m(input);
if (m.HasValue() && m.Ref(broker()).IsString()) {
if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
double number;
ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());

File diff suppressed because it is too large.


@ -139,7 +139,7 @@ Reduction MemoryLowering::ReduceAllocateRaw(
IntPtrMatcher m(size);
if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new &&
allocation_folding_ == AllocationFolding::kDoAllocationFolding) {
intptr_t const object_size = m.Value();
intptr_t const object_size = m.ResolvedValue();
AllocationState const* state = *state_ptr;
if (state->size() <= kMaxRegularHeapObjectSize - object_size &&
state->group()->allocation() == allocation_type) {


@ -11,6 +11,7 @@
#include "src/base/compiler-specific.h"
#include "src/codegen/external-reference.h"
#include "src/common/globals.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/numbers/double.h"
@ -48,96 +49,89 @@ struct NodeMatcher {
Node* node_;
};
inline Node* SkipValueIdentities(Node* node) {
#ifdef DEBUG
bool seen_fold_constant = false;
#endif
do {
#ifdef DEBUG
if (node->opcode() == IrOpcode::kFoldConstant) {
DCHECK(!seen_fold_constant);
seen_fold_constant = true;
}
#endif
} while (NodeProperties::IsValueIdentity(node, &node));
DCHECK_NOT_NULL(node);
return node;
}
// A pattern matcher for arbitrary value constants.
//
// Note that value identities on the input node are skipped when matching. The
// resolved value may not be a parameter of the input node. The node() method
// returns the unmodified input node. This is by design, as reducers may wish to
// match value constants but delay reducing the node until a later phase. For
// example, binary operator reducers may opt to keep FoldConstant operands while
// applying a reduction that matches on the constant value of the FoldConstant.
template <typename T, IrOpcode::Value kOpcode>
struct ValueMatcher : public NodeMatcher {
using ValueType = T;
explicit ValueMatcher(Node* node) : NodeMatcher(node) {
static_assert(kOpcode != IrOpcode::kFoldConstant, "unsupported opcode");
if (node->opcode() == IrOpcode::kFoldConstant) {
node = node->InputAt(1);
}
DCHECK_NE(node->opcode(), IrOpcode::kFoldConstant);
has_value_ = opcode() == kOpcode;
if (has_value_) {
value_ = OpParameter<T>(node->op());
explicit ValueMatcher(Node* node)
: NodeMatcher(node), resolved_value_(), has_resolved_value_(false) {
node = SkipValueIdentities(node);
has_resolved_value_ = node->opcode() == kOpcode;
if (has_resolved_value_) {
resolved_value_ = OpParameter<T>(node->op());
}
}
bool HasValue() const { return has_value_; }
const T& Value() const {
DCHECK(HasValue());
return value_;
bool HasResolvedValue() const { return has_resolved_value_; }
const T& ResolvedValue() const {
DCHECK(HasResolvedValue());
return resolved_value_;
}
private:
T value_;
bool has_value_;
T resolved_value_;
bool has_resolved_value_;
};
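To make the node()/ResolvedValue() split concrete, consider a hypothetical graph fragment (illustrative only) where c is Int32Constant[42] and f is FoldConstant(x, c):

Int32Matcher m(f);
CHECK(m.HasResolvedValue());      // matches through the FoldConstant
CHECK_EQ(42, m.ResolvedValue());  // the wrapped constant's value
CHECK_EQ(f, m.node());            // node() still returns the original input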
template <>
inline ValueMatcher<uint32_t, IrOpcode::kInt32Constant>::ValueMatcher(
Node* node)
: NodeMatcher(node),
value_(),
has_value_(opcode() == IrOpcode::kInt32Constant) {
if (has_value_) {
value_ = static_cast<uint32_t>(OpParameter<int32_t>(node->op()));
: NodeMatcher(node), resolved_value_(), has_resolved_value_(false) {
node = SkipValueIdentities(node);
has_resolved_value_ = node->opcode() == IrOpcode::kInt32Constant;
if (has_resolved_value_) {
resolved_value_ = static_cast<uint32_t>(OpParameter<int32_t>(node->op()));
}
}
template <>
inline ValueMatcher<int64_t, IrOpcode::kInt64Constant>::ValueMatcher(Node* node)
: NodeMatcher(node), value_(), has_value_(false) {
if (opcode() == IrOpcode::kInt32Constant) {
value_ = OpParameter<int32_t>(node->op());
has_value_ = true;
} else if (opcode() == IrOpcode::kInt64Constant) {
value_ = OpParameter<int64_t>(node->op());
has_value_ = true;
: NodeMatcher(node), resolved_value_(), has_resolved_value_(false) {
node = SkipValueIdentities(node);
if (node->opcode() == IrOpcode::kInt32Constant) {
resolved_value_ = OpParameter<int32_t>(node->op());
has_resolved_value_ = true;
} else if (node->opcode() == IrOpcode::kInt64Constant) {
resolved_value_ = OpParameter<int64_t>(node->op());
has_resolved_value_ = true;
}
}
template <>
inline ValueMatcher<uint64_t, IrOpcode::kInt64Constant>::ValueMatcher(
Node* node)
: NodeMatcher(node), value_(), has_value_(false) {
if (opcode() == IrOpcode::kInt32Constant) {
value_ = static_cast<uint32_t>(OpParameter<int32_t>(node->op()));
has_value_ = true;
} else if (opcode() == IrOpcode::kInt64Constant) {
value_ = static_cast<uint64_t>(OpParameter<int64_t>(node->op()));
has_value_ = true;
}
}
template <>
inline ValueMatcher<double, IrOpcode::kNumberConstant>::ValueMatcher(Node* node)
: NodeMatcher(node), value_(), has_value_(false) {
if (node->opcode() == IrOpcode::kNumberConstant) {
value_ = OpParameter<double>(node->op());
has_value_ = true;
} else if (node->opcode() == IrOpcode::kFoldConstant) {
node = node->InputAt(1);
DCHECK_NE(node->opcode(), IrOpcode::kFoldConstant);
}
}
template <>
inline ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant>::ValueMatcher(
Node* node)
: NodeMatcher(node), value_(), has_value_(false) {
if (node->opcode() == IrOpcode::kHeapConstant) {
value_ = OpParameter<Handle<HeapObject>>(node->op());
has_value_ = true;
} else if (node->opcode() == IrOpcode::kFoldConstant) {
node = node->InputAt(1);
DCHECK_NE(node->opcode(), IrOpcode::kFoldConstant);
: NodeMatcher(node), resolved_value_(), has_resolved_value_(false) {
node = SkipValueIdentities(node);
if (node->opcode() == IrOpcode::kInt32Constant) {
resolved_value_ = static_cast<uint32_t>(OpParameter<int32_t>(node->op()));
has_resolved_value_ = true;
} else if (node->opcode() == IrOpcode::kInt64Constant) {
resolved_value_ = static_cast<uint64_t>(OpParameter<int64_t>(node->op()));
has_resolved_value_ = true;
}
}
@ -147,24 +141,27 @@ struct IntMatcher final : public ValueMatcher<T, kOpcode> {
explicit IntMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
bool Is(const T& value) const {
return this->HasValue() && this->Value() == value;
return this->HasResolvedValue() && this->ResolvedValue() == value;
}
bool IsInRange(const T& low, const T& high) const {
return this->HasValue() && low <= this->Value() && this->Value() <= high;
return this->HasResolvedValue() && low <= this->ResolvedValue() &&
this->ResolvedValue() <= high;
}
bool IsMultipleOf(T n) const {
return this->HasValue() && (this->Value() % n) == 0;
return this->HasResolvedValue() && (this->ResolvedValue() % n) == 0;
}
bool IsPowerOf2() const {
return this->HasValue() && this->Value() > 0 &&
(this->Value() & (this->Value() - 1)) == 0;
return this->HasResolvedValue() && this->ResolvedValue() > 0 &&
(this->ResolvedValue() & (this->ResolvedValue() - 1)) == 0;
}
bool IsNegativePowerOf2() const {
return this->HasValue() && this->Value() < 0 &&
((this->Value() == std::numeric_limits<T>::min()) ||
(-this->Value() & (-this->Value() - 1)) == 0);
return this->HasResolvedValue() && this->ResolvedValue() < 0 &&
((this->ResolvedValue() == std::numeric_limits<T>::min()) ||
(-this->ResolvedValue() & (-this->ResolvedValue() - 1)) == 0);
}
bool IsNegative() const {
return this->HasResolvedValue() && this->ResolvedValue() < 0;
}
bool IsNegative() const { return this->HasValue() && this->Value() < 0; }
};
using Int32Matcher = IntMatcher<int32_t, IrOpcode::kInt32Constant>;
@ -186,28 +183,36 @@ struct FloatMatcher final : public ValueMatcher<T, kOpcode> {
explicit FloatMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
bool Is(const T& value) const {
return this->HasValue() && this->Value() == value;
return this->HasResolvedValue() && this->ResolvedValue() == value;
}
bool IsInRange(const T& low, const T& high) const {
return this->HasValue() && low <= this->Value() && this->Value() <= high;
return this->HasResolvedValue() && low <= this->ResolvedValue() &&
this->ResolvedValue() <= high;
}
bool IsMinusZero() const {
return this->Is(0.0) && std::signbit(this->Value());
return this->Is(0.0) && std::signbit(this->ResolvedValue());
}
bool IsNegative() const {
return this->HasResolvedValue() && this->ResolvedValue() < 0.0;
}
bool IsNaN() const {
return this->HasResolvedValue() && std::isnan(this->ResolvedValue());
}
bool IsZero() const {
return this->Is(0.0) && !std::signbit(this->ResolvedValue());
}
bool IsNegative() const { return this->HasValue() && this->Value() < 0.0; }
bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
bool IsZero() const { return this->Is(0.0) && !std::signbit(this->Value()); }
bool IsNormal() const {
return this->HasValue() && std::isnormal(this->Value());
return this->HasResolvedValue() && std::isnormal(this->ResolvedValue());
}
bool IsInteger() const {
return this->HasValue() && std::nearbyint(this->Value()) == this->Value();
return this->HasResolvedValue() &&
std::nearbyint(this->ResolvedValue()) == this->ResolvedValue();
}
bool IsPositiveOrNegativePowerOf2() const {
if (!this->HasValue() || (this->Value() == 0.0)) {
if (!this->HasResolvedValue() || (this->ResolvedValue() == 0.0)) {
return false;
}
Double value = Double(this->Value());
Double value = Double(this->ResolvedValue());
return !value.IsInfinite() && base::bits::IsPowerOfTwo(value.Significand());
}
};
@ -224,11 +229,12 @@ struct HeapObjectMatcherImpl final
: ValueMatcher<Handle<HeapObject>, kHeapConstantOpcode>(node) {}
bool Is(Handle<HeapObject> const& value) const {
return this->HasValue() && this->Value().address() == value.address();
return this->HasResolvedValue() &&
this->ResolvedValue().address() == value.address();
}
HeapObjectRef Ref(JSHeapBroker* broker) const {
return HeapObjectRef(broker, this->Value());
return HeapObjectRef(broker, this->ResolvedValue());
}
};
@ -242,7 +248,7 @@ struct ExternalReferenceMatcher final
explicit ExternalReferenceMatcher(Node* node)
: ValueMatcher<ExternalReference, IrOpcode::kExternalConstant>(node) {}
bool Is(const ExternalReference& value) const {
return this->HasValue() && this->Value() == value;
return this->HasResolvedValue() && this->ResolvedValue() == value;
}
};
@ -285,7 +291,9 @@ struct BinopMatcher : public NodeMatcher {
const Left& left() const { return left_; }
const Right& right() const { return right_; }
bool IsFoldable() const { return left().HasValue() && right().HasValue(); }
bool IsFoldable() const {
return left().HasResolvedValue() && right().HasResolvedValue();
}
bool LeftEqualsRight() const { return left().node() == right().node(); }
bool OwnsInput(Node* input) {
@ -309,7 +317,7 @@ struct BinopMatcher : public NodeMatcher {
private:
void PutConstantOnRight() {
if (left().HasValue() && !right().HasValue()) {
if (left().HasResolvedValue() && !right().HasResolvedValue()) {
SwapInputs();
}
}
@ -340,17 +348,17 @@ struct ScaleMatcher {
if (node->InputCount() < 2) return;
BinopMatcher m(node);
if (node->opcode() == kShiftOpcode) {
if (m.right().HasValue()) {
if (m.right().HasResolvedValue()) {
typename BinopMatcher::RightMatcher::ValueType value =
m.right().Value();
m.right().ResolvedValue();
if (value >= 0 && value <= 3) {
scale_ = static_cast<int>(value);
}
}
} else if (node->opcode() == kMulOpcode) {
if (m.right().HasValue()) {
if (m.right().HasResolvedValue()) {
typename BinopMatcher::RightMatcher::ValueType value =
m.right().Value();
m.right().ResolvedValue();
if (value == 1) {
scale_ = 0;
} else if (value == 2) {
@ -550,7 +558,7 @@ struct BaseWithIndexAndDisplacementMatcher {
if (right->opcode() == AddMatcher::kSubOpcode &&
OwnedByAddressingOperand(right)) {
AddMatcher right_matcher(right);
if (right_matcher.right().HasValue()) {
if (right_matcher.right().HasResolvedValue()) {
// (S + (B - D))
base = right_matcher.left().node();
displacement = right_matcher.right().node();
@ -562,7 +570,7 @@ struct BaseWithIndexAndDisplacementMatcher {
if (right->opcode() == AddMatcher::kAddOpcode &&
OwnedByAddressingOperand(right)) {
AddMatcher right_matcher(right);
if (right_matcher.right().HasValue()) {
if (right_matcher.right().HasResolvedValue()) {
// (S + (B + D))
base = right_matcher.left().node();
displacement = right_matcher.right().node();
@ -570,7 +578,7 @@ struct BaseWithIndexAndDisplacementMatcher {
// (S + (B + B))
base = right;
}
} else if (m.right().HasValue()) {
} else if (m.right().HasResolvedValue()) {
// (S + D)
displacement = right;
} else {
@ -585,7 +593,7 @@ struct BaseWithIndexAndDisplacementMatcher {
AddMatcher left_matcher(left);
Node* left_left = left_matcher.left().node();
Node* left_right = left_matcher.right().node();
if (left_matcher.right().HasValue()) {
if (left_matcher.right().HasResolvedValue()) {
if (left_matcher.HasIndexInput() && left_left->OwnedBy(left)) {
// ((S - D) + B)
index = left_matcher.IndexInput();
@ -612,7 +620,7 @@ struct BaseWithIndexAndDisplacementMatcher {
Node* left_left = left_matcher.left().node();
Node* left_right = left_matcher.right().node();
if (left_matcher.HasIndexInput() && left_left->OwnedBy(left)) {
if (left_matcher.right().HasValue()) {
if (left_matcher.right().HasResolvedValue()) {
// ((S + D) + B)
index = left_matcher.IndexInput();
scale = left_matcher.scale();
@ -620,7 +628,7 @@ struct BaseWithIndexAndDisplacementMatcher {
power_of_two_plus_one = left_matcher.power_of_two_plus_one();
displacement = left_right;
base = right;
} else if (m.right().HasValue()) {
} else if (m.right().HasResolvedValue()) {
if (left->OwnedBy(node)) {
// ((S + B) + D)
index = left_matcher.IndexInput();
@ -640,12 +648,12 @@ struct BaseWithIndexAndDisplacementMatcher {
base = right;
}
} else {
if (left_matcher.right().HasValue()) {
if (left_matcher.right().HasResolvedValue()) {
// ((B + D) + B)
index = left_left;
displacement = left_right;
base = right;
} else if (m.right().HasValue()) {
} else if (m.right().HasResolvedValue()) {
if (left->OwnedBy(node)) {
// ((B + B) + D)
index = left_left;
@ -663,7 +671,7 @@ struct BaseWithIndexAndDisplacementMatcher {
}
}
} else {
if (m.right().HasValue()) {
if (m.right().HasResolvedValue()) {
// (B + D)
base = left;
displacement = right;


@ -328,7 +328,7 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker,
receiver->opcode() == IrOpcode::kJSCreateArray);
HeapObjectMatcher mtarget(GetValueInput(receiver, 0));
HeapObjectMatcher mnewtarget(GetValueInput(receiver, 1));
if (mtarget.HasValue() && mnewtarget.HasValue() &&
if (mtarget.HasResolvedValue() && mnewtarget.HasResolvedValue() &&
mnewtarget.Ref(broker).IsJSFunction()) {
ObjectRef target = mtarget.Ref(broker);
JSFunctionRef newtarget = mnewtarget.Ref(broker).AsJSFunction();
@ -353,7 +353,7 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
JSHeapBroker* broker, Node* receiver, Node* effect,
ZoneHandleSet<Map>* maps_return) {
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
if (m.HasResolvedValue()) {
HeapObjectRef receiver = m.Ref(broker);
// We don't use ICs for the Array.prototype and the Object.prototype
// because the runtime has to be able to intercept them properly, so
@ -423,7 +423,7 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
if (IsSame(receiver, object)) {
Node* const value = GetValueInput(effect, 1);
HeapObjectMatcher m(value);
if (m.HasValue()) {
if (m.HasResolvedValue()) {
*maps_return = ZoneHandleSet<Map>(m.Ref(broker).AsMap().object());
return result;
}


@ -121,6 +121,21 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// the IfSuccess projection of {node} if present and {node} itself otherwise.
static Node* FindSuccessfulControlProjection(Node* node);
// Returns whether the node acts as the identity function on a value
// input. The input that is passed through is returned via {out_value}.
static bool IsValueIdentity(Node* node, Node** out_value) {
switch (node->opcode()) {
case IrOpcode::kTypeGuard:
*out_value = GetValueInput(node, 0);
return true;
case IrOpcode::kFoldConstant:
*out_value = GetValueInput(node, 1);
return true;
default:
return false;
}
}
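A caller that wants an entire chain stripped simply loops until the predicate fails, which is what SkipValueIdentities() in node-matchers.h does; sketched with a hypothetical starting node:

Node* n = input;  // hypothetical node, possibly wrapped in identities
while (NodeProperties::IsValueIdentity(n, &n)) {
  // Each iteration steps past one TypeGuard or FoldConstant.
}
// n now refers to the underlying value node.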
// ---------------------------------------------------------------------------
// Miscellaneous mutators.


@ -85,7 +85,7 @@ void PropertyAccessBuilder::BuildCheckMaps(
Node* receiver, Node** effect, Node* control,
ZoneVector<Handle<Map>> const& receiver_maps) {
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
if (m.HasResolvedValue()) {
MapRef receiver_map = m.Ref(broker()).map();
if (receiver_map.is_stable()) {
for (Handle<Map> map : receiver_maps) {
@ -159,7 +159,7 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
if (!access_info.holder().ToHandle(&holder)) {
// Otherwise, try to match the {receiver} as a constant.
HeapObjectMatcher m(receiver);
if (!m.HasValue() || !m.Ref(broker()).IsJSObject()) return nullptr;
if (!m.HasResolvedValue() || !m.Ref(broker()).IsJSObject()) return nullptr;
// Let us make sure the actual map of the constant receiver is among
// the maps in {access_info}.


@ -674,7 +674,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type output_type,
Node* use_node, UseInfo use_info) {
NumberMatcher m(node);
if (m.HasValue()) {
if (m.HasResolvedValue()) {
// BigInts are not used as number constants.
DCHECK(use_info.type_check() != TypeCheckKind::kBigInt);
switch (use_info.type_check()) {
@ -682,7 +682,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
case TypeCheckKind::kNumber:
case TypeCheckKind::kNumberOrBoolean:
case TypeCheckKind::kNumberOrOddball:
return jsgraph()->Float64Constant(m.Value());
return jsgraph()->Float64Constant(m.ResolvedValue());
case TypeCheckKind::kBigInt:
case TypeCheckKind::kHeapObject:
case TypeCheckKind::kSigned32:
@ -1089,7 +1089,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
}
case IrOpcode::kHeapConstant: {
HeapObjectMatcher m(node);
if (m.HasValue() && m.Ref(broker_).IsBigInt() &&
if (m.HasResolvedValue() && m.Ref(broker_).IsBigInt() &&
use_info.truncation().IsUsedAsWord64()) {
auto bigint = m.Ref(broker_).AsBigInt();
return jsgraph()->Int64Constant(


@ -1362,8 +1362,8 @@ class RepresentationSelector {
return kPointerWriteBarrier;
}
NumberMatcher m(value);
if (m.HasValue()) {
if (IsSmiDouble(m.Value())) {
if (m.HasResolvedValue()) {
if (IsSmiDouble(m.ResolvedValue())) {
// Storing a smi doesn't need a write barrier.
return kNoWriteBarrier;
}
@ -4279,7 +4279,7 @@ Node* SimplifiedLowering::Int32Div(Node* const node) {
return graph()->NewNode(machine()->Int32Sub(), zero, lhs);
} else if (m.right().Is(0)) {
return rhs;
} else if (machine()->Int32DivIsSafe() || m.right().HasValue()) {
} else if (machine()->Int32DivIsSafe() || m.right().HasResolvedValue()) {
return graph()->NewNode(machine()->Int32Div(), lhs, rhs, graph()->start());
}
@ -4350,7 +4350,7 @@ Node* SimplifiedLowering::Int32Mod(Node* const node) {
if (m.right().Is(-1) || m.right().Is(0)) {
return zero;
} else if (m.right().HasValue()) {
} else if (m.right().HasResolvedValue()) {
return graph()->NewNode(machine()->Int32Mod(), lhs, rhs, graph()->start());
}
@ -4463,7 +4463,7 @@ Node* SimplifiedLowering::Uint32Div(Node* const node) {
if (m.right().Is(0)) {
return zero;
} else if (machine()->Uint32DivIsSafe() || m.right().HasValue()) {
} else if (machine()->Uint32DivIsSafe() || m.right().HasResolvedValue()) {
return graph()->NewNode(machine()->Uint32Div(), lhs, rhs, graph()->start());
}
@ -4482,7 +4482,7 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
if (m.right().Is(0)) {
return zero;
} else if (m.right().HasValue()) {
} else if (m.right().HasResolvedValue()) {
return graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, graph()->start());
}


@ -20,8 +20,8 @@ namespace {
Decision DecideObjectIsSmi(Node* const input) {
NumberMatcher m(input);
if (m.HasValue()) {
return IsSmiDouble(m.Value()) ? Decision::kTrue : Decision::kFalse;
if (m.HasResolvedValue()) {
return IsSmiDouble(m.ResolvedValue()) ? Decision::kTrue : Decision::kFalse;
}
if (m.IsAllocate()) return Decision::kFalse;
if (m.IsChangeBitToTagged()) return Decision::kFalse;
@ -60,7 +60,7 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeTaggedToBit: {
HeapObjectMatcher m(node->InputAt(0));
if (m.HasValue()) {
if (m.HasResolvedValue()) {
return ReplaceInt32(m.Ref(broker()).BooleanValue());
}
if (m.IsChangeBitToTagged()) return Replace(m.InputAt(0));
@ -68,14 +68,14 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeFloat64ToTagged: {
Float64Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceNumber(m.Value());
if (m.HasResolvedValue()) return ReplaceNumber(m.ResolvedValue());
if (m.IsChangeTaggedToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kChangeInt31ToTaggedSigned:
case IrOpcode::kChangeInt32ToTagged: {
Int32Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceNumber(m.Value());
if (m.HasResolvedValue()) return ReplaceNumber(m.ResolvedValue());
if (m.IsChangeTaggedToInt32() || m.IsChangeTaggedSignedToInt32()) {
return Replace(m.InputAt(0));
}
@ -84,7 +84,7 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
case IrOpcode::kChangeTaggedToFloat64:
case IrOpcode::kTruncateTaggedToFloat64: {
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceFloat64(m.Value());
if (m.HasResolvedValue()) return ReplaceFloat64(m.ResolvedValue());
if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
return Replace(m.node()->InputAt(0));
}
@ -99,7 +99,8 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
case IrOpcode::kChangeTaggedSignedToInt32:
case IrOpcode::kChangeTaggedToInt32: {
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
if (m.HasResolvedValue())
return ReplaceInt32(DoubleToInt32(m.ResolvedValue()));
if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
return Change(node, machine()->ChangeFloat64ToInt32(), m.InputAt(0));
}
@ -110,7 +111,8 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeTaggedToUint32: {
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceUint32(DoubleToUint32(m.Value()));
if (m.HasResolvedValue())
return ReplaceUint32(DoubleToUint32(m.ResolvedValue()));
if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
return Change(node, machine()->ChangeFloat64ToUint32(), m.InputAt(0));
}
@ -119,12 +121,14 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeUint32ToTagged: {
Uint32Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
if (m.HasResolvedValue())
return ReplaceNumber(FastUI2D(m.ResolvedValue()));
break;
}
case IrOpcode::kTruncateTaggedToWord32: {
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
if (m.HasResolvedValue())
return ReplaceInt32(DoubleToInt32(m.ResolvedValue()));
if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged() ||
m.IsChangeUint32ToTagged()) {
return Replace(m.InputAt(0));
@ -136,8 +140,9 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kCheckedFloat64ToInt32: {
Float64Matcher m(node->InputAt(0));
if (m.HasValue() && IsInt32Double(m.Value())) {
Node* value = jsgraph()->Int32Constant(static_cast<int32_t>(m.Value()));
if (m.HasResolvedValue() && IsInt32Double(m.ResolvedValue())) {
Node* value =
jsgraph()->Int32Constant(static_cast<int32_t>(m.ResolvedValue()));
ReplaceWithValue(node, value);
return Replace(value);
}
@ -212,7 +217,8 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kNumberAbs: {
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceNumber(std::fabs(m.Value()));
if (m.HasResolvedValue())
return ReplaceNumber(std::fabs(m.ResolvedValue()));
break;
}
case IrOpcode::kReferenceEqual: {
@ -224,15 +230,16 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
// (x + a) + b => x + (a + b) where a and b are constants and have the
// same sign.
Int32BinopMatcher m(node);
if (m.right().HasValue()) {
if (m.right().HasResolvedValue()) {
Node* checked_int32_add = m.left().node();
if (checked_int32_add->opcode() == IrOpcode::kCheckedInt32Add) {
Int32BinopMatcher n(checked_int32_add);
if (n.right().HasValue() &&
(n.right().Value() >= 0) == (m.right().Value() >= 0)) {
if (n.right().HasResolvedValue() &&
(n.right().ResolvedValue() >= 0) ==
(m.right().ResolvedValue() >= 0)) {
int32_t val;
bool overflow = base::bits::SignedAddOverflow32(
n.right().Value(), m.right().Value(), &val);
n.right().ResolvedValue(), m.right().ResolvedValue(), &val);
if (!overflow) {
bool has_no_other_uses = true;
for (Edge edge : checked_int32_add->use_edges()) {
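The guard can be restated in isolation: folding is sound only when a and b share a sign and their sum stays in int32 range. A standalone sketch (CanFoldConstants is illustrative; the reducer uses base::bits::SignedAddOverflow32 instead):

#include <cstdint>
#include <limits>

// Fold (x + a) + b into x + (a + b) only if a and b have the same sign
// and a + b does not overflow int32.
bool CanFoldConstants(int32_t a, int32_t b, int32_t* sum) {
  if ((a >= 0) != (b >= 0)) return false;
  int64_t wide = static_cast<int64_t>(a) + b;  // widen to detect overflow
  if (wide < std::numeric_limits<int32_t>::min() ||
      wide > std::numeric_limits<int32_t>::max()) {
    return false;
  }
  *sum = static_cast<int32_t>(wide);
  return true;
}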


@ -813,7 +813,7 @@ Reduction TypedOptimization::ReduceJSToNumberInput(Node* input) {
if (input_type.Is(Type::String())) {
HeapObjectMatcher m(input);
if (m.HasValue() && m.Ref(broker()).IsString()) {
if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
double number;
ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());


@ -1055,7 +1055,7 @@ Node* WasmGraphBuilder::TrapIfEq32(wasm::TrapReason reason, Node* node,
int32_t val,
wasm::WasmCodePosition position) {
Int32Matcher m(node);
if (m.HasValue() && !m.Is(val)) return graph()->start();
if (m.HasResolvedValue() && !m.Is(val)) return graph()->start();
if (val == 0) {
return TrapIfFalse(reason, node, position);
} else {
@ -1077,7 +1077,7 @@ Node* WasmGraphBuilder::TrapIfEq64(wasm::TrapReason reason, Node* node,
int64_t val,
wasm::WasmCodePosition position) {
Int64Matcher m(node);
if (m.HasValue() && !m.Is(val)) return graph()->start();
if (m.HasResolvedValue() && !m.Is(val)) return graph()->start();
return TrapIfTrue(reason,
graph()->NewNode(mcgraph()->machine()->Word64Equal(), node,
mcgraph()->Int64Constant(val)),
@ -1137,9 +1137,10 @@ Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
if (!mcgraph()->machine()->Word32ShiftIsSafe()) {
// Shifts by constants are so common we pattern-match them here.
Int32Matcher match(node);
if (match.HasValue()) {
int32_t masked = (match.Value() & kMask32);
if (match.Value() != masked) node = mcgraph()->Int32Constant(masked);
if (match.HasResolvedValue()) {
int32_t masked = (match.ResolvedValue() & kMask32);
if (match.ResolvedValue() != masked)
node = mcgraph()->Int32Constant(masked);
} else {
node = graph()->NewNode(mcgraph()->machine()->Word32And(), node,
mcgraph()->Int32Constant(kMask32));
@ -1153,9 +1154,10 @@ Node* WasmGraphBuilder::MaskShiftCount64(Node* node) {
if (!mcgraph()->machine()->Word32ShiftIsSafe()) {
// Shifts by constants are so common we pattern-match them here.
Int64Matcher match(node);
if (match.HasValue()) {
int64_t masked = (match.Value() & kMask64);
if (match.Value() != masked) node = mcgraph()->Int64Constant(masked);
if (match.HasResolvedValue()) {
int64_t masked = (match.ResolvedValue() & kMask64);
if (match.ResolvedValue() != masked)
node = mcgraph()->Int64Constant(masked);
} else {
node = graph()->NewNode(mcgraph()->machine()->Word64And(), node,
mcgraph()->Int64Constant(kMask64));
@ -2350,10 +2352,10 @@ Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
MachineOperatorBuilder* m = mcgraph()->machine();
Int32Matcher mr(right);
if (mr.HasValue()) {
if (mr.Value() == 0) {
if (mr.HasResolvedValue()) {
if (mr.ResolvedValue() == 0) {
return mcgraph()->Int32Constant(0);
} else if (mr.Value() == -1) {
} else if (mr.ResolvedValue() == -1) {
// The result is the negation of the left input.
return graph()->NewNode(m->Int32Sub(), mcgraph()->Int32Constant(0), left);
}
@ -2393,8 +2395,8 @@ Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
Node* const zero = mcgraph()->Int32Constant(0);
Int32Matcher mr(right);
if (mr.HasValue()) {
if (mr.Value() == 0 || mr.Value() == -1) {
if (mr.HasResolvedValue()) {
if (mr.ResolvedValue() == 0 || mr.ResolvedValue() == -1) {
return zero;
}
return graph()->NewNode(m->Int32Mod(), left, right, control());
@ -3174,9 +3176,9 @@ Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
// Implement Rol by Ror since TurboFan does not have Rol opcode.
// TODO(weiliang): support Word32Rol opcode in TurboFan.
Int32Matcher m(right);
if (m.HasValue()) {
if (m.HasResolvedValue()) {
return Binop(wasm::kExprI32Ror, left,
mcgraph()->Int32Constant(32 - (m.Value() & 0x1F)));
mcgraph()->Int32Constant(32 - (m.ResolvedValue() & 0x1F)));
} else {
return Binop(wasm::kExprI32Ror, left,
Binop(wasm::kExprI32Sub, mcgraph()->Int32Constant(32), right));
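Both branches rely on the identity rol(x, n) == ror(x, (32 - n) & 31), which is easy to verify standalone (Rol and Ror below are illustrative helpers, not V8 API):

#include <cassert>
#include <cstdint>

uint32_t Ror(uint32_t x, uint32_t n) {
  n &= 31;
  return (x >> n) | (x << ((32 - n) & 31));
}

uint32_t Rol(uint32_t x, uint32_t n) {
  n &= 31;
  return (x << n) | (x >> ((32 - n) & 31));
}

int main() {
  for (uint32_t n = 0; n < 32; ++n) {
    assert(Rol(0x12345678u, n) == Ror(0x12345678u, (32 - n) & 31));
  }
  return 0;
}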
@ -3188,8 +3190,8 @@ Node* WasmGraphBuilder::BuildI64Rol(Node* left, Node* right) {
// TODO(weiliang): support Word64Rol opcode in TurboFan.
Int64Matcher m(right);
Node* inv_right =
m.HasValue()
? mcgraph()->Int64Constant(64 - (m.Value() & 0x3F))
m.HasResolvedValue()
? mcgraph()->Int64Constant(64 - (m.ResolvedValue() & 0x3F))
: Binop(wasm::kExprI64Sub, mcgraph()->Int64Constant(64), right);
return Binop(wasm::kExprI64Ror, left, inv_right);
}
@ -3643,8 +3645,8 @@ Node* WasmGraphBuilder::CheckBoundsAndAlignment(
// Don't emit an alignment check if the index is a constant.
// TODO(wasm): a constant match is also done above in {BoundsCheckMem}.
UintPtrMatcher match(index);
if (match.HasValue()) {
uintptr_t effective_offset = match.Value() + capped_offset;
if (match.HasResolvedValue()) {
uintptr_t effective_offset = match.ResolvedValue() + capped_offset;
if ((effective_offset & align_mask) != 0) {
// statically known to be unaligned; trap.
TrapIfEq32(wasm::kTrapUnalignedAccess, Int32Constant(0), 0, position);
@ -3710,8 +3712,8 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
// The end offset is smaller than the smallest memory, so only one check is
// required. Check to see if the index is also a constant.
UintPtrMatcher match(index);
if (match.HasValue()) {
uintptr_t index_val = match.Value();
if (match.HasResolvedValue()) {
uintptr_t index_val = match.ResolvedValue();
if (index_val < env_->min_memory_size - end_offset) {
// The input index is a constant and everything is statically within
// bounds of the smallest possible memory.
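The elision is justified by unsigned arithmetic alone: when end_offset < min_memory_size and index < min_memory_size - end_offset, then index + end_offset < min_memory_size, so the access is in bounds for every possible memory. As a standalone predicate (StaticallyInBounds is illustrative):

#include <cstdint>

// True when [index, index + end_offset] lies within every memory of at
// least min_memory_size bytes; ordered to avoid unsigned wrap-around.
bool StaticallyInBounds(uintptr_t index, uintptr_t end_offset,
                        uintptr_t min_memory_size) {
  return end_offset < min_memory_size &&
         index < min_memory_size - end_offset;
}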
@ -4261,8 +4263,8 @@ Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
if (mcgraph()->machine()->Is32()) return node;
// Fold instances of ChangeUint32ToUint64(IntConstant) directly.
Uint32Matcher matcher(node);
if (matcher.HasValue()) {
uintptr_t value = matcher.Value();
if (matcher.HasResolvedValue()) {
uintptr_t value = matcher.ResolvedValue();
return mcgraph()->IntPtrConstant(bit_cast<intptr_t>(value));
}
return graph()->NewNode(mcgraph()->machine()->ChangeUint32ToUint64(), node);


@ -74,8 +74,8 @@ void ContextSpecializationTester::CheckChangesToValue(
Reduction r = spec()->Reduce(node);
CHECK(r.Changed());
HeapObjectMatcher match(r.replacement());
CHECK(match.HasValue());
CHECK_EQ(*match.Value(), *expected_value);
CHECK(match.HasResolvedValue());
CHECK_EQ(*match.ResolvedValue(), *expected_value);
}
void ContextSpecializationTester::CheckContextInputAndDepthChanges(
@ -88,7 +88,7 @@ void ContextSpecializationTester::CheckContextInputAndDepthChanges(
Node* new_context = NodeProperties::GetContextInput(r.replacement());
CHECK_EQ(IrOpcode::kHeapConstant, new_context->opcode());
HeapObjectMatcher match(new_context);
CHECK_EQ(Context::cast(*match.Value()), *expected_new_context_object);
CHECK_EQ(Context::cast(*match.ResolvedValue()), *expected_new_context_object);
ContextAccess new_access = ContextAccessOf(r.replacement()->op());
CHECK_EQ(new_access.depth(), expected_new_depth);
@ -160,7 +160,7 @@ TEST(ReduceJSLoadContext0) {
Node* new_context_input = NodeProperties::GetContextInput(r.replacement());
CHECK_EQ(IrOpcode::kHeapConstant, new_context_input->opcode());
HeapObjectMatcher match(new_context_input);
CHECK_EQ(*native, Context::cast(*match.Value()));
CHECK_EQ(*native, Context::cast(*match.ResolvedValue()));
ContextAccess access = ContextAccessOf(r.replacement()->op());
CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, static_cast<int>(access.index()));
CHECK_EQ(0, static_cast<int>(access.depth()));
@ -176,8 +176,8 @@ TEST(ReduceJSLoadContext0) {
CHECK(r.replacement() != load);
HeapObjectMatcher match(r.replacement());
CHECK(match.HasValue());
CHECK_EQ(*expected, *match.Value());
CHECK(match.HasResolvedValue());
CHECK_EQ(*expected, *match.ResolvedValue());
}
// Clean up so that verifiers don't complain.
@ -474,7 +474,7 @@ TEST(ReduceJSStoreContext0) {
Node* new_context_input = NodeProperties::GetContextInput(r.replacement());
CHECK_EQ(IrOpcode::kHeapConstant, new_context_input->opcode());
HeapObjectMatcher match(new_context_input);
CHECK_EQ(*native, Context::cast(*match.Value()));
CHECK_EQ(*native, Context::cast(*match.ResolvedValue()));
ContextAccess access = ContextAccessOf(r.replacement()->op());
CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, static_cast<int>(access.index()));
CHECK_EQ(0, static_cast<int>(access.depth()));


@ -48,26 +48,26 @@ class RepresentationChangerTester : public HandleAndZoneScope,
// TODO(titzer): use ValueChecker / ValueUtil
void CheckInt32Constant(Node* n, int32_t expected) {
Int32Matcher m(n);
CHECK(m.HasValue());
CHECK_EQ(expected, m.Value());
CHECK(m.HasResolvedValue());
CHECK_EQ(expected, m.ResolvedValue());
}
void CheckInt64Constant(Node* n, int64_t expected) {
Int64Matcher m(n);
CHECK(m.HasValue());
CHECK_EQ(expected, m.Value());
CHECK(m.HasResolvedValue());
CHECK_EQ(expected, m.ResolvedValue());
}
void CheckUint32Constant(Node* n, uint32_t expected) {
Uint32Matcher m(n);
CHECK(m.HasValue());
CHECK_EQ(static_cast<int>(expected), static_cast<int>(m.Value()));
CHECK(m.HasResolvedValue());
CHECK_EQ(static_cast<int>(expected), static_cast<int>(m.ResolvedValue()));
}
void CheckFloat64Constant(Node* n, double expected) {
Float64Matcher m(n);
CHECK(m.HasValue());
CHECK_DOUBLE_EQ(expected, m.Value());
CHECK(m.HasResolvedValue());
CHECK_DOUBLE_EQ(expected, m.ResolvedValue());
}
void CheckFloat32Constant(Node* n, float expected) {
@ -78,15 +78,15 @@ class RepresentationChangerTester : public HandleAndZoneScope,
void CheckHeapConstant(Node* n, HeapObject expected) {
HeapObjectMatcher m(n);
CHECK(m.HasValue());
CHECK_EQ(expected, *m.Value());
CHECK(m.HasResolvedValue());
CHECK_EQ(expected, *m.ResolvedValue());
}
void CheckNumberConstant(Node* n, double expected) {
NumberMatcher m(n);
CHECK_EQ(IrOpcode::kNumberConstant, n->opcode());
CHECK(m.HasValue());
CHECK_DOUBLE_EQ(expected, m.Value());
CHECK(m.HasResolvedValue());
CHECK_DOUBLE_EQ(expected, m.ResolvedValue());
}
Node* Parameter(int index = 0) {