[maglev] Emit Int32->Smi checks eagerly

For SignedSmall binary ops, we know that the output has to fit in a Smi.
So, emit a Smi check eagerly after these operations, so that future Smi
untagging knows that it doesn't need to do a check.

Bug: v8:7700
Change-Id: I117c55caa5e2ebe870fd964908564d74df726546
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4020434
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84223}
This commit was authored by Leszek Swirski on 2022-11-11 16:01:51 +01:00 and committed by V8 LUCI CQ.
parent 286a35703a
commit f426a5a778
7 changed files with 279 additions and 94 deletions

View File

@ -303,7 +303,7 @@ using GenericNodeForOperation =
// TODO(victorgomes): Remove this once all operations have fast paths.
template <Operation kOperation>
bool BinaryOperationHasInt32FastPath() {
constexpr bool BinaryOperationHasInt32FastPath() {
switch (kOperation) {
case Operation::kAdd:
case Operation::kSubtract:
@ -328,7 +328,7 @@ bool BinaryOperationHasInt32FastPath() {
}
}
template <Operation kOperation>
bool BinaryOperationHasFloat64FastPath() {
constexpr bool BinaryOperationHasFloat64FastPath() {
switch (kOperation) {
case Operation::kAdd:
case Operation::kSubtract:
@ -389,38 +389,34 @@ static constexpr base::Optional<int> Int32Identity() {
}
}
namespace {
template <Operation kOperation>
ValueNode* MaglevGraphBuilder::AddNewInt32BinaryOperationNode(
std::initializer_list<ValueNode*> inputs) {
switch (kOperation) {
#define CASE(op, OpNode, _) \
case Operation::k##op: \
return AddNewNode<OpNode>(inputs);
MAP_OPERATION_TO_INT32_NODE(CASE)
#undef CASE
#define CASE(op, OpNode) \
case Operation::k##op: \
return AddNewNode<OpNode>(inputs);
MAP_COMPARE_OPERATION_TO_INT32_NODE(CASE)
#undef CASE
default:
UNREACHABLE();
}
}
struct Int32NodeForHelper;
#define SPECIALIZATION(op, OpNode, ...) \
template <> \
struct Int32NodeForHelper<Operation::k##op> { \
using type = OpNode; \
};
MAP_OPERATION_TO_INT32_NODE(SPECIALIZATION)
MAP_COMPARE_OPERATION_TO_INT32_NODE(SPECIALIZATION)
#undef SPECIALIZATION
template <Operation kOperation>
ValueNode* MaglevGraphBuilder::AddNewFloat64BinaryOperationNode(
std::initializer_list<ValueNode*> inputs) {
switch (kOperation) {
#define CASE(op, OpNode) \
case Operation::k##op: \
return AddNewNode<OpNode>(inputs);
MAP_OPERATION_TO_FLOAT64_NODE(CASE)
#undef CASE
default:
UNREACHABLE();
}
}
using Int32NodeFor = typename Int32NodeForHelper<kOperation>::type;
template <Operation kOperation>
struct Float64NodeForHelper;
#define SPECIALIZATION(op, OpNode) \
template <> \
struct Float64NodeForHelper<Operation::k##op> { \
using type = OpNode; \
};
MAP_OPERATION_TO_FLOAT64_NODE(SPECIALIZATION)
#undef SPECIALIZATION
template <Operation kOperation>
using Float64NodeFor = typename Float64NodeForHelper<kOperation>::type;
} // namespace
template <Operation kOperation>
void MaglevGraphBuilder::BuildGenericUnaryOperationNode() {
@ -495,7 +491,22 @@ void MaglevGraphBuilder::BuildInt32BinaryOperationNode() {
return;
}
SetAccumulator(AddNewInt32BinaryOperationNode<kOperation>({left, right}));
using OpNodeT = Int32NodeFor<kOperation>;
OpNodeT* result = AddNewNode<OpNodeT>({left, right});
NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(result);
node_info->type = NodeType::kSmi;
if constexpr (OpNodeT::kProperties.value_representation() ==
ValueRepresentation::kInt32) {
// For Int32, the check is the same as a tag operation, so we may as well
// keep the tagged result as the tagged alternative.
node_info->tagged_alternative = AddNewNode<CheckedSmiTagInt32>({result});
} else {
static_assert(OpNodeT::kProperties.value_representation() ==
ValueRepresentation::kUint32);
AddNewNode<CheckUint32IsSmi>({result});
}
SetAccumulator(result);
}
template <Operation kOperation>
@ -514,7 +525,20 @@ void MaglevGraphBuilder::BuildInt32BinarySmiOperationNode() {
return;
}
ValueNode* right = GetInt32Constant(constant);
SetAccumulator(AddNewInt32BinaryOperationNode<kOperation>({left, right}));
using OpNodeT = Int32NodeFor<kOperation>;
OpNodeT* result = AddNewNode<OpNodeT>({left, right});
known_node_aspects().GetOrCreateInfoFor(result)->type = NodeType::kSmi;
if constexpr (OpNodeT::kProperties.value_representation() ==
ValueRepresentation::kInt32) {
AddNewNode<CheckInt32IsSmi>({result});
} else {
static_assert(OpNodeT::kProperties.value_representation() ==
ValueRepresentation::kUint32);
AddNewNode<CheckUint32IsSmi>({result});
}
SetAccumulator(result);
}
template <Operation kOperation>
@ -523,7 +547,7 @@ void MaglevGraphBuilder::BuildFloat64BinarySmiOperationNode() {
ValueNode* left = GetAccumulatorFloat64();
double constant = static_cast<double>(iterator_.GetImmediateOperand(0));
ValueNode* right = GetFloat64Constant(constant);
SetAccumulator(AddNewFloat64BinaryOperationNode<kOperation>({left, right}));
SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right}));
}
template <Operation kOperation>
@ -532,7 +556,7 @@ void MaglevGraphBuilder::BuildFloat64BinaryOperationNode() {
ValueNode* left = LoadRegisterFloat64(0);
ValueNode* right = GetAccumulatorFloat64();
SetAccumulator(AddNewFloat64BinaryOperationNode<kOperation>({left, right}));
SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right}));
}
template <Operation kOperation>
@ -550,17 +574,17 @@ void MaglevGraphBuilder::VisitBinaryOperation() {
DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation);
return;
case BinaryOperationHint::kSignedSmall:
if (BinaryOperationHasInt32FastPath<kOperation>()) {
if constexpr (BinaryOperationHasInt32FastPath<kOperation>()) {
BuildInt32BinaryOperationNode<kOperation>();
return;
}
break;
case BinaryOperationHint::kSignedSmallInputs:
case BinaryOperationHint::kNumber:
if (BinaryOperationHasFloat64FastPath<kOperation>()) {
if constexpr (BinaryOperationHasFloat64FastPath<kOperation>()) {
BuildFloat64BinaryOperationNode<kOperation>();
return;
// } else if (BinaryOperationHasInt32FastPath<kOperation>()) {
// } else if constexpr (BinaryOperationHasInt32FastPath<kOperation>()) {
// // Fall back to int32 fast path if there is one (this will be the
// case
// // for operations that deal with bits rather than numbers).
@ -584,17 +608,17 @@ void MaglevGraphBuilder::VisitBinarySmiOperation() {
DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation);
return;
case BinaryOperationHint::kSignedSmall:
if (BinaryOperationHasInt32FastPath<kOperation>()) {
if constexpr (BinaryOperationHasInt32FastPath<kOperation>()) {
BuildInt32BinarySmiOperationNode<kOperation>();
return;
}
break;
case BinaryOperationHint::kSignedSmallInputs:
case BinaryOperationHint::kNumber:
if (BinaryOperationHasFloat64FastPath<kOperation>()) {
if constexpr (BinaryOperationHasFloat64FastPath<kOperation>()) {
BuildFloat64BinarySmiOperationNode<kOperation>();
return;
// } else if (BinaryOperationHasInt32FastPath<kOperation>()) {
// } else if constexpr (BinaryOperationHasInt32FastPath<kOperation>()) {
// // Fall back to int32 fast path if there is one (this will be the
// case
// // for operations that deal with bits rather than numbers).
@ -678,7 +702,7 @@ void MaglevGraphBuilder::VisitCompareOperation() {
DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation);
return;
case CompareOperationHint::kSignedSmall:
if (BinaryOperationHasInt32FastPath<kOperation>()) {
if constexpr (BinaryOperationHasInt32FastPath<kOperation>()) {
ValueNode* left = LoadRegisterInt32(0);
ValueNode* right = GetAccumulatorInt32();
@ -686,13 +710,12 @@ void MaglevGraphBuilder::VisitCompareOperation() {
right)) {
return;
}
SetAccumulator(
AddNewInt32BinaryOperationNode<kOperation>({left, right}));
SetAccumulator(AddNewNode<Int32NodeFor<kOperation>>({left, right}));
return;
}
break;
case CompareOperationHint::kNumber:
if (BinaryOperationHasFloat64FastPath<kOperation>()) {
if constexpr (BinaryOperationHasFloat64FastPath<kOperation>()) {
ValueNode* left = LoadRegisterFloat64(0);
ValueNode* right = GetAccumulatorFloat64();
@ -700,10 +723,9 @@ void MaglevGraphBuilder::VisitCompareOperation() {
right)) {
return;
}
SetAccumulator(
AddNewFloat64BinaryOperationNode<kOperation>({left, right}));
SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right}));
return;
// } else if (BinaryOperationHasInt32FastPath<kOperation>()) {
// } else if constexpr (BinaryOperationHasInt32FastPath<kOperation>()) {
// // Fall back to int32 fast path if there is one (this will be the
// case
// // for operations that deal with bits rather than numbers).
@ -4144,7 +4166,7 @@ void MaglevGraphBuilder::VisitForInStep() {
// contains an Smi.
ValueNode* index = LoadRegisterInt32(0);
ValueNode* one = GetInt32Constant(1);
SetAccumulator(AddNewInt32BinaryOperationNode<Operation::kAdd>({index, one}));
SetAccumulator(AddNewNode<Int32NodeFor<Operation::kAdd>>({index, one}));
}
void MaglevGraphBuilder::VisitSetPendingMessage() {

View File

@ -655,18 +655,22 @@ class MaglevGraphBuilder {
case ValueRepresentation::kInt32: {
NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
if (node_info->tagged_alternative == nullptr) {
// TODO(leszeks): Allow heap number boxing here.
node_info->tagged_alternative =
AddNewNode<CheckedSmiTagInt32>({value});
if (NodeTypeIsSmi(node_info->type)) {
node_info->tagged_alternative = AddNewNode<UnsafeSmiTag>({value});
} else {
node_info->tagged_alternative = AddNewNode<Int32ToNumber>({value});
}
}
return node_info->tagged_alternative;
}
case ValueRepresentation::kUint32: {
NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
if (node_info->tagged_alternative == nullptr) {
// TODO(leszeks): Allow heap number boxing here.
node_info->tagged_alternative =
AddNewNode<CheckedSmiTagUint32>({value});
if (NodeTypeIsSmi(node_info->type)) {
node_info->tagged_alternative = AddNewNode<UnsafeSmiTag>({value});
} else {
node_info->tagged_alternative = AddNewNode<Uint32ToNumber>({value});
}
}
return node_info->tagged_alternative;
}
@ -1186,13 +1190,6 @@ class MaglevGraphBuilder {
template <Operation kOperation>
void BuildGenericBinarySmiOperationNode();
template <Operation kOperation>
ValueNode* AddNewInt32BinaryOperationNode(
std::initializer_list<ValueNode*> inputs);
template <Operation kOperation>
ValueNode* AddNewFloat64BinaryOperationNode(
std::initializer_list<ValueNode*> inputs);
template <Operation kOperation>
ValueNode* TryFoldInt32BinaryOperation(ValueNode* left, ValueNode* right);
template <Operation kOperation>

View File

@ -75,6 +75,22 @@ class MaglevGraphVerifier {
}
}
// Verifies that value input `i` of `node` has a word32 representation,
// i.e. either Int32 or Uint32. On mismatch, builds a diagnostic message
// naming the node, the input index, and the offending representation, and
// aborts via FATAL.
void CheckValueInputIsWord32(NodeBase* node, int i) {
ValueNode* input = node->input(i).node();
ValueRepresentation got = input->properties().value_representation();
if (got != ValueRepresentation::kInt32 &&
got != ValueRepresentation::kUint32) {
std::ostringstream str;
str << "Type representation error: node ";
if (graph_labeller_) {
// Include the node's graph ID when a labeller is available.
str << "#" << graph_labeller_->NodeId(node) << " : ";
}
str << node->opcode() << " (input @" << i << " = " << input->opcode()
<< ") type " << got << " is not Int32 or Uint32";
FATAL("%s", str.str().c_str());
}
}
void Process(NodeBase* node, const ProcessingState& state) {
switch (node->opcode()) {
case Opcode::kAbort:
@ -156,19 +172,26 @@ class MaglevGraphVerifier {
CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
break;
case Opcode::kSwitch:
case Opcode::kCheckInt32IsSmi:
case Opcode::kCheckedSmiTagInt32:
case Opcode::kUnsafeSmiTag:
case Opcode::kChangeInt32ToFloat64:
case Opcode::kInt32ToNumber:
case Opcode::kBuiltinStringFromCharCode:
DCHECK_EQ(node->input_count(), 1);
CheckValueInputIs(node, 0, ValueRepresentation::kInt32);
break;
case Opcode::kCheckUint32IsSmi:
case Opcode::kCheckedSmiTagUint32:
case Opcode::kCheckedUint32ToInt32:
case Opcode::kChangeUint32ToFloat64:
case Opcode::kUint32ToNumber:
DCHECK_EQ(node->input_count(), 1);
CheckValueInputIs(node, 0, ValueRepresentation::kUint32);
break;
case Opcode::kUnsafeSmiTag:
DCHECK_EQ(node->input_count(), 1);
CheckValueInputIsWord32(node, 0);
break;
case Opcode::kFloat64Box:
case Opcode::kHoleyFloat64Box:
case Opcode::kCheckedTruncateFloat64ToInt32:

View File

@ -771,15 +771,13 @@ class MergePointInterpreterFrameState {
int32_t constant = value->Cast<Int32Constant>()->value();
return GetSmiConstant(compilation_unit, smi_constants, constant);
} else if (value->Is<StringLength>() ||
value->Is<BuiltinStringPrototypeCharCodeAt>()) {
value->Is<BuiltinStringPrototypeCharCodeAt>() ||
node_info->is_smi()) {
static_assert(String::kMaxLength <= kSmiMaxValue,
"String length must fit into a Smi");
tagged = Node::New<UnsafeSmiTag>(compilation_unit.zone(), {value});
} else {
tagged =
Node::New<CheckedSmiTagInt32, std::initializer_list<ValueNode*>>(
compilation_unit.zone(),
DeoptFrame(value->eager_deopt_info()->top_frame()), {value});
tagged = Node::New<Int32ToNumber>(compilation_unit.zone(), {value});
}
Node::List::AddAfter(value, tagged);
@ -800,10 +798,12 @@ class MergePointInterpreterFrameState {
NodeInfo* node_info = known_node_aspects.GetOrCreateInfoFor(value);
if (!node_info->tagged_alternative) {
// Create a tagged version.
ValueNode* tagged =
Node::New<CheckedSmiTagUint32, std::initializer_list<ValueNode*>>(
compilation_unit.zone(),
DeoptFrame(value->eager_deopt_info()->top_frame()), {value});
ValueNode* tagged;
if (node_info->is_smi()) {
tagged = Node::New<UnsafeSmiTag>(compilation_unit.zone(), {value});
} else {
tagged = Node::New<Uint32ToNumber>(compilation_unit.zone(), {value});
}
Node::List::AddAfter(value, tagged);
compilation_unit.RegisterNodeInGraphLabeller(tagged);

View File

@ -2777,6 +2777,32 @@ void UnsafeSmiUntag::GenerateCode(MaglevAssembler* masm,
__ SmiToInt32(value);
}
// The check only reads its input; no result register is defined.
void CheckInt32IsSmi::AllocateVreg(MaglevVregAllocationState* vreg_state) {
UseRegister(input());
}
// Deopts (kNotASmi) if the Int32 input would not fit in a Smi. The check is
// performed by Smi-tagging (doubling) into the scratch register and testing
// for signed overflow, leaving the input register untouched.
void CheckInt32IsSmi::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
// TODO(leszeks): This basically does a SmiTag and throws the result away.
// Don't throw the result away if we want to actually use it.
Register reg = ToRegister(input());
__ movl(kScratchRegister, reg);
__ addl(kScratchRegister, kScratchRegister);
// The deopt reads `reg`; make sure the frame-building code can't clobber it.
DCHECK_REGLIST_EMPTY(RegList{reg} &
GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
// Signed overflow on the doubling means the value exceeds Smi range.
__ EmitEagerDeoptIf(overflow, DeoptimizeReason::kNotASmi, this);
}
// The check only reads its input; no result register is defined.
void CheckUint32IsSmi::AllocateVreg(MaglevVregAllocationState* vreg_state) {
UseRegister(input());
}
// Deopts (kNotASmi) if the Uint32 input exceeds Smi::kMaxValue. A single
// unsigned compare suffices since a Uint32 can never be negative.
void CheckUint32IsSmi::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register reg = ToRegister(input());
// Perform an unsigned comparison against Smi::kMaxValue.
__ cmpl(reg, Immediate(Smi::kMaxValue));
__ EmitEagerDeoptIf(above, DeoptimizeReason::kNotASmi, this);
}
void CheckedSmiTagInt32::AllocateVreg(MaglevVregAllocationState* vreg_state) {
UseRegister(input());
DefineSameAsFirst(vreg_state, this);
@ -2842,6 +2868,56 @@ void Int32Constant::PrintParams(std::ostream& os,
os << "(" << value() << ")";
}
// The result lives in its own register: the deferred HeapNumber path still
// needs the untagged input value after the output has been written.
void Int32ToNumber::AllocateVreg(MaglevVregAllocationState* vreg_state) {
UseRegister(input());
DefineAsRegister(vreg_state, this);
}
// Converts an Int32 to a tagged Number. Fast path: Smi-tag by doubling into
// the scratch register. If the doubling overflows (value out of Smi range),
// jump to deferred code that boxes the value in a new HeapNumber instead.
// This node never deopts.
void Int32ToNumber::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
ZoneLabelRef done(masm);
Register value = ToRegister(input());
Register object = ToRegister(result());
__ movl(kScratchRegister, value);
__ addl(kScratchRegister, kScratchRegister);
__ JumpToDeferredIf(
overflow,
[](MaglevAssembler* masm, Register object, Register value,
ZoneLabelRef done, Int32ToNumber* node) {
// Slow path: widen to double and allocate a HeapNumber for it.
DoubleRegister double_value = kScratchDoubleReg;
__ Cvtlsi2sd(double_value, value);
__ AllocateHeapNumber(node->register_snapshot(), object, double_value);
__ jmp(*done);
},
object, value, done, this);
// Fast path: the scratch register now holds the tagged Smi.
__ Move(object, kScratchRegister);
__ bind(*done);
}
// Result shares the input register: the fast path tags the value in place
// (see the DCHECK_EQ(object, value) in GenerateCode).
void Uint32ToNumber::AllocateVreg(MaglevVregAllocationState* vreg_state) {
UseRegister(input());
DefineSameAsFirst(vreg_state, this);
}
// Converts a Uint32 to a tagged Number. If the value exceeds Smi::kMaxValue
// (unsigned compare), deferred code boxes it into a HeapNumber; otherwise
// the value is Smi-tagged in place by doubling. This node never deopts.
void Uint32ToNumber::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
ZoneLabelRef done(masm);
Register value = ToRegister(input());
Register object = ToRegister(result());
__ cmpl(value, Immediate(Smi::kMaxValue));
__ JumpToDeferredIf(
above,
[](MaglevAssembler* masm, Register object, Register value,
ZoneLabelRef done, Uint32ToNumber* node) {
// Slow path: convert the unsigned value to double and box it.
DoubleRegister double_value = kScratchDoubleReg;
__ Cvtlui2sd(double_value, value);
__ AllocateHeapNumber(node->register_snapshot(), object, double_value);
__ jmp(*done);
},
object, value, done, this);
// Fast path: tag in place. Safe because result is defined same-as-first.
__ addl(value, value);
DCHECK_EQ(object, value);
__ bind(*done);
}
void Float64Box::AllocateVreg(MaglevVregAllocationState* vreg_state) {
UseRegister(input());
DefineAsRegister(vreg_state, this);

View File

@ -179,6 +179,8 @@ class CompactInterpreterFrameState;
V(ChangeInt32ToFloat64) \
V(ChangeUint32ToFloat64) \
V(CheckedTruncateFloat64ToInt32) \
V(Int32ToNumber) \
V(Uint32ToNumber) \
V(Float64Box) \
V(HoleyFloat64Box) \
V(CheckedFloat64Unbox) \
@ -209,6 +211,8 @@ class CompactInterpreterFrameState;
#define NODE_LIST(V) \
V(AssertInt32) \
V(CheckInt32IsSmi) \
V(CheckUint32IsSmi) \
V(CheckHeapObject) \
V(CheckInt32Condition) \
V(CheckJSArrayBounds) \
@ -1681,8 +1685,7 @@ class Int32BinaryNode : public FixedInputValueNodeT<2, Derived> {
using Base = FixedInputValueNodeT<2, Derived>;
public:
static constexpr OpProperties kProperties =
OpProperties::EagerDeopt() | OpProperties::Int32();
static constexpr OpProperties kProperties = OpProperties::Int32();
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
@ -1724,8 +1727,7 @@ class Int32ShiftRightLogical
explicit Int32ShiftRightLogical(uint64_t bitfield) : Base(bitfield) {}
// Unlike the other Int32 nodes, logical right shift returns a Uint32.
static constexpr OpProperties kProperties =
OpProperties::EagerDeopt() | OpProperties::Uint32();
static constexpr OpProperties kProperties = OpProperties::Uint32();
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
@ -1864,6 +1866,36 @@ DEF_FLOAT64_COMPARE_NODE(GreaterThanOrEqual)
#undef DEF_OPERATION_NODE
// Checks that an Int32 value fits in Smi range, eagerly deopting with
// kNotASmi otherwise. Pure check node: it consumes its input but produces
// no value (FixedInputNodeT, not a ValueNode).
class CheckInt32IsSmi : public FixedInputNodeT<1, CheckInt32IsSmi> {
using Base = FixedInputNodeT<1, CheckInt32IsSmi>;
public:
explicit CheckInt32IsSmi(uint64_t bitfield) : Base(bitfield) {}
// May eagerly deopt when the value does not fit in a Smi.
static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
Input& input() { return Node::input(0); }
void AllocateVreg(MaglevVregAllocationState*);
void GenerateCode(MaglevAssembler*, const ProcessingState&);
// No extra parameters to print.
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
// Checks that a Uint32 value is <= Smi::kMaxValue, eagerly deopting with
// kNotASmi otherwise. Pure check node: it consumes its input but produces
// no value (FixedInputNodeT, not a ValueNode).
class CheckUint32IsSmi : public FixedInputNodeT<1, CheckUint32IsSmi> {
using Base = FixedInputNodeT<1, CheckUint32IsSmi>;
public:
explicit CheckUint32IsSmi(uint64_t bitfield) : Base(bitfield) {}
// May eagerly deopt when the value exceeds Smi range.
static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
Input& input() { return Node::input(0); }
void AllocateVreg(MaglevVregAllocationState*);
void GenerateCode(MaglevAssembler*, const ProcessingState&);
// No extra parameters to print.
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class CheckedSmiTagInt32 : public FixedInputValueNodeT<1, CheckedSmiTagInt32> {
using Base = FixedInputValueNodeT<1, CheckedSmiTagInt32>;
@ -2000,6 +2032,38 @@ class Float64Constant : public FixedInputValueNodeT<0, Float64Constant> {
const double value_;
};
// Converts an Int32 into a tagged Number: a Smi when the value fits,
// otherwise a freshly allocated HeapNumber. Unlike CheckedSmiTagInt32 this
// never deopts; the out-of-Smi-range case is handled by a deferred
// allocation (hence DeferredCall).
class Int32ToNumber : public FixedInputValueNodeT<1, Int32ToNumber> {
using Base = FixedInputValueNodeT<1, Int32ToNumber>;
public:
explicit Int32ToNumber(uint64_t bitfield) : Base(bitfield) {}
// DeferredCall: the HeapNumber allocation runs in deferred code.
static constexpr OpProperties kProperties =
OpProperties::DeferredCall() | OpProperties::ConversionNode();
Input& input() { return Node::input(0); }
void AllocateVreg(MaglevVregAllocationState*);
void GenerateCode(MaglevAssembler*, const ProcessingState&);
// No extra parameters to print.
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
// Converts a Uint32 into a tagged Number: a Smi when the value is
// <= Smi::kMaxValue, otherwise a freshly allocated HeapNumber. Unlike
// CheckedSmiTagUint32 this never deopts; the out-of-range case is handled
// by a deferred allocation (hence DeferredCall).
class Uint32ToNumber : public FixedInputValueNodeT<1, Uint32ToNumber> {
using Base = FixedInputValueNodeT<1, Uint32ToNumber>;
public:
explicit Uint32ToNumber(uint64_t bitfield) : Base(bitfield) {}
// DeferredCall: the HeapNumber allocation runs in deferred code.
static constexpr OpProperties kProperties =
OpProperties::DeferredCall() | OpProperties::ConversionNode();
Input& input() { return Node::input(0); }
void AllocateVreg(MaglevVregAllocationState*);
void GenerateCode(MaglevAssembler*, const ProcessingState&);
// No extra parameters to print.
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class Float64Box : public FixedInputValueNodeT<1, Float64Box> {
using Base = FixedInputValueNodeT<1, Float64Box>;

View File

@ -20,24 +20,27 @@ assertEquals(foo_int32(), 2);
%OptimizeMaglevOnNextCall(foo_int32);
assertEquals(foo_int32(), 2);
// Exercise a simple exception handler block whose trampoline holds an
// int32 value that has overflowed and therefore needs a HeapNumber.
function foo_int32_overflow(x) {
  try {
    x += x;
    throw "Error";
  } catch (e) {
    return x;
  }
}
%PrepareFunctionForOptimization(foo_int32_overflow);
assertEquals(foo_int32_overflow(1), 2);
%OptimizeMaglevOnNextCall(foo_int32_overflow);
assertEquals(foo_int32_overflow(0x3FFFFFFF), 0x7FFFFFFE);
// If we call it with a HeapNumber, we deopt before the exception:
assertTrue(%ActiveTierIsMaglev(foo_int32_overflow));
assertEquals(foo_int32_overflow(1.1), 2.2);
assertFalse(%ActiveTierIsMaglev(foo_int32_overflow));
// TODO(leszeks): There is currently no way for this to happen, because all
// Int32 ops are eagerly checked for Smi overflow.
//
// // This examples creates a simple exception handler block where the trampoline
// // has an int32 value that overflows and it needs to create a HeapNumber.
// function foo_int32_overflow(x) {
// try {
// x = x + x;
// throw "Error";
// } catch {
// return x;
// }
// }
// %PrepareFunctionForOptimization(foo_int32_overflow);
// assertEquals(foo_int32_overflow(1), 2);
// %OptimizeMaglevOnNextCall(foo_int32_overflow);
// assertEquals(foo_int32_overflow(0x3FFFFFFF), 0x7FFFFFFE);
// assertTrue(%ActiveTierIsMaglev(foo_int32_overflow));
// // If we call it with a HeapNumber, we deopt before the exception:
// assertEquals(foo_int32_overflow(1.1), 2.2);
// assertTrue(%ActiveTierIsMaglev(foo_int32_overflow));
// This examples creates a simple exception handler block where the trampoline
// has an float64 value and needs to convert to a tagged value.