Reland "[maglev] Deopt on overflow in >>>"

This is a reland of commit 24e60017d4

The reland changes %ClearFunctionFeedback to clear *all* feedback
slot kinds including binary/compare/for-in slots. In the tests we
thus no longer have to resort to tricks to restore the function to
its initial state; instead we simply call %ClearFunctionFeedback.

Original change's description:
> [maglev] Deopt on overflow in >>>
>
> Re-enable the int32 fast path for ShiftRightLogical, but account for
> Maglev's missing signed/unsigned representation tracking by a)
> removing rhs==0 as the identity value (a shift by 0 is still a
> signed-unsigned conversion) and b) deoptimizing if the result cannot
> be converted to a non-negative smi.
>
> Note this is not a deopt loop, since a non-smi result will change the
> feedback to kSignedSmallInputs (from kSignedSmall).
>
> To fix this properly, we should track signed/unsigned representations
> and convert the result to a heap number if it doesn't fit within smi
> range.
>
> Bug: v8:7700
> Change-Id: Ifd538d227a6f1290eb7f008d9bfad586ff91ea0f
> Fixed: v8:13251
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3876366
> Reviewed-by: Leszek Swirski <leszeks@chromium.org>
> Commit-Queue: Jakob Linke <jgruber@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#83025}

Bug: v8:7700
Change-Id: I2f607a0fb863b80e8589c9c1e86ee31fbac48c25
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3879491
Auto-Submit: Jakob Linke <jgruber@chromium.org>
Commit-Queue: Jakob Linke <jgruber@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83057}
This commit is contained in:
Jakob Linke 2022-09-08 10:35:31 +02:00 committed by V8 LUCI CQ
parent 3c4654da69
commit 178f2eeb13
11 changed files with 239 additions and 83 deletions

View File

@ -176,8 +176,7 @@ bool BinaryOperationHasInt32FastPath() {
case Operation::kBitwiseXor: case Operation::kBitwiseXor:
case Operation::kShiftLeft: case Operation::kShiftLeft:
case Operation::kShiftRight: case Operation::kShiftRight:
// TODO(v8:13251): Implement with overflow protection. case Operation::kShiftRightLogical:
// case Operation::kShiftRightLogical:
case Operation::kEqual: case Operation::kEqual:
case Operation::kStrictEqual: case Operation::kStrictEqual:
case Operation::kLessThan: case Operation::kLessThan:
@ -205,21 +204,21 @@ bool BinaryOperationHasFloat64FastPath() {
} // namespace } // namespace
// MAP_OPERATION_TO_NODES are tuples with the following format: // MAP_OPERATION_TO_NODES are tuples with the following format:
// (Operation name, // - Operation name,
// Int32 operation node, // - Int32 operation node,
// Unit of int32 operation (e.g, 0 for add/sub and 1 for mul/div)) // - Identity of int32 operation (e.g, 0 for add/sub and 1 for mul/div), if it
#define MAP_OPERATION_TO_INT32_NODE(V) \ // exists, or otherwise {}.
V(Add, Int32AddWithOverflow, 0) \ #define MAP_OPERATION_TO_INT32_NODE(V) \
V(Subtract, Int32SubtractWithOverflow, 0) \ V(Add, Int32AddWithOverflow, 0) \
V(Multiply, Int32MultiplyWithOverflow, 1) \ V(Subtract, Int32SubtractWithOverflow, 0) \
V(Divide, Int32DivideWithOverflow, 1) \ V(Multiply, Int32MultiplyWithOverflow, 1) \
V(BitwiseAnd, Int32BitwiseAnd, ~0) \ V(Divide, Int32DivideWithOverflow, 1) \
V(BitwiseOr, Int32BitwiseOr, 0) \ V(BitwiseAnd, Int32BitwiseAnd, ~0) \
V(BitwiseXor, Int32BitwiseXor, 0) \ V(BitwiseOr, Int32BitwiseOr, 0) \
V(ShiftLeft, Int32ShiftLeft, 0) \ V(BitwiseXor, Int32BitwiseXor, 0) \
V(ShiftRight, Int32ShiftRight, 0) \ V(ShiftLeft, Int32ShiftLeft, 0) \
/* TODO(v8:13251): Implement with overflow protection. */ \ V(ShiftRight, Int32ShiftRight, 0) \
V(ShiftRightLogical, Int32ShiftRightLogical, 0) V(ShiftRightLogical, Int32ShiftRightLogical, {})
#define MAP_COMPARE_OPERATION_TO_INT32_NODE(V) \ #define MAP_COMPARE_OPERATION_TO_INT32_NODE(V) \
V(Equal, Int32Equal) \ V(Equal, Int32Equal) \
@ -238,11 +237,11 @@ bool BinaryOperationHasFloat64FastPath() {
V(Divide, Float64Divide) V(Divide, Float64Divide)
template <Operation kOperation> template <Operation kOperation>
static int Int32Unit() { static constexpr base::Optional<int> Int32Identity() {
switch (kOperation) { switch (kOperation) {
#define CASE(op, OpNode, unit) \ #define CASE(op, _, identity) \
case Operation::k##op: \ case Operation::k##op: \
return unit; return identity;
MAP_OPERATION_TO_INT32_NODE(CASE) MAP_OPERATION_TO_INT32_NODE(CASE)
#undef CASE #undef CASE
default: default:
@ -254,8 +253,8 @@ template <Operation kOperation>
ValueNode* MaglevGraphBuilder::AddNewInt32BinaryOperationNode( ValueNode* MaglevGraphBuilder::AddNewInt32BinaryOperationNode(
std::initializer_list<ValueNode*> inputs) { std::initializer_list<ValueNode*> inputs) {
switch (kOperation) { switch (kOperation) {
#define CASE(op, OpNode, unit) \ #define CASE(op, OpNode, _) \
case Operation::k##op: \ case Operation::k##op: \
return AddNewNode<OpNode>(inputs); return AddNewNode<OpNode>(inputs);
MAP_OPERATION_TO_INT32_NODE(CASE) MAP_OPERATION_TO_INT32_NODE(CASE)
#undef CASE #undef CASE
@ -328,7 +327,7 @@ void MaglevGraphBuilder::BuildInt32BinarySmiOperationNode() {
// TODO(v8:7700): Do constant folding. // TODO(v8:7700): Do constant folding.
ValueNode* left = GetAccumulatorInt32(); ValueNode* left = GetAccumulatorInt32();
int32_t constant = iterator_.GetImmediateOperand(0); int32_t constant = iterator_.GetImmediateOperand(0);
if (constant == Int32Unit<kOperation>()) { if (base::Optional<int>(constant) == Int32Identity<kOperation>()) {
// If the constant is the unit of the operation, it already has the right // If the constant is the unit of the operation, it already has the right
// value, so we can just return. // value, so we can just return.
return; return;

View File

@ -2056,6 +2056,13 @@ void Int32ShiftRightLogical::GenerateCode(MaglevAssembler* masm,
Register left = ToRegister(left_input()); Register left = ToRegister(left_input());
DCHECK_EQ(rcx, ToRegister(right_input())); DCHECK_EQ(rcx, ToRegister(right_input()));
__ shrl_cl(left); __ shrl_cl(left);
// The result of >>> is unsigned, but Maglev doesn't yet track
// signed/unsigned representations. Instead, deopt if the resulting smi would
// be negative.
// TODO(jgruber): Properly track signed/unsigned representations and
// allocate a heap number if the result is outside smi range.
__ testl(left, Immediate((1 << 31) | (1 << 30)));
EmitEagerDeoptIf(not_equal, masm, DeoptimizeReason::kOverflow, this);
} }
namespace { namespace {

View File

@ -479,7 +479,7 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
} }
} }
bool FeedbackVector::ClearSlots(Isolate* isolate) { bool FeedbackVector::ClearSlots(Isolate* isolate, ClearBehavior behavior) {
if (!shared_function_info().HasFeedbackMetadata()) return false; if (!shared_function_info().HasFeedbackMetadata()) return false;
MaybeObject uninitialized_sentinel = MaybeObject::FromObject( MaybeObject uninitialized_sentinel = MaybeObject::FromObject(
FeedbackVector::RawUninitializedSentinel(isolate)); FeedbackVector::RawUninitializedSentinel(isolate));
@ -492,7 +492,7 @@ bool FeedbackVector::ClearSlots(Isolate* isolate) {
MaybeObject obj = Get(slot); MaybeObject obj = Get(slot);
if (obj != uninitialized_sentinel) { if (obj != uninitialized_sentinel) {
FeedbackNexus nexus(*this, slot); FeedbackNexus nexus(*this, slot);
feedback_updated |= nexus.Clear(); feedback_updated |= nexus.Clear(behavior);
} }
} }
return feedback_updated; return feedback_updated;
@ -609,23 +609,37 @@ void FeedbackNexus::ConfigureUninitialized() {
} }
} }
bool FeedbackNexus::Clear() { bool FeedbackNexus::Clear(ClearBehavior behavior) {
bool feedback_updated = false; bool feedback_updated = false;
switch (kind()) { switch (kind()) {
case FeedbackSlotKind::kTypeProfile: case FeedbackSlotKind::kTypeProfile:
// We don't clear these kinds ever. if (V8_LIKELY(behavior == ClearBehavior::kDefault)) {
// We don't clear these kinds ever.
} else if (!IsCleared()) {
DCHECK_EQ(behavior, ClearBehavior::kClearAll);
SetFeedback(UninitializedSentinel(), SKIP_WRITE_BARRIER);
feedback_updated = true;
}
break; break;
case FeedbackSlotKind::kCompareOp: case FeedbackSlotKind::kCompareOp:
case FeedbackSlotKind::kForIn: case FeedbackSlotKind::kForIn:
case FeedbackSlotKind::kBinaryOp: case FeedbackSlotKind::kBinaryOp:
// We don't clear these, either. if (V8_LIKELY(behavior == ClearBehavior::kDefault)) {
// We don't clear these, either.
} else if (!IsCleared()) {
DCHECK_EQ(behavior, ClearBehavior::kClearAll);
SetFeedback(Smi::zero(), SKIP_WRITE_BARRIER);
feedback_updated = true;
}
break; break;
case FeedbackSlotKind::kLiteral: case FeedbackSlotKind::kLiteral:
SetFeedback(Smi::zero(), SKIP_WRITE_BARRIER); if (!IsCleared()) {
feedback_updated = true; SetFeedback(Smi::zero(), SKIP_WRITE_BARRIER);
feedback_updated = true;
}
break; break;
case FeedbackSlotKind::kSetNamedSloppy: case FeedbackSlotKind::kSetNamedSloppy:

View File

@ -28,6 +28,12 @@ class IsCompiledScope;
enum class UpdateFeedbackMode { kOptionalFeedback, kGuaranteedFeedback }; enum class UpdateFeedbackMode { kOptionalFeedback, kGuaranteedFeedback };
// Which feedback slots to clear in Clear().
enum class ClearBehavior {
kDefault,
kClearAll, // .. also ForIn, CompareOp, BinaryOp.
};
enum class FeedbackSlotKind : uint8_t { enum class FeedbackSlotKind : uint8_t {
// This kind means that the slot points to the middle of other slot // This kind means that the slot points to the middle of other slot
// which occupies more than one feedback vector element. // which occupies more than one feedback vector element.
@ -357,7 +363,14 @@ class FeedbackVector
void FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot); void FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot);
// Clears the vector slots. Return true if feedback has changed. // Clears the vector slots. Return true if feedback has changed.
bool ClearSlots(Isolate* isolate); bool ClearSlots(Isolate* isolate) {
return ClearSlots(isolate, ClearBehavior::kDefault);
}
// As above, but clears *all* slots - even those that we usually keep (e.g.:
// BinaryOp feedback).
bool ClearAllSlotsForTesting(Isolate* isolate) {
return ClearSlots(isolate, ClearBehavior::kClearAll);
}
// The object that indicates an uninitialized cache. // The object that indicates an uninitialized cache.
static inline Handle<Symbol> UninitializedSentinel(Isolate* isolate); static inline Handle<Symbol> UninitializedSentinel(Isolate* isolate);
@ -384,6 +397,8 @@ class FeedbackVector
TQ_OBJECT_CONSTRUCTORS(FeedbackVector) TQ_OBJECT_CONSTRUCTORS(FeedbackVector)
private: private:
bool ClearSlots(Isolate* isolate, ClearBehavior behavior);
static void AddToVectorsForProfilingTools(Isolate* isolate, static void AddToVectorsForProfilingTools(Isolate* isolate,
Handle<FeedbackVector> vector); Handle<FeedbackVector> vector);
@ -811,7 +826,7 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
} }
// Clear() returns true if the state of the underlying vector was changed. // Clear() returns true if the state of the underlying vector was changed.
bool Clear(); bool Clear(ClearBehavior behavior);
void ConfigureUninitialized(); void ConfigureUninitialized();
// ConfigureMegamorphic() returns true if the state of the underlying vector // ConfigureMegamorphic() returns true if the state of the underlying vector
// was changed. Extra feedback is cleared if the 0 parameter version is used. // was changed. Extra feedback is cleared if the 0 parameter version is used.

View File

@ -1364,14 +1364,14 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
static_cast<unsigned>(JSObject::kMaxInstanceSize)); static_cast<unsigned>(JSObject::kMaxInstanceSize));
} }
void JSFunction::ClearTypeFeedbackInfo() { void JSFunction::ClearAllTypeFeedbackInfoForTesting() {
ResetIfCodeFlushed(); ResetIfCodeFlushed();
if (has_feedback_vector()) { if (has_feedback_vector()) {
FeedbackVector vector = feedback_vector(); FeedbackVector vector = feedback_vector();
Isolate* isolate = GetIsolate(); Isolate* isolate = GetIsolate();
if (vector.ClearSlots(isolate)) { if (vector.ClearAllSlotsForTesting(isolate)) {
IC::OnFeedbackChanged(isolate, vector, FeedbackSlot::Invalid(), IC::OnFeedbackChanged(isolate, vector, FeedbackSlot::Invalid(),
"ClearTypeFeedbackInfo"); "ClearAllTypeFeedbackInfoForTesting");
} }
} }
} }

View File

@ -243,8 +243,9 @@ class JSFunction : public TorqueGeneratedJSFunction<
IsCompiledScope* compiled_scope, IsCompiledScope* compiled_scope,
bool reset_budget_for_feedback_allocation); bool reset_budget_for_feedback_allocation);
// Unconditionally clear the type feedback vector. // Unconditionally clear the type feedback vector, even those that we usually
void ClearTypeFeedbackInfo(); // keep (e.g.: BinaryOp feedback).
void ClearAllTypeFeedbackInfoForTesting();
// Resets function to clear compiled data after bytecode has been flushed. // Resets function to clear compiled data after bytecode has been flushed.
inline bool NeedsResetDueToFlushedBytecode(); inline bool NeedsResetDueToFlushedBytecode();

View File

@ -952,7 +952,7 @@ RUNTIME_FUNCTION(Runtime_ClearFunctionFeedback) {
HandleScope scope(isolate); HandleScope scope(isolate);
DCHECK_EQ(1, args.length()); DCHECK_EQ(1, args.length());
Handle<JSFunction> function = args.at<JSFunction>(0); Handle<JSFunction> function = args.at<JSFunction>(0);
function->ClearTypeFeedbackInfo(); function->ClearAllTypeFeedbackInfoForTesting();
return ReadOnlyRoots(isolate).undefined_value(); return ReadOnlyRoots(isolate).undefined_value();
} }

View File

@ -0,0 +1,48 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --maglev
function gen_shrl_smi(y) {
return function shrl_smi(x) { return x >>> y; };
}
function shrl_test(lhs, rhs, expected_result) {
const shrl = gen_shrl_smi(rhs);
// Warmup.
%PrepareFunctionForOptimization(shrl);
%ClearFunctionFeedback(shrl);
shrl(1);
%OptimizeMaglevOnNextCall(shrl);
assertEquals(expected_result, shrl(lhs));
assertTrue(isMaglevved(shrl));
%DeoptimizeFunction(shrl);
assertEquals(expected_result, shrl(lhs));
}
function shrl_test_expect_deopt(lhs, rhs, expected_result) {
const shrl = gen_shrl_smi(rhs);
// Warmup.
%PrepareFunctionForOptimization(shrl);
%ClearFunctionFeedback(shrl);
shrl(1);
%OptimizeMaglevOnNextCall(shrl);
assertEquals(expected_result, shrl(lhs));
assertFalse(isMaglevved(shrl));
}
shrl_test(8, 2, 2);
shrl_test_expect_deopt(-1, 1, 2147483647);
shrl_test(-8, 2, 1073741822);
shrl_test_expect_deopt(-8, 0, 4294967288);
shrl_test_expect_deopt(-892396978, 0, 3402570318);
shrl_test(8, 10, 0);
shrl_test(8, 33, 4);
shrl_test_expect_deopt(0xFFFFFFFF, 0x3FFFFFFF, 1);

View File

@ -0,0 +1,44 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --maglev
function shrl(x, y) {
return x >>> y;
}
function shrl_test(lhs, rhs, expected_result) {
// Warmup.
%PrepareFunctionForOptimization(shrl);
%ClearFunctionFeedback(shrl);
shrl(1, 1);
%OptimizeMaglevOnNextCall(shrl);
assertEquals(expected_result, shrl(lhs, rhs));
assertTrue(isMaglevved(shrl));
%DeoptimizeFunction(shrl);
assertEquals(expected_result, shrl(lhs, rhs));
}
function shrl_test_expect_deopt(lhs, rhs, expected_result) {
// Warmup.
%PrepareFunctionForOptimization(shrl);
%ClearFunctionFeedback(shrl);
shrl(1, 1);
%OptimizeMaglevOnNextCall(shrl);
assertEquals(expected_result, shrl(lhs, rhs));
assertFalse(isMaglevved(shrl));
}
shrl_test(8, 2, 2);
shrl_test_expect_deopt(-1, 1, 2147483647);
shrl_test(-8, 2, 1073741822);
shrl_test_expect_deopt(-8, 0, 4294967288);
shrl_test_expect_deopt(-892396978, 0, 3402570318);
shrl_test(8, 10, 0);
shrl_test(8, 33, 4);
shrl_test_expect_deopt(0xFFFFFFFF, 0x3FFFFFFF, 1);

View File

@ -4,55 +4,42 @@
// Flags: --allow-natives-syntax --maglev // Flags: --allow-natives-syntax --maglev
// Checks Smi shift_right operation and deopt while untagging. function gen_sarl_smi(y) {
(function() { return function sarl_smi(x) { return x >> y; };
function shift_right(x, y) { }
return x >> y;
}
%PrepareFunctionForOptimization(shift_right); function sarl_test(lhs, rhs, expected_result) {
assertEquals(2, shift_right(8, 2)); const sarl = gen_sarl_smi(rhs);
assertEquals(-2, shift_right(-8, 2));
assertEquals(-8, shift_right(-8, 0));
assertEquals(0, shift_right(8, 10));
assertEquals(4, shift_right(8, 33));
%OptimizeMaglevOnNextCall(shift_right); // Warmup.
assertEquals(2, shift_right(8, 2)); %PrepareFunctionForOptimization(sarl);
assertTrue(isMaglevved(shift_right)); %ClearFunctionFeedback(sarl);
sarl(1);
%OptimizeMaglevOnNextCall(sarl);
assertEquals(-2, shift_right(-8, 2)); assertEquals(expected_result, sarl(lhs));
assertTrue(isMaglevved(shift_right)); assertTrue(isMaglevved(sarl));
assertEquals(-8, shift_right(-8, 0)); %DeoptimizeFunction(sarl);
assertTrue(isMaglevved(shift_right)); assertEquals(expected_result, sarl(lhs));
}
assertEquals(0, shift_right(8, 10)); function sarl_test_expect_deopt(lhs, rhs, expected_result) {
assertTrue(isMaglevved(shift_right)); const sarl = gen_sarl_smi(rhs);
// Shifts are mod 32 // Warmup.
assertEquals(4, shift_right(8, 33)); %PrepareFunctionForOptimization(sarl);
assertTrue(isMaglevved(shift_right)); %ClearFunctionFeedback(sarl);
sarl(1);
%OptimizeMaglevOnNextCall(sarl);
// // We should deopt here in SmiUntag. assertEquals(expected_result, sarl(lhs));
// assertEquals(0x40000000, shift_right(1, 0x3FFFFFFF)); assertFalse(isMaglevved(sarl));
// assertFalse(isMaglevved(shift_right)); }
})();
// // Checks when we deopt due to tagging. sarl_test(8, 2, 2);
// (function() { sarl_test(-8, 2, -2);
// function shift_right(x, y) { sarl_test(-8, 0, -8);
// return x + y; sarl_test(8, 10, 0);
// } sarl_test(8, 33, 4);
sarl_test_expect_deopt(0xFFFFFFFF, 0x3FFFFFFF, -1);
// %PrepareFunctionForOptimization(shift_right);
// assertEquals(3, shift_right(1, 2));
// %OptimizeMaglevOnNextCall(shift_right);
// assertEquals(3, shift_right(1, 2));
// assertTrue(isMaglevved(shift_right));
// // We should deopt here in SmiTag.
// assertEquals(3.2, shift_right(1.2, 2));
// assertFalse(isMaglevved(shift_right));
// })();

View File

@ -0,0 +1,41 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --maglev
function sarl(x, y) {
return x >> y;
}
function sarl_test(lhs, rhs, expected_result) {
// Warmup.
%PrepareFunctionForOptimization(sarl);
%ClearFunctionFeedback(sarl);
sarl(1, 1);
%OptimizeMaglevOnNextCall(sarl);
assertEquals(expected_result, sarl(lhs, rhs));
assertTrue(isMaglevved(sarl));
%DeoptimizeFunction(sarl);
assertEquals(expected_result, sarl(lhs, rhs));
}
function sarl_test_expect_deopt(lhs, rhs, expected_result) {
// Warmup.
%PrepareFunctionForOptimization(sarl);
%ClearFunctionFeedback(sarl);
sarl(1, 1);
%OptimizeMaglevOnNextCall(sarl);
assertEquals(expected_result, sarl(lhs, rhs));
assertFalse(isMaglevved(sarl));
}
sarl_test(8, 2, 2);
sarl_test(-8, 2, -2);
sarl_test(-8, 0, -8);
sarl_test(8, 10, 0);
sarl_test(8, 33, 4);
sarl_test_expect_deopt(0xFFFFFFFF, 0x3FFFFFFF, -1);