v8/test/unittests/compiler/redundancy-elimination-unittest.cc

// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/redundancy-elimination.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/feedback-source.h"
#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "testing/gmock-support.h"
using testing::_;
using testing::NiceMock;
namespace v8 {
namespace internal {
namespace compiler {
namespace redundancy_elimination_unittest {
class RedundancyEliminationTest : public GraphTest {
public:
explicit RedundancyEliminationTest(int num_parameters = 4)
: GraphTest(num_parameters),
reducer_(&editor_, zone()),
simplified_(zone()) {
// Initialize the {reducer_} state for the Start node.
reducer_.Reduce(graph()->start());
// Create a feedback vector with two CALL_IC slots.
FeedbackVectorSpec spec(zone());
FeedbackSlot slot1 = spec.AddCallICSlot();
FeedbackSlot slot2 = spec.AddCallICSlot();
Handle<FeedbackMetadata> metadata = FeedbackMetadata::New(isolate(), &spec);
Handle<SharedFunctionInfo> shared =
isolate()->factory()->NewSharedFunctionInfoForBuiltin(
isolate()->factory()->empty_string(), Builtins::kIllegal);
shared->set_raw_outer_scope_info_or_feedback_metadata(*metadata);
Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
ClosureFeedbackCellArray::New(isolate(), shared);
IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate()));
Handle<FeedbackVector> feedback_vector = FeedbackVector::New(
isolate(), shared, closure_feedback_cell_array, &is_compiled_scope);
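// The tests iterate over three kinds of feedback: a default (invalid)
// FeedbackSource plus the two CALL_IC slots created above.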
vector_slot_pairs_.push_back(FeedbackSource());
vector_slot_pairs_.push_back(FeedbackSource(feedback_vector, slot1));
vector_slot_pairs_.push_back(FeedbackSource(feedback_vector, slot2));
}
~RedundancyEliminationTest() override = default;
protected:
Reduction Reduce(Node* node) { return reducer_.Reduce(node); }
std::vector<FeedbackSource> const& vector_slot_pairs() const {
return vector_slot_pairs_;
}
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
private:
NiceMock<MockAdvancedReducerEditor> editor_;
std::vector<FeedbackSource> vector_slot_pairs_;
FeedbackSource feedback2_;
RedundancyElimination reducer_;
SimplifiedOperatorBuilder simplified_;
};
namespace {
const CheckForMinusZeroMode kCheckForMinusZeroModes[] = {
CheckForMinusZeroMode::kCheckForMinusZero,
CheckForMinusZeroMode::kDontCheckForMinusZero,
};
const CheckTaggedInputMode kCheckTaggedInputModes[] = {
CheckTaggedInputMode::kNumber, CheckTaggedInputMode::kNumberOrOddball};
const NumberOperationHint kNumberOperationHints[] = {
NumberOperationHint::kSignedSmall,
NumberOperationHint::kSignedSmallInputs,
NumberOperationHint::kNumber,
NumberOperationHint::kNumberOrOddball,
};
} // namespace
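// Each test below builds two checks on the same effect chain and reduces them
// in order: the first Reduce() records the check and yields the node itself
// as the replacement, while the second, subsumed check is replaced by the
// first one, independent of the feedback attached to either node.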
// -----------------------------------------------------------------------------
// CheckBounds
TEST_F(RedundancyEliminationTest, CheckBounds) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* index = Parameter(0);
Node* length = Parameter(1);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback1), index, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback2), index, length, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckNumber
TEST_F(RedundancyEliminationTest, CheckNumberSubsumedByCheckSmi) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
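// CheckSmi guarantees a Smi, and every Smi is a Number, so the CheckNumber
// that follows on the same value is redundant and reuses the CheckSmi.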
Node* check1 = effect = graph()->NewNode(
simplified()->CheckSmi(feedback1), value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckNumber(feedback2), value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckReceiver
TEST_F(RedundancyEliminationTest, CheckReceiver) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect =
graph()->NewNode(simplified()->CheckReceiver(), value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect =
graph()->NewNode(simplified()->CheckReceiver(), value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
// -----------------------------------------------------------------------------
// CheckReceiverOrNullOrUndefined
TEST_F(RedundancyEliminationTest, CheckReceiverOrNullOrUndefined) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckReceiverOrNullOrUndefined(), value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckReceiverOrNullOrUndefined(), value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
TEST_F(RedundancyEliminationTest,
CheckReceiverOrNullOrUndefinedSubsumedByCheckReceiver) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
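// CheckReceiver is the stricter check: a JSReceiver trivially satisfies
// CheckReceiverOrNullOrUndefined, so the second check is redundant.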
Node* check1 = effect =
graph()->NewNode(simplified()->CheckReceiver(), value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckReceiverOrNullOrUndefined(), value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
// -----------------------------------------------------------------------------
// CheckString
TEST_F(RedundancyEliminationTest,
CheckStringSubsumedByCheckInternalizedString) {
TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
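// Internalized strings are strings, so a CheckString following a
// CheckInternalizedString on the same value is redundant.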
Node* check1 = effect = graph()->NewNode(
simplified()->CheckInternalizedString(), value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckString(feedback), value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
// -----------------------------------------------------------------------------
// CheckSymbol
TEST_F(RedundancyEliminationTest, CheckSymbol) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect =
graph()->NewNode(simplified()->CheckSymbol(), value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect =
graph()->NewNode(simplified()->CheckSymbol(), value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
// -----------------------------------------------------------------------------
// CheckedFloat64ToInt32
TEST_F(RedundancyEliminationTest, CheckedFloat64ToInt32) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckedFloat64ToInt32(mode, feedback1), value, effect,
control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckedFloat64ToInt32(mode, feedback2), value, effect,
control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
}
// -----------------------------------------------------------------------------
// CheckedFloat64ToInt64
TEST_F(RedundancyEliminationTest, CheckedFloat64ToInt64) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckedFloat64ToInt64(mode, feedback1), value, effect,
control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckedFloat64ToInt64(mode, feedback2), value, effect,
control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
}
// -----------------------------------------------------------------------------
// CheckedInt32ToTaggedSigned
TEST_F(RedundancyEliminationTest, CheckedInt32ToTaggedSigned) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect =
graph()->NewNode(simplified()->CheckedInt32ToTaggedSigned(feedback1),
value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect =
graph()->NewNode(simplified()->CheckedInt32ToTaggedSigned(feedback2),
value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckedInt64ToInt32
TEST_F(RedundancyEliminationTest, CheckedInt64ToInt32) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckedInt64ToInt32(feedback1), value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckedInt64ToInt32(feedback2), value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckedInt64ToTaggedSigned
TEST_F(RedundancyEliminationTest, CheckedInt64ToTaggedSigned) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect =
graph()->NewNode(simplified()->CheckedInt64ToTaggedSigned(feedback1),
value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect =
graph()->NewNode(simplified()->CheckedInt64ToTaggedSigned(feedback2),
value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckedTaggedSignedToInt32
TEST_F(RedundancyEliminationTest, CheckedTaggedSignedToInt32) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect =
graph()->NewNode(simplified()->CheckedTaggedSignedToInt32(feedback1),
value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect =
graph()->NewNode(simplified()->CheckedTaggedSignedToInt32(feedback2),
value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckedTaggedToFloat64
TEST_F(RedundancyEliminationTest, CheckedTaggedToFloat64) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(CheckTaggedInputMode, mode, kCheckTaggedInputModes) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckedTaggedToFloat64(mode, feedback1), value,
effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckedTaggedToFloat64(mode, feedback2), value,
effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
}
TEST_F(RedundancyEliminationTest,
CheckedTaggedToFloat64SubsumedByCheckedTaggedToFloat64) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
// If the check passed for CheckTaggedInputMode::kNumber, it'll
// also pass later for CheckTaggedInputMode::kNumberOrOddball.
Node* check1 = effect =
graph()->NewNode(simplified()->CheckedTaggedToFloat64(
CheckTaggedInputMode::kNumber, feedback1),
value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckedTaggedToFloat64(
CheckTaggedInputMode::kNumberOrOddball, feedback2),
value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckedTaggedToInt32
TEST_F(RedundancyEliminationTest, CheckedTaggedToInt32) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckedTaggedToInt32(mode, feedback1), value, effect,
control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckedTaggedToInt32(mode, feedback2), value, effect,
control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
}
TEST_F(RedundancyEliminationTest,
CheckedTaggedToInt32SubsumedByCheckedTaggedSignedToInt32) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
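// CheckedTaggedSignedToInt32 already guarantees a Smi input, so the later
// CheckedTaggedToInt32 (which additionally admits heap numbers) is redundant.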
Node* check1 = effect = graph()->NewNode(
simplified()->CheckedTaggedSignedToInt32(feedback1), value, effect,
control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckedTaggedToInt32(mode, feedback2), value, effect,
control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
}
// -----------------------------------------------------------------------------
// CheckedTaggedToInt64
TEST_F(RedundancyEliminationTest, CheckedTaggedToInt64) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckedTaggedToInt64(mode, feedback1), value, effect,
control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckedTaggedToInt64(mode, feedback2), value, effect,
control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
}
// -----------------------------------------------------------------------------
// CheckedTaggedToTaggedPointer
TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedPointer) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckedTaggedToTaggedPointer(feedback1), value, effect,
control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckedTaggedToTaggedPointer(feedback2), value, effect,
control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckedTaggedToTaggedSigned
TEST_F(RedundancyEliminationTest, CheckedTaggedToTaggedSigned) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect =
graph()->NewNode(simplified()->CheckedTaggedToTaggedSigned(feedback1),
value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect =
graph()->NewNode(simplified()->CheckedTaggedToTaggedSigned(feedback2),
value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckedTruncateTaggedToWord32
TEST_F(RedundancyEliminationTest, CheckedTruncateTaggedToWord32) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(CheckTaggedInputMode, mode, kCheckTaggedInputModes) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckedTruncateTaggedToWord32(mode, feedback1), value,
effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckedTruncateTaggedToWord32(mode, feedback2), value,
effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
}
TEST_F(RedundancyEliminationTest,
CheckedTruncateTaggedToWord32SubsumedByCheckedTruncateTaggedToWord32) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
// If the check passed for CheckTaggedInputMode::kNumber, it'll
// also pass later for CheckTaggedInputMode::kNumberOrOddball.
Node* check1 = effect =
graph()->NewNode(simplified()->CheckedTruncateTaggedToWord32(
CheckTaggedInputMode::kNumber, feedback1),
value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckedTruncateTaggedToWord32(
CheckTaggedInputMode::kNumberOrOddball, feedback2),
value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckedUint32Bounds
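// Bounds checks take two inputs (index and length); a repeated check on the
// same pair is eliminated just like the unary checked conversions above.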
TEST_F(RedundancyEliminationTest, CheckedUint32Bounds) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* index = Parameter(0);
Node* length = Parameter(1);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect =
graph()->NewNode(simplified()->CheckedUint32Bounds(feedback1, {}),
index, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect =
graph()->NewNode(simplified()->CheckedUint32Bounds(feedback2, {}),
index, length, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckedUint32ToInt32
TEST_F(RedundancyEliminationTest, CheckedUint32ToInt32) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect =
graph()->NewNode(simplified()->CheckedUint32ToInt32(feedback1), value,
effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect =
graph()->NewNode(simplified()->CheckedUint32ToInt32(feedback2), value,
effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckedUint32ToTaggedSigned
TEST_F(RedundancyEliminationTest, CheckedUint32ToTaggedSigned) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect =
graph()->NewNode(simplified()->CheckedUint32ToTaggedSigned(feedback1),
value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect =
graph()->NewNode(simplified()->CheckedUint32ToTaggedSigned(feedback2),
value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckedUint64Bounds
TEST_F(RedundancyEliminationTest, CheckedUint64Bounds) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* index = Parameter(0);
Node* length = Parameter(1);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect =
graph()->NewNode(simplified()->CheckedUint64Bounds(feedback1, {}),
index, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect =
graph()->NewNode(simplified()->CheckedUint64Bounds(feedback2, {}),
index, length, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckedUint64ToInt32
TEST_F(RedundancyEliminationTest, CheckedUint64ToInt32) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect =
graph()->NewNode(simplified()->CheckedUint64ToInt32(feedback1), value,
effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect =
graph()->NewNode(simplified()->CheckedUint64ToInt32(feedback2), value,
effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// CheckedUint64ToTaggedSigned
TEST_F(RedundancyEliminationTest, CheckedUint64ToTaggedSigned) {
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* value = Parameter(0);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect =
graph()->NewNode(simplified()->CheckedUint64ToTaggedSigned(feedback1),
value, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect =
graph()->NewNode(simplified()->CheckedUint64ToTaggedSigned(feedback2),
value, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
}
}
}
// -----------------------------------------------------------------------------
// SpeculativeNumberEqual
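// For speculative comparisons with SignedSmall feedback, the reducer rewires
// an input to the result of an earlier CheckBounds on that input when the
// check's result carries a better (narrower) type, so that no separate Smi
// check is needed for the comparison later on.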
TEST_F(RedundancyEliminationTest,
SpeculativeNumberEqualWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
Node* rhs = Parameter(Type::Any(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback1), lhs, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback2), rhs, length, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check2);
Node* cmp3 = effect =
graph()->NewNode(simplified()->SpeculativeNumberEqual(
NumberOperationHint::kSignedSmall),
lhs, rhs, effect, control);
Reduction r3 = Reduce(cmp3);
ASSERT_TRUE(r3.Changed());
EXPECT_THAT(r3.replacement(),
IsSpeculativeNumberEqual(NumberOperationHint::kSignedSmall,
check1, check2, _, _));
}
}
}
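// If the inputs are already typed UnsignedSmall, rewiring them to the
// CheckBounds results would gain nothing, so the comparison keeps its
// original inputs.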
TEST_F(RedundancyEliminationTest,
SpeculativeNumberEqualWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
Node* rhs = Parameter(Type::UnsignedSmall(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback1), lhs, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback2), rhs, length, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check2);
Node* cmp3 = effect =
graph()->NewNode(simplified()->SpeculativeNumberEqual(
NumberOperationHint::kSignedSmall),
lhs, rhs, effect, control);
Reduction r3 = Reduce(cmp3);
ASSERT_TRUE(r3.Changed());
EXPECT_THAT(r3.replacement(),
IsSpeculativeNumberEqual(NumberOperationHint::kSignedSmall,
lhs, rhs, _, _));
}
}
}
// -----------------------------------------------------------------------------
// SpeculativeNumberLessThan
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
Node* rhs = Parameter(Type::Any(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback1), lhs, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback2), rhs, length, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check2);
Node* cmp3 = effect =
graph()->NewNode(simplified()->SpeculativeNumberLessThan(
NumberOperationHint::kSignedSmall),
lhs, rhs, effect, control);
Reduction r3 = Reduce(cmp3);
ASSERT_TRUE(r3.Changed());
EXPECT_THAT(r3.replacement(),
IsSpeculativeNumberLessThan(NumberOperationHint::kSignedSmall,
check1, check2, _, _));
}
}
}
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
Node* rhs = Parameter(Type::UnsignedSmall(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback1), lhs, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback2), rhs, length, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check2);
Node* cmp3 = effect =
graph()->NewNode(simplified()->SpeculativeNumberLessThan(
NumberOperationHint::kSignedSmall),
lhs, rhs, effect, control);
Reduction r3 = Reduce(cmp3);
ASSERT_TRUE(r3.Changed());
EXPECT_THAT(r3.replacement(),
IsSpeculativeNumberLessThan(NumberOperationHint::kSignedSmall,
lhs, rhs, _, _));
}
}
}
// -----------------------------------------------------------------------------
// SpeculativeNumberLessThanOrEqual
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanOrEqualWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
Node* rhs = Parameter(Type::Any(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback1), lhs, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback2), rhs, length, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check2);
Node* cmp3 = effect =
graph()->NewNode(simplified()->SpeculativeNumberLessThanOrEqual(
NumberOperationHint::kSignedSmall),
lhs, rhs, effect, control);
Reduction r3 = Reduce(cmp3);
ASSERT_TRUE(r3.Changed());
EXPECT_THAT(r3.replacement(),
IsSpeculativeNumberLessThanOrEqual(
NumberOperationHint::kSignedSmall, check1, check2, _, _));
}
}
}
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanOrEqualWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
Node* rhs = Parameter(Type::UnsignedSmall(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback1), lhs, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* check2 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback2), rhs, length, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check2);
Node* cmp3 = effect =
graph()->NewNode(simplified()->SpeculativeNumberLessThanOrEqual(
NumberOperationHint::kSignedSmall),
lhs, rhs, effect, control);
Reduction r3 = Reduce(cmp3);
ASSERT_TRUE(r3.Changed());
EXPECT_THAT(r3.replacement(),
IsSpeculativeNumberLessThanOrEqual(
NumberOperationHint::kSignedSmall, lhs, rhs, _, _));
}
}
}
// -----------------------------------------------------------------------------
// SpeculativeNumberAdd
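// The same rewiring applies to speculative arithmetic: only the operand that
// went through CheckBounds (here the lhs) is replaced by the check's result.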
TEST_F(RedundancyEliminationTest,
SpeculativeNumberAddWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
Node* rhs = Parameter(Type::Any(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback), lhs, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* add2 = effect = graph()->NewNode(
simplified()->SpeculativeNumberAdd(hint), lhs, rhs, effect, control);
Reduction r2 = Reduce(add2);
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(),
IsSpeculativeNumberAdd(hint, check1, rhs, _, _));
}
}
}
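// Here the lhs already has the singleton type Range(42, 42), so the
// CheckBounds result is no improvement and the operands stay as they are.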
TEST_F(RedundancyEliminationTest, SpeculativeNumberAddWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
Node* rhs = Parameter(Type::Any(), 0);
Node* length = Parameter(Type::Unsigned31(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback), lhs, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* add2 = effect = graph()->NewNode(
simplified()->SpeculativeNumberAdd(hint), lhs, rhs, effect, control);
Reduction r2 = Reduce(add2);
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(),
IsSpeculativeNumberAdd(hint, lhs, rhs, _, _));
}
}
}
// -----------------------------------------------------------------------------
// SpeculativeNumberSubtract
TEST_F(RedundancyEliminationTest,
SpeculativeNumberSubtractWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
Node* rhs = Parameter(Type::Any(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback), lhs, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* subtract2 = effect =
graph()->NewNode(simplified()->SpeculativeNumberSubtract(hint), lhs,
rhs, effect, control);
Reduction r2 = Reduce(subtract2);
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(),
IsSpeculativeNumberSubtract(hint, check1, rhs, _, _));
}
}
}
TEST_F(RedundancyEliminationTest,
SpeculativeNumberSubtractWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
Node* rhs = Parameter(Type::Any(), 0);
Node* length = Parameter(Type::Unsigned31(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback), lhs, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* subtract2 = effect =
graph()->NewNode(simplified()->SpeculativeNumberSubtract(hint), lhs,
rhs, effect, control);
Reduction r2 = Reduce(subtract2);
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(),
IsSpeculativeNumberSubtract(hint, lhs, rhs, _, _));
}
}
}
// -----------------------------------------------------------------------------
// SpeculativeSafeIntegerAdd
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerAddWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
Node* rhs = Parameter(Type::Any(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback), lhs, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* add2 = effect =
graph()->NewNode(simplified()->SpeculativeSafeIntegerAdd(hint), lhs,
rhs, effect, control);
Reduction r2 = Reduce(add2);
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(),
IsSpeculativeSafeIntegerAdd(hint, check1, rhs, _, _));
}
}
}
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerAddWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
Node* rhs = Parameter(Type::Any(), 0);
Node* length = Parameter(Type::Unsigned31(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback), lhs, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* add2 = effect =
graph()->NewNode(simplified()->SpeculativeSafeIntegerAdd(hint), lhs,
rhs, effect, control);
Reduction r2 = Reduce(add2);
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(),
IsSpeculativeSafeIntegerAdd(hint, lhs, rhs, _, _));
}
}
}
// -----------------------------------------------------------------------------
// SpeculativeSafeIntegerSubtract
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerSubtractWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
Node* rhs = Parameter(Type::Any(), 1);
Node* length = Parameter(Type::Unsigned31(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback), lhs, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* subtract2 = effect =
graph()->NewNode(simplified()->SpeculativeSafeIntegerSubtract(hint),
lhs, rhs, effect, control);
Reduction r2 = Reduce(subtract2);
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(),
IsSpeculativeSafeIntegerSubtract(hint, check1, rhs, _, _));
}
}
}
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerSubtractWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
Node* rhs = Parameter(Type::Any(), 0);
Node* length = Parameter(Type::Unsigned31(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect = graph()->NewNode(
simplified()->CheckBounds(feedback), lhs, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* subtract2 = effect =
graph()->NewNode(simplified()->SpeculativeSafeIntegerSubtract(hint),
lhs, rhs, effect, control);
Reduction r2 = Reduce(subtract2);
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(),
IsSpeculativeSafeIntegerSubtract(hint, lhs, rhs, _, _));
}
}
}
// -----------------------------------------------------------------------------
// SpeculativeToNumber
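// SpeculativeToNumber of a value that already went through CheckBounds reuses
// the check's result when that result has a better type; if the input type is
// already precise, the original input is kept.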
TEST_F(RedundancyEliminationTest,
SpeculativeToNumberWithCheckBoundsBetterType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* index = Parameter(Type::Any(), 0);
Node* length = Parameter(Type::Unsigned31(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect =
graph()->NewNode(simplified()->CheckBounds(feedback1), index,
length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* to_number2 = effect =
graph()->NewNode(simplified()->SpeculativeToNumber(hint, feedback2),
index, effect, control);
Reduction r2 = Reduce(to_number2);
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(), IsSpeculativeToNumber(check1));
}
}
}
}
TEST_F(RedundancyEliminationTest, SpeculativeToNumberWithCheckBoundsSameType) {
Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(FeedbackSource, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(FeedbackSource, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* index = Parameter(Type::Range(42.0, 42.0, zone()), 0);
Node* length = Parameter(Type::Unsigned31(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
Node* check1 = effect =
graph()->NewNode(simplified()->CheckBounds(feedback1), index,
length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
Node* to_number2 = effect =
graph()->NewNode(simplified()->SpeculativeToNumber(hint, feedback2),
index, effect, control);
Reduction r2 = Reduce(to_number2);
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(), IsSpeculativeToNumber(index));
}
}
}
}
} // namespace redundancy_elimination_unittest
} // namespace compiler
} // namespace internal
} // namespace v8