v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc

// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-random-iterator.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
namespace interpreter {
class BytecodeArrayRandomIteratorTest : public TestWithIsolateAndZone {
public:
BytecodeArrayRandomIteratorTest() = default;
~BytecodeArrayRandomIteratorTest() override = default;
};
TEST_F(BytecodeArrayRandomIteratorTest, InvalidBeforeStart) {
// Use a builder to create an array containing multiple bytecodes
// with 0, 1 and 2 operands.
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
Smi smi_0 = Smi::FromInt(64);
Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
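// pair spans registers r0-r1 and triple spans r0-r2; they serve below as the
// output/argument register ranges for CallRuntimeForPair and ForInPrepare.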
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
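// The chain below emits 23 bytecodes (indices 0 through 22); loading smi_1
// needs a 32-bit immediate, so that LdaSmi is emitted at quadruple operand
// scale.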
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(heap_num_1)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
.StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
.StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
.StoreAccumulatorInRegister(reg_1)
.LoadNamedProperty(reg_1, name, feedback_slot)
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
.ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.Return();
Revert "[parser] Slice the source string where possible" This reverts commit 2df5e7a7b6d625a69b17d4e11380eed55da3d3e6. Reason for revert: Mystery crashes https://bugs.chromium.org/p/chromium/issues/detail?id=838805 Original change's description: > [parser] Slice the source string where possible > > When internalizing string literals (for quoted strings or property names), > try to create a sliced string of the source string rather than allocating > a copy of the bytes. > > This will not work for string literals that contain escapes (e.g. unicode > escapes), and currently does not support two-byte strings. > > Bug: chromium:818642 > Change-Id: I686e5ad36baecd1a84ce5e124118431249b6c980 > Reviewed-on: https://chromium-review.googlesource.com/1010282 > Reviewed-by: Michael Lippautz <mlippautz@chromium.org> > Reviewed-by: Yang Guo <yangguo@chromium.org> > Reviewed-by: Jaroslav Sevcik <jarin@chromium.org> > Reviewed-by: Marja Hölttä <marja@chromium.org> > Commit-Queue: Leszek Swirski <leszeks@chromium.org> > Cr-Commit-Position: refs/heads/master@{#52898} TBR=marja@chromium.org,yangguo@chromium.org,jarin@chromium.org,mlippautz@chromium.org,leszeks@chromium.org,verwaest@chromium.org Change-Id: I598b6668c43a3e843e2dd8e60852b2b2f3461954 No-Presubmit: true No-Tree-Checks: true No-Try: true Bug: chromium:818642 Reviewed-on: https://chromium-review.googlesource.com/1039885 Commit-Queue: Leszek Swirski <leszeks@chromium.org> Reviewed-by: Leszek Swirski <leszeks@chromium.org> Cr-Commit-Position: refs/heads/master@{#52919}
2018-05-02 15:30:06 +00:00
ast_factory.Internalize(isolate());
Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
BytecodeArrayRandomIterator iterator(bytecodeArray, zone());
iterator.GoToStart();
ASSERT_TRUE(iterator.IsValid());
--iterator;
ASSERT_FALSE(iterator.IsValid());
}
TEST_F(BytecodeArrayRandomIteratorTest, InvalidAfterEnd) {
// Use a builder to create an array containing multiple bytecodes
// with 0, 1 and 2 operands.
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
Smi smi_0 = Smi::FromInt(64);
Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(heap_num_1)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
.StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
.StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
.StoreAccumulatorInRegister(reg_1)
.LoadNamedProperty(reg_1, name, feedback_slot)
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
.ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.Return();
Revert "[parser] Slice the source string where possible" This reverts commit 2df5e7a7b6d625a69b17d4e11380eed55da3d3e6. Reason for revert: Mystery crashes https://bugs.chromium.org/p/chromium/issues/detail?id=838805 Original change's description: > [parser] Slice the source string where possible > > When internalizing string literals (for quoted strings or property names), > try to create a sliced string of the source string rather than allocating > a copy of the bytes. > > This will not work for string literals that contain escapes (e.g. unicode > escapes), and currently does not support two-byte strings. > > Bug: chromium:818642 > Change-Id: I686e5ad36baecd1a84ce5e124118431249b6c980 > Reviewed-on: https://chromium-review.googlesource.com/1010282 > Reviewed-by: Michael Lippautz <mlippautz@chromium.org> > Reviewed-by: Yang Guo <yangguo@chromium.org> > Reviewed-by: Jaroslav Sevcik <jarin@chromium.org> > Reviewed-by: Marja Hölttä <marja@chromium.org> > Commit-Queue: Leszek Swirski <leszeks@chromium.org> > Cr-Commit-Position: refs/heads/master@{#52898} TBR=marja@chromium.org,yangguo@chromium.org,jarin@chromium.org,mlippautz@chromium.org,leszeks@chromium.org,verwaest@chromium.org Change-Id: I598b6668c43a3e843e2dd8e60852b2b2f3461954 No-Presubmit: true No-Tree-Checks: true No-Try: true Bug: chromium:818642 Reviewed-on: https://chromium-review.googlesource.com/1039885 Commit-Queue: Leszek Swirski <leszeks@chromium.org> Reviewed-by: Leszek Swirski <leszeks@chromium.org> Cr-Commit-Position: refs/heads/master@{#52919}
2018-05-02 15:30:06 +00:00
ast_factory.Internalize(isolate());
Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
BytecodeArrayRandomIterator iterator(bytecodeArray, zone());
iterator.GoToEnd();
ASSERT_TRUE(iterator.IsValid());
++iterator;
ASSERT_FALSE(iterator.IsValid());
}
TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
// Use a builder to create an array containing multiple bytecodes
// with 0, 1 and 2 operands.
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
Smi smi_0 = Smi::FromInt(64);
Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(heap_num_1)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
.StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
.StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
.StoreAccumulatorInRegister(reg_1)
.LoadNamedProperty(reg_1, name, feedback_slot)
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
.ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.Return();
Revert "[parser] Slice the source string where possible" This reverts commit 2df5e7a7b6d625a69b17d4e11380eed55da3d3e6. Reason for revert: Mystery crashes https://bugs.chromium.org/p/chromium/issues/detail?id=838805 Original change's description: > [parser] Slice the source string where possible > > When internalizing string literals (for quoted strings or property names), > try to create a sliced string of the source string rather than allocating > a copy of the bytes. > > This will not work for string literals that contain escapes (e.g. unicode > escapes), and currently does not support two-byte strings. > > Bug: chromium:818642 > Change-Id: I686e5ad36baecd1a84ce5e124118431249b6c980 > Reviewed-on: https://chromium-review.googlesource.com/1010282 > Reviewed-by: Michael Lippautz <mlippautz@chromium.org> > Reviewed-by: Yang Guo <yangguo@chromium.org> > Reviewed-by: Jaroslav Sevcik <jarin@chromium.org> > Reviewed-by: Marja Hölttä <marja@chromium.org> > Commit-Queue: Leszek Swirski <leszeks@chromium.org> > Cr-Commit-Position: refs/heads/master@{#52898} TBR=marja@chromium.org,yangguo@chromium.org,jarin@chromium.org,mlippautz@chromium.org,leszeks@chromium.org,verwaest@chromium.org Change-Id: I598b6668c43a3e843e2dd8e60852b2b2f3461954 No-Presubmit: true No-Tree-Checks: true No-Try: true Bug: chromium:818642 Reviewed-on: https://chromium-review.googlesource.com/1039885 Commit-Queue: Leszek Swirski <leszeks@chromium.org> Reviewed-by: Leszek Swirski <leszeks@chromium.org> Cr-Commit-Position: refs/heads/master@{#52919}
2018-05-02 15:30:06 +00:00
ast_factory.Internalize(isolate());
Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
BytecodeArrayRandomIterator iterator(bytecodeArray, zone());
iterator.GoToStart();
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), 0);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
heap_num_0);
ASSERT_TRUE(iterator.IsValid());
}
TEST_F(BytecodeArrayRandomIteratorTest, AccessesLast) {
// Use a builder to create an array containing multiple bytecodes
// with 0, 1 and 2 operands.
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
Smi smi_0 = Smi::FromInt(64);
Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(heap_num_1)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
.StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
.StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
.StoreAccumulatorInRegister(reg_1)
.LoadNamedProperty(reg_1, name, feedback_slot)
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
.ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.Return();
Revert "[parser] Slice the source string where possible" This reverts commit 2df5e7a7b6d625a69b17d4e11380eed55da3d3e6. Reason for revert: Mystery crashes https://bugs.chromium.org/p/chromium/issues/detail?id=838805 Original change's description: > [parser] Slice the source string where possible > > When internalizing string literals (for quoted strings or property names), > try to create a sliced string of the source string rather than allocating > a copy of the bytes. > > This will not work for string literals that contain escapes (e.g. unicode > escapes), and currently does not support two-byte strings. > > Bug: chromium:818642 > Change-Id: I686e5ad36baecd1a84ce5e124118431249b6c980 > Reviewed-on: https://chromium-review.googlesource.com/1010282 > Reviewed-by: Michael Lippautz <mlippautz@chromium.org> > Reviewed-by: Yang Guo <yangguo@chromium.org> > Reviewed-by: Jaroslav Sevcik <jarin@chromium.org> > Reviewed-by: Marja Hölttä <marja@chromium.org> > Commit-Queue: Leszek Swirski <leszeks@chromium.org> > Cr-Commit-Position: refs/heads/master@{#52898} TBR=marja@chromium.org,yangguo@chromium.org,jarin@chromium.org,mlippautz@chromium.org,leszeks@chromium.org,verwaest@chromium.org Change-Id: I598b6668c43a3e843e2dd8e60852b2b2f3461954 No-Presubmit: true No-Tree-Checks: true No-Try: true Bug: chromium:818642 Reviewed-on: https://chromium-review.googlesource.com/1039885 Commit-Queue: Leszek Swirski <leszeks@chromium.org> Reviewed-by: Leszek Swirski <leszeks@chromium.org> Cr-Commit-Position: refs/heads/master@{#52919}
2018-05-02 15:30:06 +00:00
ast_factory.Internalize(isolate());
Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
BytecodeArrayRandomIterator iterator(bytecodeArray, zone());
iterator.GoToEnd();
int offset = bytecodeArray->length() -
Bytecodes::Size(Bytecode::kReturn, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
EXPECT_EQ(iterator.current_index(), 22);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
}
TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
// Use a builder to create an array containing multiple bytecodes
// with 0, 1 and 2 operands.
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
Smi smi_0 = Smi::FromInt(64);
Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
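// "abc" is the third constant pool entry, after the two heap number literals,
// so its index operand is expected to be 2.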
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(heap_num_1)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
.StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
.StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
.StoreAccumulatorInRegister(reg_1)
.LoadNamedProperty(reg_1, name, feedback_slot)
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
.ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.Return();
// Test iterator sees the expected output from the builder.
Revert "[parser] Slice the source string where possible" This reverts commit 2df5e7a7b6d625a69b17d4e11380eed55da3d3e6. Reason for revert: Mystery crashes https://bugs.chromium.org/p/chromium/issues/detail?id=838805 Original change's description: > [parser] Slice the source string where possible > > When internalizing string literals (for quoted strings or property names), > try to create a sliced string of the source string rather than allocating > a copy of the bytes. > > This will not work for string literals that contain escapes (e.g. unicode > escapes), and currently does not support two-byte strings. > > Bug: chromium:818642 > Change-Id: I686e5ad36baecd1a84ce5e124118431249b6c980 > Reviewed-on: https://chromium-review.googlesource.com/1010282 > Reviewed-by: Michael Lippautz <mlippautz@chromium.org> > Reviewed-by: Yang Guo <yangguo@chromium.org> > Reviewed-by: Jaroslav Sevcik <jarin@chromium.org> > Reviewed-by: Marja Hölttä <marja@chromium.org> > Commit-Queue: Leszek Swirski <leszeks@chromium.org> > Cr-Commit-Position: refs/heads/master@{#52898} TBR=marja@chromium.org,yangguo@chromium.org,jarin@chromium.org,mlippautz@chromium.org,leszeks@chromium.org,verwaest@chromium.org Change-Id: I598b6668c43a3e843e2dd8e60852b2b2f3461954 No-Presubmit: true No-Tree-Checks: true No-Try: true Bug: chromium:818642 Reviewed-on: https://chromium-review.googlesource.com/1039885 Commit-Queue: Leszek Swirski <leszeks@chromium.org> Reviewed-by: Leszek Swirski <leszeks@chromium.org> Cr-Commit-Position: refs/heads/master@{#52919}
2018-05-02 15:30:06 +00:00
ast_factory.Internalize(isolate());
BytecodeArrayRandomIterator iterator(builder.ToBytecodeArray(isolate()),
zone());
const int kPrefixByteSize = 1;
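// The quadruple-scaled LdaSmi for smi_1 is preceded by a one-byte prefix
// bytecode, so the offset sums below add kPrefixByteSize for it.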
int offset = 0;
iterator.GoToIndex(13);
offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
kPrefixByteSize;
offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
EXPECT_EQ(iterator.current_index(), 13);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
iterator.GoToIndex(2);
offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
heap_num_1);
ASSERT_TRUE(iterator.IsValid());
iterator.GoToIndex(18);
offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
kPrefixByteSize;
offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntimeForPair);
EXPECT_EQ(iterator.current_index(), 18);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadLookupSlotForCall);
EXPECT_EQ(iterator.GetRegisterOperand(1).index(), param.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(1), 1);
EXPECT_EQ(iterator.GetRegisterCountOperand(2), 1u);
EXPECT_EQ(iterator.GetRegisterOperand(3).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(3), 2);
ASSERT_TRUE(iterator.IsValid());
iterator -= 3;
offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset -= Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
offset -= Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
EXPECT_EQ(iterator.current_index(), 15);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
EXPECT_EQ(iterator.GetIndexOperand(1), name_index);
EXPECT_EQ(iterator.GetIndexOperand(2), feedback_slot);
ASSERT_TRUE(iterator.IsValid());
iterator += 2;
offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 17);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), param.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
iterator.GoToIndex(22);
offset = Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
kPrefixByteSize;
offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
offset +=
Bytecodes::Size(Bytecode::kCallRuntimeForPair, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kCallRuntime, OperandScale::kSingle);
offset += Bytecodes::Size(Bytecode::kDebugger, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
EXPECT_EQ(iterator.current_index(), 22);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
iterator.GoToIndex(24);
EXPECT_FALSE(iterator.IsValid());
iterator.GoToIndex(-5);
EXPECT_FALSE(iterator.IsValid());
}
TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
// Use a builder to create an array containing multiple bytecodes
// with 0, 1 and 2 operands.
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
Smi smi_0 = Smi::FromInt(64);
Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(heap_num_1)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
.StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
.StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
.StoreAccumulatorInRegister(reg_1)
.LoadNamedProperty(reg_1, name, feedback_slot)
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
.ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.Return();
// Test iterator sees the expected output from the builder.
Revert "[parser] Slice the source string where possible" This reverts commit 2df5e7a7b6d625a69b17d4e11380eed55da3d3e6. Reason for revert: Mystery crashes https://bugs.chromium.org/p/chromium/issues/detail?id=838805 Original change's description: > [parser] Slice the source string where possible > > When internalizing string literals (for quoted strings or property names), > try to create a sliced string of the source string rather than allocating > a copy of the bytes. > > This will not work for string literals that contain escapes (e.g. unicode > escapes), and currently does not support two-byte strings. > > Bug: chromium:818642 > Change-Id: I686e5ad36baecd1a84ce5e124118431249b6c980 > Reviewed-on: https://chromium-review.googlesource.com/1010282 > Reviewed-by: Michael Lippautz <mlippautz@chromium.org> > Reviewed-by: Yang Guo <yangguo@chromium.org> > Reviewed-by: Jaroslav Sevcik <jarin@chromium.org> > Reviewed-by: Marja Hölttä <marja@chromium.org> > Commit-Queue: Leszek Swirski <leszeks@chromium.org> > Cr-Commit-Position: refs/heads/master@{#52898} TBR=marja@chromium.org,yangguo@chromium.org,jarin@chromium.org,mlippautz@chromium.org,leszeks@chromium.org,verwaest@chromium.org Change-Id: I598b6668c43a3e843e2dd8e60852b2b2f3461954 No-Presubmit: true No-Tree-Checks: true No-Try: true Bug: chromium:818642 Reviewed-on: https://chromium-review.googlesource.com/1039885 Commit-Queue: Leszek Swirski <leszeks@chromium.org> Reviewed-by: Leszek Swirski <leszeks@chromium.org> Cr-Commit-Position: refs/heads/master@{#52919}
2018-05-02 15:30:06 +00:00
ast_factory.Internalize(isolate());
BytecodeArrayRandomIterator iterator(builder.ToBytecodeArray(isolate()),
zone());
const int kPrefixByteSize = 1;
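// As above, kPrefixByteSize covers the prefix byte in front of the
// quadruple-scaled LdaSmi.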
int offset = 0;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
heap_num_0);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 1);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
heap_num_1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 3);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaZero);
EXPECT_EQ(iterator.current_index(), 4);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 5);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
EXPECT_EQ(iterator.current_index(), 6);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_0);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
EXPECT_EQ(iterator.current_index(), 7);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 8);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
EXPECT_EQ(iterator.current_index(), 9);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
EXPECT_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
kPrefixByteSize;
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
EXPECT_EQ(iterator.current_index(), 10);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 11);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdar);
EXPECT_EQ(iterator.current_index(), 12);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
EXPECT_EQ(iterator.current_index(), 13);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 14);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
EXPECT_EQ(iterator.current_index(), 15);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
EXPECT_EQ(iterator.GetIndexOperand(1), name_index);
EXPECT_EQ(iterator.GetIndexOperand(2), feedback_slot);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
EXPECT_EQ(iterator.current_index(), 16);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 17);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), param.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntimeForPair);
EXPECT_EQ(iterator.current_index(), 18);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadLookupSlotForCall);
EXPECT_EQ(iterator.GetRegisterOperand(1).index(), param.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(1), 1);
EXPECT_EQ(iterator.GetRegisterCountOperand(2), 1u);
EXPECT_EQ(iterator.GetRegisterOperand(3).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(3), 2);
ASSERT_TRUE(iterator.IsValid());
offset +=
Bytecodes::Size(Bytecode::kCallRuntimeForPair, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kForInPrepare);
EXPECT_EQ(iterator.current_index(), 19);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 3);
EXPECT_EQ(iterator.GetIndexOperand(1), feedback_slot);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntime);
EXPECT_EQ(iterator.current_index(), 20);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadIC_Miss);
EXPECT_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterCountOperand(2), 1u);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kCallRuntime, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
EXPECT_EQ(iterator.current_index(), 21);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kDebugger, OperandScale::kSingle);
++iterator;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
EXPECT_EQ(iterator.current_index(), 22);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
++iterator;
ASSERT_TRUE(!iterator.IsValid());
}
TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
// Use a builder to create an array containing multiple bytecodes
// with 0, 1 and 2 operands.
FeedbackVectorSpec feedback_spec(zone());
BytecodeArrayBuilder builder(zone(), 3, 3, &feedback_spec);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
HashSeed(isolate()));
double heap_num_0 = 2.718;
double heap_num_1 = 2.0 * Smi::kMaxValue;
Smi zero = Smi::zero();
Smi smi_0 = Smi::FromInt(64);
Smi smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
Register reg_1(1);
RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
const AstRawString* name = ast_factory.GetOneByteString("abc");
uint32_t name_index = 2;
uint32_t feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(heap_num_1)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
.StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
.StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0, 2)
.StoreAccumulatorInRegister(reg_1)
.LoadNamedProperty(reg_1, name, feedback_slot)
.BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
.ForInPrepare(triple, feedback_slot)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0)
.Debugger()
.Return();
// Test that the iterator sees the expected output from the builder.
Revert "[parser] Slice the source string where possible" This reverts commit 2df5e7a7b6d625a69b17d4e11380eed55da3d3e6. Reason for revert: Mystery crashes https://bugs.chromium.org/p/chromium/issues/detail?id=838805 Original change's description: > [parser] Slice the source string where possible > > When internalizing string literals (for quoted strings or property names), > try to create a sliced string of the source string rather than allocating > a copy of the bytes. > > This will not work for string literals that contain escapes (e.g. unicode > escapes), and currently does not support two-byte strings. > > Bug: chromium:818642 > Change-Id: I686e5ad36baecd1a84ce5e124118431249b6c980 > Reviewed-on: https://chromium-review.googlesource.com/1010282 > Reviewed-by: Michael Lippautz <mlippautz@chromium.org> > Reviewed-by: Yang Guo <yangguo@chromium.org> > Reviewed-by: Jaroslav Sevcik <jarin@chromium.org> > Reviewed-by: Marja Hölttä <marja@chromium.org> > Commit-Queue: Leszek Swirski <leszeks@chromium.org> > Cr-Commit-Position: refs/heads/master@{#52898} TBR=marja@chromium.org,yangguo@chromium.org,jarin@chromium.org,mlippautz@chromium.org,leszeks@chromium.org,verwaest@chromium.org Change-Id: I598b6668c43a3e843e2dd8e60852b2b2f3461954 No-Presubmit: true No-Tree-Checks: true No-Try: true Bug: chromium:818642 Reviewed-on: https://chromium-review.googlesource.com/1039885 Commit-Queue: Leszek Swirski <leszeks@chromium.org> Reviewed-by: Leszek Swirski <leszeks@chromium.org> Cr-Commit-Position: refs/heads/master@{#52919}
2018-05-02 15:30:06 +00:00
ast_factory.Internalize(isolate());
Handle<BytecodeArray> bytecodeArray = builder.ToBytecodeArray(isolate());
BytecodeArrayRandomIterator iterator(bytecodeArray, zone());
const int kPrefixByteSize = 1;
int offset = bytecodeArray->length();
iterator.GoToEnd();
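// Walk backwards from the trailing Return, subtracting each bytecode's size
// from the running offset before checking its index, offset, and operands.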
offset -= Bytecodes::Size(Bytecode::kReturn, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
EXPECT_EQ(iterator.current_index(), 22);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kDebugger, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
EXPECT_EQ(iterator.current_index(), 21);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kCallRuntime, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntime);
EXPECT_EQ(iterator.current_index(), 20);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadIC_Miss);
EXPECT_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterCountOperand(2), 1u);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kForInPrepare);
EXPECT_EQ(iterator.current_index(), 19);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 3);
EXPECT_EQ(iterator.GetIndexOperand(1), feedback_slot);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -=
Bytecodes::Size(Bytecode::kCallRuntimeForPair, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntimeForPair);
EXPECT_EQ(iterator.current_index(), 18);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadLookupSlotForCall);
EXPECT_EQ(iterator.GetRegisterOperand(1).index(), param.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(1), 1);
EXPECT_EQ(iterator.GetRegisterCountOperand(2), 1u);
EXPECT_EQ(iterator.GetRegisterOperand(3).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(3), 2);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 17);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), param.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
EXPECT_EQ(iterator.current_index(), 16);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
EXPECT_EQ(iterator.current_index(), 15);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
EXPECT_EQ(iterator.GetIndexOperand(1), name_index);
EXPECT_EQ(iterator.GetIndexOperand(2), feedback_slot);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 14);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
EXPECT_EQ(iterator.current_index(), 13);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdar);
EXPECT_EQ(iterator.current_index(), 12);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 11);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
EXPECT_EQ(iterator.current_index(), 10);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
ASSERT_TRUE(iterator.IsValid());
--iterator;
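// smi_1 (-65536) does not fit in a 16-bit operand, so this LdaSmi is emitted
// at quadruple scale behind a one-byte ExtraWide prefix; the prefix byte is
// included when computing the offset.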
offset -= Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
kPrefixByteSize;
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
EXPECT_EQ(iterator.current_index(), 9);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
EXPECT_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 8);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
EXPECT_EQ(iterator.current_index(), 7);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
ASSERT_TRUE(iterator.IsValid());
--iterator;
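// smi_0 (64) fits in a single-byte operand, so no prefix byte is emitted.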
offset -= Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
EXPECT_EQ(iterator.current_index(), 6);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_0);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 5);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaZero);
EXPECT_EQ(iterator.current_index(), 4);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 3);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
heap_num_1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_index(), 1);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
offset -= Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
heap_num_0);
ASSERT_TRUE(iterator.IsValid());
--iterator;
ASSERT_FALSE(iterator.IsValid());
}
} // namespace interpreter
} // namespace internal
} // namespace v8