v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc

// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/bytecode-array-iterator.h"

#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"

namespace v8 {
namespace internal {
namespace interpreter {

class BytecodeArrayIteratorTest : public TestWithIsolateAndZone {
 public:
  BytecodeArrayIteratorTest() = default;
  ~BytecodeArrayIteratorTest() override = default;
};

TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
  // Use a builder to create an array containing multiple bytecodes
  // with 0, 1 and 2 operands.
  FeedbackVectorSpec feedback_spec(zone());
  BytecodeArrayBuilder builder(zone(), 3, 17, &feedback_spec);
  AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
                              HashSeed(isolate()));
  double heap_num_0 = 2.718;
  double heap_num_1 = 2.0 * Smi::kMaxValue;
  Smi zero = Smi::zero();
  Smi smi_0 = Smi::FromInt(64);
  Smi smi_1 = Smi::FromInt(-65536);
  Register reg_0(0);
  Register reg_16(16);  // Something not eligible for short Star.
  RegisterList pair = BytecodeUtils::NewRegisterList(0, 2);
  RegisterList triple = BytecodeUtils::NewRegisterList(0, 3);
  Register param = Register::FromParameterIndex(2);
  const AstRawString* name = ast_factory.GetOneByteString("abc");
  uint32_t name_index = 2;
  uint32_t load_feedback_slot = feedback_spec.AddLoadICSlot().ToInt();
  uint32_t forin_feedback_slot = feedback_spec.AddForInSlot().ToInt();
  uint32_t load_global_feedback_slot =
      feedback_spec.AddLoadGlobalICSlot(TypeofMode::kNotInside).ToInt();
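
  // Emit a representative mix of bytecodes: literal and Smi loads, stores to
  // reg_0 (eligible for the single-byte kStar0..kStar15 bytecodes) and to
  // reg_16 (which is not), binary operations, a named-property load, runtime
  // calls, ForInPrepare, Debugger, a global load, and Return.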
  builder.LoadLiteral(heap_num_0)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(heap_num_1)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(zero)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(smi_0)
      .StoreAccumulatorInRegister(reg_0)
      .LoadLiteral(smi_1)
      .StoreAccumulatorInRegister(reg_16)
      .LoadAccumulatorWithRegister(reg_0)
      .BinaryOperation(Token::Value::ADD, reg_0, 2)
      .StoreAccumulatorInRegister(reg_16)
      .LoadNamedProperty(reg_16, name, load_feedback_slot)
      .BinaryOperation(Token::Value::ADD, reg_0, 3)
      .StoreAccumulatorInRegister(param)
      .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, pair)
      .ForInPrepare(triple, forin_feedback_slot)
      .CallRuntime(Runtime::kLoadIC_Miss, reg_0)
      .Debugger()
      .LoadGlobal(name, load_global_feedback_slot, TypeofMode::kNotInside)
      .Return();

  // Test iterator sees the expected output from the builder.
  ast_factory.Internalize(isolate());
  BytecodeArrayIterator iterator(builder.ToBytecodeArray(isolate()));
  const int kPrefixByteSize = 1;
  int offset = 0;
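
  // Each block below verifies the bytecode, its offset, and its operand scale
  // before advancing; `offset` accumulates the expected size of each bytecode
  // as the iterator walks forward.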
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
            heap_num_0);
  CHECK(!iterator.done());
  offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
  iterator.Advance();
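
  // The store to reg_0 shows up as the dedicated single-byte kStar0 bytecode
  // rather than as kStar with a register operand.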
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  CHECK(!iterator.done());
  offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
  iterator.Advance();

  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
            heap_num_1);
  CHECK(!iterator.done());
  offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
  iterator.Advance();

  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  CHECK(!iterator.done());
  offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
  iterator.Advance();
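
  // LoadLiteral(Smi::zero()) is emitted as the operand-less kLdaZero.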
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaZero);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  CHECK(!iterator.done());
  offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
  iterator.Advance();

  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  CHECK(!iterator.done());
  offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
  iterator.Advance();
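
  // smi_0 (64) fits in a one-byte immediate, so this kLdaSmi stays at single
  // operand scale.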
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_0);
  CHECK(!iterator.done());
  offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
  iterator.Advance();

  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar0);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  CHECK(!iterator.done());
  offset += Bytecodes::Size(Bytecode::kStar0, OperandScale::kSingle);
  iterator.Advance();
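
  // smi_1 (-65536) does not fit in a one- or two-byte immediate, so this
  // LdaSmi is emitted behind an ExtraWide prefix: its operand is read at
  // quadruple scale and the one-byte prefix is added to the expected offset
  // via kPrefixByteSize.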
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
  EXPECT_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_1);
  CHECK(!iterator.done());
  offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kQuadruple) +
            kPrefixByteSize;
  iterator.Advance();
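
  // reg_16 is above the short-Star range (r0-r15), so the store uses the
  // generic kStar with an explicit register operand.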
  EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
  EXPECT_EQ(iterator.current_offset(), offset);
  EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
  EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_16.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
iterator.Advance();
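// Ldar loading reg_0 into the accumulator.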
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdar);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
iterator.Advance();
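// Add with reg_0 as the register operand; the second input is the accumulator.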
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
iterator.Advance();
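// Star storing the accumulator to reg_16; register 16 is outside the
// kStar0..kStar15 short-form range, so the full Star bytecode is expected.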
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
Reland "[interpreter] Short Star bytecode" This is a reland of cf93071c91a932996a4df2e2d8880aca3cffbe28 Original change's description: > [interpreter] Short Star bytecode > > Design doc: > https://docs.google.com/document/d/1g_NExMT78II_KnIYNa9MvyPYIj23qAiFUEsyemY5KRk/edit > > This change adds 16 new interpreter opcodes, kStar0 through kStar15, so > that we can use a single byte to represent the common operation of > storing to a low-numbered register. This generally reduces the quantity > of bytecode generated on web sites by 8-9%. > > In order to not degrade speed, a couple of other changes are required: > > The existing lookahead logic to check for Star after certain other > bytecode handlers is updated to check for these new short Star codes > instead. Furthermore, that lookahead logic is updated to contain its own > copy of the dispatch jump rather than merging control flow with the > lookahead-failed case, to improve branch prediction. > > A bunch of constants use bytecode size in bytes as a proxy for the size > or complexity of a function, and are adjusted downward proportionally to > the decrease in generated bytecode size. > > Other small drive-by fix: update generate-bytecode-expectations to emit > \n instead of \r\n on Windows. > > Change-Id: I6307c2b0f5794a3a1088bb0fb94f6e1615441ed5 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2641180 > Reviewed-by: Ross McIlroy <rmcilroy@chromium.org> > Commit-Queue: Seth Brenith <seth.brenith@microsoft.com> > Cr-Commit-Position: refs/heads/master@{#72773} Change-Id: I1afb670c25694498b3989de615858f984a8c7f6f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2698057 Commit-Queue: Seth Brenith <seth.brenith@microsoft.com> Reviewed-by: Ross McIlroy <rmcilroy@chromium.org> Reviewed-by: Mythri Alle <mythria@chromium.org> Cr-Commit-Position: refs/heads/master@{#72821}
2021-02-17 14:36:58 +00:00
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_16.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
iterator.Advance();
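// GetNamedProperty reading from receiver reg_16, with a constant pool name
// index and a feedback slot.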
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kGetNamedProperty);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
Reland "[interpreter] Short Star bytecode" This is a reland of cf93071c91a932996a4df2e2d8880aca3cffbe28 Original change's description: > [interpreter] Short Star bytecode > > Design doc: > https://docs.google.com/document/d/1g_NExMT78II_KnIYNa9MvyPYIj23qAiFUEsyemY5KRk/edit > > This change adds 16 new interpreter opcodes, kStar0 through kStar15, so > that we can use a single byte to represent the common operation of > storing to a low-numbered register. This generally reduces the quantity > of bytecode generated on web sites by 8-9%. > > In order to not degrade speed, a couple of other changes are required: > > The existing lookahead logic to check for Star after certain other > bytecode handlers is updated to check for these new short Star codes > instead. Furthermore, that lookahead logic is updated to contain its own > copy of the dispatch jump rather than merging control flow with the > lookahead-failed case, to improve branch prediction. > > A bunch of constants use bytecode size in bytes as a proxy for the size > or complexity of a function, and are adjusted downward proportionally to > the decrease in generated bytecode size. > > Other small drive-by fix: update generate-bytecode-expectations to emit > \n instead of \r\n on Windows. > > Change-Id: I6307c2b0f5794a3a1088bb0fb94f6e1615441ed5 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2641180 > Reviewed-by: Ross McIlroy <rmcilroy@chromium.org> > Commit-Queue: Seth Brenith <seth.brenith@microsoft.com> > Cr-Commit-Position: refs/heads/master@{#72773} Change-Id: I1afb670c25694498b3989de615858f984a8c7f6f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2698057 Commit-Queue: Seth Brenith <seth.brenith@microsoft.com> Reviewed-by: Ross McIlroy <rmcilroy@chromium.org> Reviewed-by: Mythri Alle <mythria@chromium.org> Cr-Commit-Position: refs/heads/master@{#72821}
2021-02-17 14:36:58 +00:00
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_16.index());
EXPECT_EQ(iterator.GetIndexOperand(1), name_index);
EXPECT_EQ(iterator.GetIndexOperand(2), load_feedback_slot);
CHECK(!iterator.done());
offset +=
Bytecodes::Size(Bytecode::kGetNamedProperty, OperandScale::kSingle);
iterator.Advance();
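// Add with reg_0 as the register operand.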
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kAdd);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
iterator.Advance();
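// Star storing the accumulator to the parameter register.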
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kStar);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), param.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
iterator.Advance();
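// CallRuntimeForPair: runtime id, argument register list (first register plus
// count), and a register pair for the two return values.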
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntimeForPair);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadLookupSlotForCall);
EXPECT_EQ(iterator.GetRegisterOperand(1).index(), param.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(1), 1);
EXPECT_EQ(iterator.GetRegisterCountOperand(2), 1u);
EXPECT_EQ(iterator.GetRegisterOperand(3).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(3), 2);
CHECK(!iterator.done());
offset +=
Bytecodes::Size(Bytecode::kCallRuntimeForPair, OperandScale::kSingle);
iterator.Advance();
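// ForInPrepare writing a triple of registers starting at reg_0, plus a
// feedback slot.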
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kForInPrepare);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterOperandRange(0), 3);
EXPECT_EQ(iterator.GetIndexOperand(1), forin_feedback_slot);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle);
iterator.Advance();
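// CallRuntime: runtime id plus an argument register list.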
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kCallRuntime);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadIC_Miss);
EXPECT_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
EXPECT_EQ(iterator.GetRegisterCountOperand(2), 1u);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kCallRuntime, OperandScale::kSingle);
iterator.Advance();
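// Debugger takes no operands.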
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kDebugger, OperandScale::kSingle);
iterator.Advance();
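// LdaGlobal carries a name index and a feedback slot; at single operand scale
// it is 3 bytes.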
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaGlobal);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
EXPECT_EQ(iterator.current_bytecode_size(), 3);
EXPECT_EQ(iterator.GetIndexOperand(1), load_global_feedback_slot);
offset += Bytecodes::Size(Bytecode::kLdaGlobal, OperandScale::kSingle);
iterator.Advance();
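// Return terminates the bytecode array; advancing past it completes iteration.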
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kReturn);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK(!iterator.done());
iterator.Advance();
CHECK(iterator.done());
}
} // namespace interpreter
} // namespace internal
} // namespace v8