v8/test/cctest/test-descriptor-array.cc

// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/logging.h"
#include "src/codegen/code-stub-assembler.h"
#include "src/common/globals.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/property-details.h"
#include "src/objects/string-inl.h"
#include "src/objects/transitions-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
#include "test/cctest/test-transitions.h"
namespace v8 {
namespace internal {
namespace {
using Label = compiler::CodeAssemblerLabel;
template <class T>
using TVariable = compiler::TypedCodeAssemblerVariable<T>;
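
// Creates a unique (internalized) one-byte Name whose hash field is forced to
// the given |hash|, so that tests can construct arbitrary hash collisions.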
Handle<Name> NewNameWithHash(Isolate* isolate, const char* str, uint32_t hash,
                             bool is_integer) {
  uint32_t hash_field = Name::CreateHashFieldValue(
      hash, is_integer ? Name::HashFieldType::kIntegerIndex
                       : Name::HashFieldType::kHash);

  Handle<Name> name = isolate->factory()->NewOneByteInternalizedString(
      base::OneByteVector(str), hash_field);
  name->set_raw_hash_field(hash_field);
  CHECK(name->IsUniqueName());
  return name;
}
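
// Calls |function| with an undefined receiver and the given arguments and
// returns the result of the call.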
template <typename... Args>
MaybeHandle<Object> Call(Isolate* isolate, Handle<JSFunction> function,
                         Args... args) {
  const int nof_args = sizeof...(Args);
  Handle<Object> call_args[] = {args...};
  Handle<Object> receiver = isolate->factory()->undefined_value();
  return Execution::Call(isolate, function, receiver, nof_args, call_args);
}
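
// Checks that every name in |names| is found at the expected descriptor index
// by both the C++ DescriptorArray::Search() and (unless --jitless) the CSA
// lookup function created by CreateCsaDescriptorArrayLookup().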
void CheckDescriptorArrayLookups(Isolate* isolate, Handle<Map> map,
                                 std::vector<Handle<Name>>& names,
                                 Handle<JSFunction> csa_lookup) {
  // Test C++ implementation.
  {
    DisallowGarbageCollection no_gc;
    DescriptorArray descriptors = map->instance_descriptors(isolate);
    DCHECK(descriptors.IsSortedNoDuplicates());
    int nof_descriptors = descriptors.number_of_descriptors();

    for (size_t i = 0; i < names.size(); ++i) {
      Name name = *names[i];
      InternalIndex index = descriptors.Search(name, nof_descriptors, false);
      CHECK(index.is_found());
      CHECK_EQ(i, index.as_uint32());
    }
  }

  // Test CSA implementation.
  if (!FLAG_jitless) {
    for (size_t i = 0; i < names.size(); ++i) {
      Handle<Object> name_index =
          Call(isolate, csa_lookup, map, names[i]).ToHandleChecked();
      CHECK(name_index->IsSmi());
      CHECK_EQ(DescriptorArray::ToKeyIndex(static_cast<int>(i)),
               Smi::ToInt(*name_index));
    }
  }
}
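
// Checks that for every map in |maps| the transition keyed by the map's last
// added property name is found by both the C++
// TransitionArray::SearchAndGetTargetForTesting() and (unless --jitless) the
// CSA lookup function created by CreateCsaTransitionArrayLookup().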
void CheckTransitionArrayLookups(Isolate* isolate,
                                 Handle<TransitionArray> transitions,
                                 std::vector<Handle<Map>>& maps,
                                 Handle<JSFunction> csa_lookup) {
  // Test C++ implementation.
  {
    DisallowGarbageCollection no_gc;
    DCHECK(transitions->IsSortedNoDuplicates());

    for (size_t i = 0; i < maps.size(); ++i) {
      Map expected_map = *maps[i];
      Name name = expected_map.instance_descriptors(isolate).GetKey(
          expected_map.LastAdded());

      Map map = transitions->SearchAndGetTargetForTesting(PropertyKind::kData,
                                                          name, NONE);
      CHECK(!map.is_null());
      CHECK_EQ(expected_map, map);
    }
  }

  // Test CSA implementation.
  if (!FLAG_jitless) {
    for (size_t i = 0; i < maps.size(); ++i) {
      Handle<Map> expected_map = maps[i];
      Handle<Name> name(expected_map->instance_descriptors(isolate).GetKey(
                            expected_map->LastAdded()),
                        isolate);

      Handle<Object> transition_map =
          Call(isolate, csa_lookup, transitions, name).ToHandleChecked();
      CHECK(transition_map->IsMap());
      CHECK_EQ(*expected_map, *transition_map);
    }
  }
}

// Creates a function with (Map, Name) arguments. Returns a Smi with the key
// index of the found descriptor (DescriptorArray::ToKeyIndex()), or null if
// the name is not found.
Handle<JSFunction> CreateCsaDescriptorArrayLookup(Isolate* isolate) {
  // We are not allowed to generate code in jitless mode.
  if (FLAG_jitless) return Handle<JSFunction>();

  // Preallocate handle for the result in the current handle scope.
  Handle<JSFunction> result_function(JSFunction{}, isolate);

  const int kNumParams = 2;
  compiler::CodeAssemblerTester asm_tester(
      isolate, kNumParams + 1,  // +1 to include receiver.
Reland "Reland "[deoptimizer] Change deopt entries into builtins"" This is a reland of fbfa9bf4ec72b1b73a96b70ccb68cd98c321511b The arm64 was missing proper codegen for CFI, thus sizes were off. Original change's description: > Reland "[deoptimizer] Change deopt entries into builtins" > > This is a reland of 7f58ced72eb65b6b5530ccabaf2eaebe45bf9d33 > > It fixes the different exit size emitted on x64/Atom CPUs due to > performance tuning in TurboAssembler::Call. Additionally, add > cctests to verify the fixed size exits. > > Original change's description: > > [deoptimizer] Change deopt entries into builtins > > > > While the overall goal of this commit is to change deoptimization > > entries into builtins, there are multiple related things happening: > > > > - Deoptimization entries, formerly stubs (i.e. Code objects generated > > at runtime, guaranteed to be immovable), have been converted into > > builtins. The major restriction is that we now need to preserve the > > kRootRegister, which was formerly used on most architectures to pass > > the deoptimization id. The solution differs based on platform. > > - Renamed DEOPT_ENTRIES_OR_FOR_TESTING code kind to FOR_TESTING. > > - Removed heap/ support for immovable Code generation. > > - Removed the DeserializerData class (no longer needed). > > - arm64: to preserve 4-byte deopt exits, introduced a new optimization > > in which the final jump to the deoptimization entry is generated > > once per Code object, and deopt exits can continue to emit a > > near-call. > > - arm,ia32,x64: change to fixed-size deopt exits. This reduces exit > > sizes by 4/8, 5, and 5 bytes, respectively. > > > > On arm the deopt exit size is reduced from 12 (or 16) bytes to 8 bytes > > by using the same strategy as on arm64 (recalc deopt id from return > > address). Before: > > > > e300a002 movw r10, <id> > > e59fc024 ldr ip, [pc, <entry offset>] > > e12fff3c blx ip > > > > After: > > > > e59acb35 ldr ip, [r10, <entry offset>] > > e12fff3c blx ip > > > > On arm64 the deopt exit size remains 4 bytes (or 8 bytes in same cases > > with CFI). Additionally, up to 4 builtin jumps are emitted per Code > > object (max 32 bytes added overhead per Code object). Before: > > > > 9401cdae bl <entry offset> > > > > After: > > > > # eager deoptimization entry jump. > > f95b1f50 ldr x16, [x26, <eager entry offset>] > > d61f0200 br x16 > > # lazy deoptimization entry jump. > > f95b2b50 ldr x16, [x26, <lazy entry offset>] > > d61f0200 br x16 > > # the deopt exit. > > 97fffffc bl <eager deoptimization entry jump offset> > > > > On ia32 the deopt exit size is reduced from 10 to 5 bytes. Before: > > > > bb00000000 mov ebx,<id> > > e825f5372b call <entry> > > > > After: > > > > e8ea2256ba call <entry> > > > > On x64 the deopt exit size is reduced from 12 to 7 bytes. 
Before: > > > > 49c7c511000000 REX.W movq r13,<id> > > e8ea2f0700 call <entry> > > > > After: > > > > 41ff9560360000 call [r13+<entry offset>] > > > > Bug: v8:8661,v8:8768 > > Change-Id: I13e30aedc360474dc818fecc528ce87c3bfeed42 > > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2465834 > > Commit-Queue: Jakob Gruber <jgruber@chromium.org> > > Reviewed-by: Ross McIlroy <rmcilroy@chromium.org> > > Reviewed-by: Tobias Tebbi <tebbi@chromium.org> > > Reviewed-by: Ulan Degenbaev <ulan@chromium.org> > > Cr-Commit-Position: refs/heads/master@{#70597} > > Tbr: ulan@chromium.org, tebbi@chromium.org, rmcilroy@chromium.org > Bug: v8:8661,v8:8768,chromium:1140165 > Change-Id: Ibcd5c39c58a70bf2b2ac221aa375fc68d495e144 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2485506 > Reviewed-by: Jakob Gruber <jgruber@chromium.org> > Reviewed-by: Tobias Tebbi <tebbi@chromium.org> > Commit-Queue: Jakob Gruber <jgruber@chromium.org> > Cr-Commit-Position: refs/heads/master@{#70655} Tbr: ulan@chromium.org, tebbi@chromium.org, rmcilroy@chromium.org Bug: v8:8661 Bug: v8:8768 Bug: chromium:1140165 Change-Id: I471cc94fc085e527dc9bfb5a84b96bd907c2333f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2488682 Reviewed-by: Jakob Gruber <jgruber@chromium.org> Commit-Queue: Jakob Gruber <jgruber@chromium.org> Cr-Commit-Position: refs/heads/master@{#70672}
2020-10-21 05:12:25 +00:00
      CodeKind::FOR_TESTING);

  {
    CodeStubAssembler m(asm_tester.state());

    auto map = m.Parameter<Map>(1);
    auto unique_name = m.Parameter<Name>(2);

    Label passed(&m), failed(&m);
    Label if_found(&m), if_not_found(&m);
    TVariable<IntPtrT> var_name_index(&m);

    TNode<Uint32T> bit_field3 = m.LoadMapBitField3(map);
    TNode<DescriptorArray> descriptors = m.LoadMapDescriptors(map);

    m.DescriptorLookup(unique_name, descriptors, bit_field3, &if_found,
                       &var_name_index, &if_not_found);

    m.BIND(&if_found);
    m.Return(m.SmiTag(var_name_index.value()));

    m.BIND(&if_not_found);
    m.Return(m.NullConstant());
  }

  {
    compiler::FunctionTester ft(asm_tester.GenerateCode(), kNumParams);

    // Copy function value to a handle created in the outer handle scope.
    result_function.PatchValue(*ft.function);
  }

  return result_function;
}

// Creates a function with (TransitionArray, Name) arguments. Returns the
// transition target map if a transition for the given name is found, or null
// otherwise.
Handle<JSFunction> CreateCsaTransitionArrayLookup(Isolate* isolate) {
  // We are not allowed to generate code in jitless mode.
  if (FLAG_jitless) return Handle<JSFunction>();

  // Preallocate handle for the result in the current handle scope.
  Handle<JSFunction> result_function(JSFunction{}, isolate);

  const int kNumParams = 2;
  compiler::CodeAssemblerTester asm_tester(
      isolate, kNumParams + 1,  // +1 to include receiver.
Reland "Reland "[deoptimizer] Change deopt entries into builtins"" This is a reland of fbfa9bf4ec72b1b73a96b70ccb68cd98c321511b The arm64 was missing proper codegen for CFI, thus sizes were off. Original change's description: > Reland "[deoptimizer] Change deopt entries into builtins" > > This is a reland of 7f58ced72eb65b6b5530ccabaf2eaebe45bf9d33 > > It fixes the different exit size emitted on x64/Atom CPUs due to > performance tuning in TurboAssembler::Call. Additionally, add > cctests to verify the fixed size exits. > > Original change's description: > > [deoptimizer] Change deopt entries into builtins > > > > While the overall goal of this commit is to change deoptimization > > entries into builtins, there are multiple related things happening: > > > > - Deoptimization entries, formerly stubs (i.e. Code objects generated > > at runtime, guaranteed to be immovable), have been converted into > > builtins. The major restriction is that we now need to preserve the > > kRootRegister, which was formerly used on most architectures to pass > > the deoptimization id. The solution differs based on platform. > > - Renamed DEOPT_ENTRIES_OR_FOR_TESTING code kind to FOR_TESTING. > > - Removed heap/ support for immovable Code generation. > > - Removed the DeserializerData class (no longer needed). > > - arm64: to preserve 4-byte deopt exits, introduced a new optimization > > in which the final jump to the deoptimization entry is generated > > once per Code object, and deopt exits can continue to emit a > > near-call. > > - arm,ia32,x64: change to fixed-size deopt exits. This reduces exit > > sizes by 4/8, 5, and 5 bytes, respectively. > > > > On arm the deopt exit size is reduced from 12 (or 16) bytes to 8 bytes > > by using the same strategy as on arm64 (recalc deopt id from return > > address). Before: > > > > e300a002 movw r10, <id> > > e59fc024 ldr ip, [pc, <entry offset>] > > e12fff3c blx ip > > > > After: > > > > e59acb35 ldr ip, [r10, <entry offset>] > > e12fff3c blx ip > > > > On arm64 the deopt exit size remains 4 bytes (or 8 bytes in same cases > > with CFI). Additionally, up to 4 builtin jumps are emitted per Code > > object (max 32 bytes added overhead per Code object). Before: > > > > 9401cdae bl <entry offset> > > > > After: > > > > # eager deoptimization entry jump. > > f95b1f50 ldr x16, [x26, <eager entry offset>] > > d61f0200 br x16 > > # lazy deoptimization entry jump. > > f95b2b50 ldr x16, [x26, <lazy entry offset>] > > d61f0200 br x16 > > # the deopt exit. > > 97fffffc bl <eager deoptimization entry jump offset> > > > > On ia32 the deopt exit size is reduced from 10 to 5 bytes. Before: > > > > bb00000000 mov ebx,<id> > > e825f5372b call <entry> > > > > After: > > > > e8ea2256ba call <entry> > > > > On x64 the deopt exit size is reduced from 12 to 7 bytes. 
Before: > > > > 49c7c511000000 REX.W movq r13,<id> > > e8ea2f0700 call <entry> > > > > After: > > > > 41ff9560360000 call [r13+<entry offset>] > > > > Bug: v8:8661,v8:8768 > > Change-Id: I13e30aedc360474dc818fecc528ce87c3bfeed42 > > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2465834 > > Commit-Queue: Jakob Gruber <jgruber@chromium.org> > > Reviewed-by: Ross McIlroy <rmcilroy@chromium.org> > > Reviewed-by: Tobias Tebbi <tebbi@chromium.org> > > Reviewed-by: Ulan Degenbaev <ulan@chromium.org> > > Cr-Commit-Position: refs/heads/master@{#70597} > > Tbr: ulan@chromium.org, tebbi@chromium.org, rmcilroy@chromium.org > Bug: v8:8661,v8:8768,chromium:1140165 > Change-Id: Ibcd5c39c58a70bf2b2ac221aa375fc68d495e144 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2485506 > Reviewed-by: Jakob Gruber <jgruber@chromium.org> > Reviewed-by: Tobias Tebbi <tebbi@chromium.org> > Commit-Queue: Jakob Gruber <jgruber@chromium.org> > Cr-Commit-Position: refs/heads/master@{#70655} Tbr: ulan@chromium.org, tebbi@chromium.org, rmcilroy@chromium.org Bug: v8:8661 Bug: v8:8768 Bug: chromium:1140165 Change-Id: I471cc94fc085e527dc9bfb5a84b96bd907c2333f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2488682 Reviewed-by: Jakob Gruber <jgruber@chromium.org> Commit-Queue: Jakob Gruber <jgruber@chromium.org> Cr-Commit-Position: refs/heads/master@{#70672}
2020-10-21 05:12:25 +00:00
      CodeKind::FOR_TESTING);

  {
    CodeStubAssembler m(asm_tester.state());

    auto transitions = m.Parameter<TransitionArray>(1);
    auto unique_name = m.Parameter<Name>(2);

    Label passed(&m), failed(&m);
    Label if_found(&m), if_not_found(&m);
    TVariable<IntPtrT> var_name_index(&m);

    m.TransitionLookup(unique_name, transitions, &if_found, &var_name_index,
                       &if_not_found);

    m.BIND(&if_found);
    {
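      // The tests below only create plain data properties, so the entry found
      // by name is the same (PropertyKind::kData, NONE) transition that the
      // C++ lookup checks. The target map is stored in the same transition
      // entry, at a fixed offset from the key slot computed below.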
      static_assert(static_cast<int>(PropertyKind::kData) == 0);
      static_assert(NONE == 0);
      const int kKeyToTargetOffset = (TransitionArray::kEntryTargetIndex -
                                      TransitionArray::kEntryKeyIndex) *
                                     kTaggedSize;

      TNode<Map> transition_map = m.CAST(m.GetHeapObjectAssumeWeak(
          m.LoadArrayElement(transitions, WeakFixedArray::kHeaderSize,
                             var_name_index.value(), kKeyToTargetOffset)));
      m.Return(transition_map);
    }

    m.BIND(&if_not_found);
    m.Return(m.NullConstant());
  }

  {
    compiler::FunctionTester ft(asm_tester.GenerateCode(), kNumParams);

    // Copy function value to a handle created in the outer handle scope.
    result_function.PatchValue(*ft.function);
  }

  return result_function;
}
}  // namespace
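
// Checks descriptor lookups when every name in the DescriptorArray shares the
// same hash value (differing only in the is_integer bit of the hash field),
// both before and after the array is sorted.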
TEST(DescriptorArrayHashCollisionMassive) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope handle_scope(isolate);

  static_assert(Name::HashFieldTypeBits::kSize == 2,
                "This test might require updating if more HashFieldType values "
                "are introduced");

  std::vector<Handle<Name>> names;

  // Use the same hash value for all names.
  uint32_t hash = static_cast<uint32_t>(
      isolate->GenerateIdentityHash(Name::HashBits::kMax));

  for (int i = 0; i < kMaxNumberOfDescriptors / 2; ++i) {
    // Add pairs of names having the same base hash value but having different
    // values of is_integer bit.
    bool first_is_integer = (i & 1) != 0;
    bool second_is_integer = (i & 2) != 0;

    names.push_back(NewNameWithHash(isolate, "a", hash, first_is_integer));
    names.push_back(NewNameWithHash(isolate, "b", hash, second_is_integer));
  }

  // Create descriptor array with the created names by appending fields to some
  // map. DescriptorArray marking relies on the fact that it's attached to an
  // owning map.
  Handle<Map> map = Map::Create(isolate, 0);

  Handle<FieldType> any_type = FieldType::Any(isolate);
  for (size_t i = 0; i < names.size(); ++i) {
    map = Map::CopyWithField(isolate, map, names[i], any_type, NONE,
                             PropertyConstness::kMutable,
                             Representation::Tagged(), OMIT_TRANSITION)
              .ToHandleChecked();
  }

  Handle<JSFunction> csa_lookup = CreateCsaDescriptorArrayLookup(isolate);

  CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);

  // Sort descriptor array and check it again.
  map->instance_descriptors(isolate).Sort();
  CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);
}
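
// Like DescriptorArrayHashCollisionMassive, but a fresh hash value is
// generated for every other pair of names, so collisions are limited to small
// groups of names rather than the whole array.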
TEST(DescriptorArrayHashCollision) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope handle_scope(isolate);

  static_assert(Name::HashFieldTypeBits::kSize == 2,
                "This test might require updating if more HashFieldType values "
                "are introduced");

  std::vector<Handle<Name>> names;
  uint32_t hash = 0;

  for (int i = 0; i < kMaxNumberOfDescriptors / 2; ++i) {
    if (i % 2 == 0) {
      // Change hash value for every pair of names.
      hash = static_cast<uint32_t>(
          isolate->GenerateIdentityHash(Name::HashBits::kMax));
    }

    // Add pairs of names having the same base hash value but having different
    // values of is_integer bit.
    bool first_is_integer = (i & 1) != 0;
    bool second_is_integer = (i & 2) != 0;

    names.push_back(NewNameWithHash(isolate, "a", hash, first_is_integer));
    names.push_back(NewNameWithHash(isolate, "b", hash, second_is_integer));
  }

  // Create descriptor array with the created names by appending fields to some
  // map. DescriptorArray marking relies on the fact that it's attached to an
  // owning map.
  Handle<Map> map = Map::Create(isolate, 0);

  Handle<FieldType> any_type = FieldType::Any(isolate);
  for (size_t i = 0; i < names.size(); ++i) {
    map = Map::CopyWithField(isolate, map, names[i], any_type, NONE,
                             PropertyConstness::kMutable,
                             Representation::Tagged(), OMIT_TRANSITION)
              .ToHandleChecked();
  }

  Handle<JSFunction> csa_lookup = CreateCsaDescriptorArrayLookup(isolate);

  CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);

  // Sort descriptor array and check it again.
  map->instance_descriptors(isolate).Sort();
  CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);
}
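
// Checks transition lookups when every transition key in the TransitionArray
// shares the same hash value (differing only in the is_integer bit of the
// hash field), both before and after the array is sorted.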
TEST(TransitionArrayHashCollisionMassive) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope handle_scope(isolate);

  static_assert(Name::HashFieldTypeBits::kSize == 2,
                "This test might require updating if more HashFieldType values "
                "are introduced");

  std::vector<Handle<Name>> names;

  // Use the same hash value for all names.
  uint32_t hash = static_cast<uint32_t>(
      isolate->GenerateIdentityHash(Name::HashBits::kMax));

  for (int i = 0; i < TransitionsAccessor::kMaxNumberOfTransitions / 2; ++i) {
    // Add pairs of names having the same base hash value but having different
    // values of is_integer bit.
    bool first_is_integer = (i & 1) != 0;
    bool second_is_integer = (i & 2) != 0;

    names.push_back(NewNameWithHash(isolate, "a", hash, first_is_integer));
    names.push_back(NewNameWithHash(isolate, "b", hash, second_is_integer));
  }

  // Create transitions for each name.
  Handle<Map> root_map = Map::Create(isolate, 0);

  std::vector<Handle<Map>> maps;

  Handle<FieldType> any_type = FieldType::Any(isolate);
  for (size_t i = 0; i < names.size(); ++i) {
    Handle<Map> map =
        Map::CopyWithField(isolate, root_map, names[i], any_type, NONE,
                           PropertyConstness::kMutable,
                           Representation::Tagged(), INSERT_TRANSITION)
            .ToHandleChecked();
    maps.push_back(map);
  }

  Handle<JSFunction> csa_lookup = CreateCsaTransitionArrayLookup(isolate);

  Handle<TransitionArray> transition_array(
      TestTransitionsAccessor(isolate, root_map).transitions(), isolate);

  CheckTransitionArrayLookups(isolate, transition_array, maps, csa_lookup);

  // Sort transition array and check it again.
  transition_array->Sort();

  CheckTransitionArrayLookups(isolate, transition_array, maps, csa_lookup);
}
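
// Like TransitionArrayHashCollisionMassive, but a fresh hash value is
// generated for every other pair of names, so collisions are limited to small
// groups of transitions.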
TEST(TransitionArrayHashCollision) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope handle_scope(isolate);

  static_assert(Name::HashFieldTypeBits::kSize == 2,
                "This test might require updating if more HashFieldType values "
                "are introduced");

  std::vector<Handle<Name>> names;

  // The hash value is regenerated for every other pair of names in the loop
  // below, so only small groups of names collide.
  uint32_t hash = static_cast<uint32_t>(
      isolate->GenerateIdentityHash(Name::HashBits::kMax));

  for (int i = 0; i < TransitionsAccessor::kMaxNumberOfTransitions / 2; ++i) {
    if (i % 2 == 0) {
      // Change hash value for every pair of names.
      hash = static_cast<uint32_t>(
          isolate->GenerateIdentityHash(Name::HashBits::kMax));
    }

    // Add pairs of names having the same base hash value but having different
    // values of is_integer bit.
    bool first_is_integer = (i & 1) != 0;
    bool second_is_integer = (i & 2) != 0;

    names.push_back(NewNameWithHash(isolate, "a", hash, first_is_integer));
    names.push_back(NewNameWithHash(isolate, "b", hash, second_is_integer));
  }

  // Create transitions for each name.
  Handle<Map> root_map = Map::Create(isolate, 0);

  std::vector<Handle<Map>> maps;

  Handle<FieldType> any_type = FieldType::Any(isolate);
  for (size_t i = 0; i < names.size(); ++i) {
    Handle<Map> map =
        Map::CopyWithField(isolate, root_map, names[i], any_type, NONE,
                           PropertyConstness::kMutable,
                           Representation::Tagged(), INSERT_TRANSITION)
            .ToHandleChecked();
    maps.push_back(map);
  }

  Handle<JSFunction> csa_lookup = CreateCsaTransitionArrayLookup(isolate);

  Handle<TransitionArray> transition_array(
      TestTransitionsAccessor(isolate, root_map).transitions(), isolate);

  CheckTransitionArrayLookups(isolate, transition_array, maps, csa_lookup);

  // Sort transition array and check it again.
  transition_array->Sort();

  CheckTransitionArrayLookups(isolate, transition_array, maps, csa_lookup);
}

}  // namespace internal
}  // namespace v8