v8/test/cctest/test-accessor-assembler.cc

// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "test/cctest/cctest.h"
#include "src/base/utils/random-number-generator.h"
#include "src/ic/accessor-assembler.h"
#include "src/ic/stub-cache.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"

namespace v8 {
namespace internal {

using compiler::CodeAssemblerTester;
using compiler::FunctionTester;
using compiler::Node;

namespace {

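// Verifies that the CodeStubAssembler implementation of the stub cache offset
// calculation (StubCachePrimaryOffsetForTesting /
// StubCacheSecondaryOffsetForTesting) produces the same table offsets as the
// C++ reference implementation in StubCache, for various name/map pairs.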
void TestStubCacheOffsetCalculation(StubCache::Table table) {
  Isolate* isolate(CcTest::InitIsolateOnce());
  const int kNumParams = 2;
  CodeAssemblerTester data(isolate, kNumParams + 1);  // Include receiver.
  AccessorAssembler m(data.state());

  {
    auto name = m.Parameter<Name>(1);
    auto map = m.Parameter<Map>(2);
    TNode<IntPtrT> primary_offset =
        m.StubCachePrimaryOffsetForTesting(name, map);
    Node* result;
    if (table == StubCache::kPrimary) {
      result = primary_offset;
    } else {
      CHECK_EQ(StubCache::kSecondary, table);
      result = m.StubCacheSecondaryOffsetForTesting(name, primary_offset);
    }
    m.Return(m.SmiTag(result));
  }

  Handle<Code> code = data.GenerateCode();
  FunctionTester ft(code, kNumParams);

  Factory* factory = isolate->factory();
  Handle<Name> names[] = {
      factory->NewSymbol(),
      factory->InternalizeUtf8String("a"),
      factory->InternalizeUtf8String("bb"),
      factory->InternalizeUtf8String("ccc"),
      factory->NewPrivateSymbol(),
      factory->InternalizeUtf8String("dddd"),
      factory->InternalizeUtf8String("eeeee"),
      factory->InternalizeUtf8String("name"),
      factory->NewSymbol(),
      factory->NewPrivateSymbol(),
  };

  Handle<Map> maps[] = {
      factory->cell_map(),
      Map::Create(isolate, 0),
      factory->meta_map(),
      factory->code_map(),
      Map::Create(isolate, 0),
      factory->hash_table_map(),
      factory->symbol_map(),
      factory->string_map(),
      Map::Create(isolate, 0),
      factory->sloppy_arguments_elements_map(),
  };

  for (size_t name_index = 0; name_index < arraysize(names); name_index++) {
    Handle<Name> name = names[name_index];
    for (size_t map_index = 0; map_index < arraysize(maps); map_index++) {
      Handle<Map> map = maps[map_index];

      int expected_result;
      {
        int primary_offset = StubCache::PrimaryOffsetForTesting(*name, *map);
        if (table == StubCache::kPrimary) {
          expected_result = primary_offset;
        } else {
          expected_result =
              StubCache::SecondaryOffsetForTesting(*name, primary_offset);
        }
      }
      Handle<Object> result = ft.Call(name, map).ToHandleChecked();

      Smi expected = Smi::FromInt(expected_result & Smi::kMaxValue);
      CHECK_EQ(expected, Smi::cast(*result));
    }
  }
}

}  // namespace

TEST(StubCachePrimaryOffset) {
  TestStubCacheOffsetCalculation(StubCache::kPrimary);
}

TEST(StubCacheSecondaryOffset) {
  TestStubCacheOffsetCalculation(StubCache::kSecondary);
}

namespace {

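// Builds a trivial Code object of the given kind (it just returns undefined);
// the TryProbeStubCache test below uses such objects as dummy handlers.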
Handle<Code> CreateCodeOfKind(CodeKind kind) {
  Isolate* isolate(CcTest::InitIsolateOnce());
  CodeAssemblerTester data(isolate, kind);
  CodeStubAssembler m(data.state());
  m.Return(m.UndefinedConstant());
  return data.GenerateCodeCloseAndEscape();
}

}  // namespace

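// Builds a stub that probes {stub_cache} for a receiver/name pair and compares
// the result against an expected handler (a Smi zero means the probe is
// expected to miss). The test then fills the cache with random entries and
// cross-checks the stub's answers against StubCache::Get().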
TEST(TryProbeStubCache) {
  using Label = CodeStubAssembler::Label;
  Isolate* isolate(CcTest::InitIsolateOnce());
  const int kNumParams = 3;
  CodeAssemblerTester data(isolate, kNumParams + 1);  // Include receiver.
  AccessorAssembler m(data.state());
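
  // Use a fresh, test-owned StubCache instance so the test fully controls its
  // contents.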
  StubCache stub_cache(isolate);
  stub_cache.Clear();

  {
    auto receiver = m.Parameter<Object>(1);
    auto name = m.Parameter<Name>(2);
    TNode<MaybeObject> expected_handler = m.UncheckedParameter<MaybeObject>(3);

    Label passed(&m), failed(&m);

    CodeStubAssembler::TVariable<MaybeObject> var_handler(&m);
    Label if_handler(&m), if_miss(&m);

    m.TryProbeStubCache(&stub_cache, receiver, name, &if_handler, &var_handler,
                        &if_miss);
    m.BIND(&if_handler);
    m.Branch(m.TaggedEqual(expected_handler, var_handler.value()), &passed,
             &failed);
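
    // A Smi zero passed as the expected handler signals that the probe is
    // expected to miss.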
    m.BIND(&if_miss);
    m.Branch(m.TaggedEqual(expected_handler, m.SmiConstant(0)), &passed,
             &failed);

    m.BIND(&passed);
    m.Return(m.BooleanConstant(true));

    m.BIND(&failed);
    m.Return(m.BooleanConstant(false));
  }

  Handle<Code> code = data.GenerateCode();
  FunctionTester ft(code, kNumParams);

  std::vector<Handle<Name>> names;
  std::vector<Handle<JSObject>> receivers;
  std::vector<Handle<Code>> handlers;

  base::RandomNumberGenerator rand_gen(FLAG_random_seed);

  Factory* factory = isolate->factory();

  // Generate some number of names.
  for (int i = 0; i < StubCache::kPrimaryTableSize / 7; i++) {
    Handle<Name> name;
    switch (rand_gen.NextInt(3)) {
      case 0: {
        // Generate string.
        std::stringstream ss;
        ss << "s" << std::hex
           << (rand_gen.NextInt(Smi::kMaxValue) % StubCache::kPrimaryTableSize);
        name = factory->InternalizeUtf8String(ss.str().c_str());
        break;
      }
      case 1: {
        // Generate number string.
        std::stringstream ss;
        ss << (rand_gen.NextInt(Smi::kMaxValue) % StubCache::kPrimaryTableSize);
        name = factory->InternalizeUtf8String(ss.str().c_str());
        break;
      }
      case 2: {
        // Generate symbol.
        name = factory->NewSymbol();
        break;
      }
      default:
        UNREACHABLE();
    }
    names.push_back(name);
  }

  // Generate some number of receiver maps and receivers.
  for (int i = 0; i < StubCache::kSecondaryTableSize / 2; i++) {
    Handle<Map> map = Map::Create(isolate, 0);
    receivers.push_back(factory->NewJSObjectFromMap(map));
  }

  // Generate some number of handlers.
  for (int i = 0; i < 30; i++) {
Reland "Reland "[deoptimizer] Change deopt entries into builtins"" This is a reland of fbfa9bf4ec72b1b73a96b70ccb68cd98c321511b The arm64 was missing proper codegen for CFI, thus sizes were off. Original change's description: > Reland "[deoptimizer] Change deopt entries into builtins" > > This is a reland of 7f58ced72eb65b6b5530ccabaf2eaebe45bf9d33 > > It fixes the different exit size emitted on x64/Atom CPUs due to > performance tuning in TurboAssembler::Call. Additionally, add > cctests to verify the fixed size exits. > > Original change's description: > > [deoptimizer] Change deopt entries into builtins > > > > While the overall goal of this commit is to change deoptimization > > entries into builtins, there are multiple related things happening: > > > > - Deoptimization entries, formerly stubs (i.e. Code objects generated > > at runtime, guaranteed to be immovable), have been converted into > > builtins. The major restriction is that we now need to preserve the > > kRootRegister, which was formerly used on most architectures to pass > > the deoptimization id. The solution differs based on platform. > > - Renamed DEOPT_ENTRIES_OR_FOR_TESTING code kind to FOR_TESTING. > > - Removed heap/ support for immovable Code generation. > > - Removed the DeserializerData class (no longer needed). > > - arm64: to preserve 4-byte deopt exits, introduced a new optimization > > in which the final jump to the deoptimization entry is generated > > once per Code object, and deopt exits can continue to emit a > > near-call. > > - arm,ia32,x64: change to fixed-size deopt exits. This reduces exit > > sizes by 4/8, 5, and 5 bytes, respectively. > > > > On arm the deopt exit size is reduced from 12 (or 16) bytes to 8 bytes > > by using the same strategy as on arm64 (recalc deopt id from return > > address). Before: > > > > e300a002 movw r10, <id> > > e59fc024 ldr ip, [pc, <entry offset>] > > e12fff3c blx ip > > > > After: > > > > e59acb35 ldr ip, [r10, <entry offset>] > > e12fff3c blx ip > > > > On arm64 the deopt exit size remains 4 bytes (or 8 bytes in same cases > > with CFI). Additionally, up to 4 builtin jumps are emitted per Code > > object (max 32 bytes added overhead per Code object). Before: > > > > 9401cdae bl <entry offset> > > > > After: > > > > # eager deoptimization entry jump. > > f95b1f50 ldr x16, [x26, <eager entry offset>] > > d61f0200 br x16 > > # lazy deoptimization entry jump. > > f95b2b50 ldr x16, [x26, <lazy entry offset>] > > d61f0200 br x16 > > # the deopt exit. > > 97fffffc bl <eager deoptimization entry jump offset> > > > > On ia32 the deopt exit size is reduced from 10 to 5 bytes. Before: > > > > bb00000000 mov ebx,<id> > > e825f5372b call <entry> > > > > After: > > > > e8ea2256ba call <entry> > > > > On x64 the deopt exit size is reduced from 12 to 7 bytes. 
Before: > > > > 49c7c511000000 REX.W movq r13,<id> > > e8ea2f0700 call <entry> > > > > After: > > > > 41ff9560360000 call [r13+<entry offset>] > > > > Bug: v8:8661,v8:8768 > > Change-Id: I13e30aedc360474dc818fecc528ce87c3bfeed42 > > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2465834 > > Commit-Queue: Jakob Gruber <jgruber@chromium.org> > > Reviewed-by: Ross McIlroy <rmcilroy@chromium.org> > > Reviewed-by: Tobias Tebbi <tebbi@chromium.org> > > Reviewed-by: Ulan Degenbaev <ulan@chromium.org> > > Cr-Commit-Position: refs/heads/master@{#70597} > > Tbr: ulan@chromium.org, tebbi@chromium.org, rmcilroy@chromium.org > Bug: v8:8661,v8:8768,chromium:1140165 > Change-Id: Ibcd5c39c58a70bf2b2ac221aa375fc68d495e144 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2485506 > Reviewed-by: Jakob Gruber <jgruber@chromium.org> > Reviewed-by: Tobias Tebbi <tebbi@chromium.org> > Commit-Queue: Jakob Gruber <jgruber@chromium.org> > Cr-Commit-Position: refs/heads/master@{#70655} Tbr: ulan@chromium.org, tebbi@chromium.org, rmcilroy@chromium.org Bug: v8:8661 Bug: v8:8768 Bug: chromium:1140165 Change-Id: I471cc94fc085e527dc9bfb5a84b96bd907c2333f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2488682 Reviewed-by: Jakob Gruber <jgruber@chromium.org> Commit-Queue: Jakob Gruber <jgruber@chromium.org> Cr-Commit-Position: refs/heads/master@{#70672}
2020-10-21 05:12:25 +00:00
    handlers.push_back(CreateCodeOfKind(CodeKind::FOR_TESTING));
  }

  // Ensure that GC does not happen because from now on we are going to fill
  // our own stub cache instance with raw values.
  DisallowHeapAllocation no_gc;

  // Populate {stub_cache}.
  const int N = StubCache::kPrimaryTableSize + StubCache::kSecondaryTableSize;
  for (int i = 0; i < N; i++) {
    int index = rand_gen.NextInt();
    Handle<Name> name = names[index % names.size()];
    Handle<JSObject> receiver = receivers[index % receivers.size()];
    Handle<Code> handler = handlers[index % handlers.size()];
    stub_cache.Set(*name, receiver->map(), MaybeObject::FromObject(*handler));
  }

  // Perform some queries.
  bool queried_existing = false;
  bool queried_non_existing = false;
  for (int i = 0; i < N; i++) {
    int index = rand_gen.NextInt();
    Handle<Name> name = names[index % names.size()];
    Handle<JSObject> receiver = receivers[index % receivers.size()];
    MaybeObject handler = stub_cache.Get(*name, receiver->map());
    if (handler.ptr() == kNullAddress) {
      queried_non_existing = true;
    } else {
      queried_existing = true;
    }

    Handle<Object> expected_handler(handler->GetHeapObjectOrSmi(), isolate);
    ft.CheckTrue(receiver, name, expected_handler);
  }
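
  // Query again with independently chosen name and receiver indices; many of
  // these combinations were never inserted, so this mostly exercises the miss
  // path.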
  for (int i = 0; i < N; i++) {
    int index1 = rand_gen.NextInt();
    int index2 = rand_gen.NextInt();
    Handle<Name> name = names[index1 % names.size()];
    Handle<JSObject> receiver = receivers[index2 % receivers.size()];
    MaybeObject handler = stub_cache.Get(*name, receiver->map());
    if (handler.ptr() == kNullAddress) {
      queried_non_existing = true;
    } else {
      queried_existing = true;
    }

    Handle<Object> expected_handler(handler->GetHeapObjectOrSmi(), isolate);
    ft.CheckTrue(receiver, name, expected_handler);
  }

  // Ensure we performed both kinds of queries.
  CHECK(queried_existing && queried_non_existing);
}

}  // namespace internal
}  // namespace v8