// v8/test/cctest/heap/test-lab.cc
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <vector>
#include "src/common/globals.h"
#include "src/heap/heap-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/objects.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
namespace heap {
// Reserves |size_in_bytes| of old-space memory (double-aligned) to serve as
// the backing store for a LocalAllocationBuffer and returns its start address.
// Allocation failure is fatal (ToObjectChecked).
static Address AllocateLabBackingStore(Heap* heap, intptr_t size_in_bytes) {
  AllocationResult result = heap->old_space()->AllocateRaw(
      static_cast<int>(size_in_bytes), kDoubleAligned);
  return result.ToObjectChecked().address();
}
// Walks the heap range [base, limit) and checks that it is fully covered by
// free-space/filler objects whose sizes match |expected_size| in order.
// Fails if the range contains a non-filler object or an unexpected size.
static void VerifyIterable(v8::internal::Address base,
                           v8::internal::Address limit,
                           std::vector<intptr_t> expected_size) {
  CHECK_LE(base, limit);
  HeapObject object;
  size_t counter = 0;
  while (base < limit) {
    object = HeapObject::FromAddress(base);
    // Only fillers/free space are expected: the LABs under test turn all
    // allocated and leftover memory into fillers.
    CHECK(object.IsFreeSpaceOrFiller());
    CHECK_LT(counter, expected_size.size());
    CHECK_EQ(expected_size[counter], object.Size());
    base += object.Size();
    counter++;
  }
}
// Tries to allocate |size_in_bytes| (with the given |alignment|) from |lab|.
// On success the fresh memory is immediately turned into a filler object so
// the space stays iterable; returns whether the allocation succeeded.
static bool AllocateFromLab(Heap* heap, LocalAllocationBuffer* lab,
                            intptr_t size_in_bytes,
                            AllocationAlignment alignment = kWordAligned) {
  HeapObject allocation;
  if (!lab->AllocateRawAligned(static_cast<int>(size_in_bytes), alignment)
           .To(&allocation)) {
    return false;
  }
  heap->CreateFillerObjectAt(allocation.address(),
                             static_cast<int>(size_in_bytes),
                             ClearRecordedSlots::kNo);
  return true;
}
TEST(InvalidLab) {
  // The canonical invalid buffer must report itself as invalid.
  LocalAllocationBuffer invalid_lab = LocalAllocationBuffer::InvalidBuffer();
  CHECK(!invalid_lab.IsValid());
}
TEST(UnusedLabImplicitClose) {
  // A LAB that is never allocated from must turn its entire backing store
  // into a single filler when it goes out of scope (implicit close).
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  const int kLabSize = 4 * KB;
  Address lab_base = AllocateLabBackingStore(heap, kLabSize);
  Address lab_limit = lab_base + kLabSize;
  std::vector<intptr_t> expected_sizes{kLabSize};
  {
    AllocationResult backing_store(HeapObject::FromAddress(lab_base));
    LocalAllocationBuffer lab =
        LocalAllocationBuffer::FromResult(heap, backing_store, kLabSize);
    CHECK(lab.IsValid());
    // Destructor runs here without any allocation having happened.
  }
  VerifyIterable(lab_base, lab_limit, expected_sizes);
}
TEST(SimpleAllocate) {
  // A single 128-byte allocation must show up as one object of that size,
  // followed by a filler covering the rest of the buffer after close.
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  const int kLabSize = 4 * KB;
  Address base = AllocateLabBackingStore(heap, kLabSize);
  Address limit = base + kLabSize;
  std::vector<intptr_t> sizes{128};
  std::vector<intptr_t> expected_sizes{128, kLabSize - 128};
  {
    AllocationResult lab_backing_store(HeapObject::FromAddress(base));
    LocalAllocationBuffer lab =
        LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
    CHECK(lab.IsValid());
    for (auto size : sizes) {
      // The buffer is larger than the sum of requests, so every allocation
      // must succeed. (The original accumulated a `sum` that was never
      // checked; assert success explicitly instead.)
      CHECK(AllocateFromLab(heap, &lab, size));
    }
  }
  VerifyIterable(base, limit, expected_sizes);
}
TEST(AllocateUntilLabOOM) {
  // Requests deliberately exceed the LAB capacity; the overflowing ones fail
  // and the remaining 384 bytes become a filler on implicit close.
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  const int kLabSize = 2 * KB;
  Address base = AllocateLabBackingStore(heap, kLabSize);
  Address limit = base + kLabSize;
  // The following objects won't fit in {kLabSize}.
  std::vector<intptr_t> sizes{512, 512, 128, 512, 512};
  std::vector<intptr_t> expected_sizes{512, 512, 128, 512,
                                       384 /* left over */};
  intptr_t allocated = 0;
  {
    AllocationResult lab_backing_store(HeapObject::FromAddress(base));
    LocalAllocationBuffer lab =
        LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
    CHECK(lab.IsValid());
    for (auto size : sizes) {
      // Keep trying even after a failure; only successful requests count.
      if (AllocateFromLab(heap, &lab, size)) allocated += size;
    }
    CHECK_EQ(kLabSize - allocated, 384);
  }
  VerifyIterable(base, limit, expected_sizes);
}
TEST(AllocateExactlyUntilLimit) {
  // Four 512-byte allocations consume the 2 KB LAB exactly; the trailing
  // zero in |expected_sizes| marks the zero-sized leftover at the limit.
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  const int kLabSize = 2 * KB;
  Address base = AllocateLabBackingStore(heap, kLabSize);
  Address limit = base + kLabSize;
  std::vector<intptr_t> sizes{512, 512, 512, 512};
  std::vector<intptr_t> expected_sizes{512, 512, 512, 512, 0};
  {
    AllocationResult lab_backing_store(HeapObject::FromAddress(base));
    LocalAllocationBuffer lab =
        LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
    CHECK(lab.IsValid());
    intptr_t allocated = 0;
    for (auto size : sizes) {
      if (!AllocateFromLab(heap, &lab, size)) break;
      allocated += size;
    }
    CHECK_EQ(kLabSize - allocated, 0);
  }
  VerifyIterable(base, limit, expected_sizes);
}
TEST(MergeSuccessful) {
  // Two adjacent LABs: partially fill the first, merge its leftover into the
  // second, then allocate the combined space down to zero.
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  const int kLabSize = 2 * KB;
  Address base1 = AllocateLabBackingStore(heap, 2 * kLabSize);
  Address limit1 = base1 + kLabSize;
  Address base2 = limit1;
  Address limit2 = base2 + kLabSize;
  std::vector<intptr_t> sizes1{512, 512, 512, 256};
  std::vector<intptr_t> expected_sizes1{512, 512, 512, 256, 256};
  std::vector<intptr_t> sizes2{256, 512, 512, 512, 512};
  std::vector<intptr_t> expected_sizes2{512, 512, 512, 256, 256,
                                        512, 512, 512, 512, 0};
  {
    AllocationResult lab_backing_store1(HeapObject::FromAddress(base1));
    LocalAllocationBuffer lab1 =
        LocalAllocationBuffer::FromResult(heap, lab_backing_store1, kLabSize);
    CHECK(lab1.IsValid());
    intptr_t allocated = 0;
    for (auto size : sizes1) {
      if (!AllocateFromLab(heap, &lab1, size)) break;
      allocated += size;
    }
    // lab2 starts exactly where lab1 ends, so merging lab1's leftover into
    // lab2 must succeed and invalidate lab1.
    AllocationResult lab_backing_store2(HeapObject::FromAddress(base2));
    LocalAllocationBuffer lab2 =
        LocalAllocationBuffer::FromResult(heap, lab_backing_store2, kLabSize);
    CHECK(lab2.IsValid());
    CHECK(lab2.TryMerge(&lab1));
    CHECK(!lab1.IsValid());
    for (auto size : sizes2) {
      if (!AllocateFromLab(heap, &lab2, size)) break;
      allocated += size;
    }
    // Both buffers must be consumed completely.
    CHECK_EQ(2 * kLabSize - allocated, 0);
  }
  VerifyIterable(base1, limit1, expected_sizes1);
  VerifyIterable(base1, limit2, expected_sizes2);
}
TEST(MergeFailed) {
  // lab3 is not adjacent to lab1 (lab2 sits between them), so attempting to
  // merge them must be rejected.
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  const int kLabSize = 2 * KB;
  Address base1 = AllocateLabBackingStore(heap, 3 * kLabSize);
  Address base2 = base1 + kLabSize;
  Address base3 = base2 + kLabSize;
  {
    AllocationResult backing1(HeapObject::FromAddress(base1));
    LocalAllocationBuffer lab1 =
        LocalAllocationBuffer::FromResult(heap, backing1, kLabSize);
    CHECK(lab1.IsValid());
    AllocationResult backing2(HeapObject::FromAddress(base2));
    LocalAllocationBuffer lab2 =
        LocalAllocationBuffer::FromResult(heap, backing2, kLabSize);
    CHECK(lab2.IsValid());
    AllocationResult backing3(HeapObject::FromAddress(base3));
    LocalAllocationBuffer lab3 =
        LocalAllocationBuffer::FromResult(heap, backing3, kLabSize);
    CHECK(lab3.IsValid());
    CHECK(!lab3.TryMerge(&lab1));
  }
}
#ifdef V8_HOST_ARCH_32_BIT
TEST(AllocateAligned) {
  // On 32-bit hosts a double-aligned allocation after a word-aligned one
  // requires a 4-byte alignment filler; check it appears between the two
  // objects, followed by the leftover filler (1864 bytes).
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  const int kLabSize = 2 * KB;
  Address base = AllocateLabBackingStore(heap, kLabSize);
  Address limit = base + kLabSize;
  std::vector<std::pair<intptr_t, AllocationAlignment>> sizes{
      {116, kWordAligned}, {64, kDoubleAligned}};
  std::vector<intptr_t> expected_sizes{116, 4, 64, 1864};
  {
    AllocationResult lab_backing_store(HeapObject::FromAddress(base));
    LocalAllocationBuffer lab =
        LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
    CHECK(lab.IsValid());
    for (const auto& size_and_alignment : sizes) {
      if (!AllocateFromLab(heap, &lab, size_and_alignment.first,
                           size_and_alignment.second)) {
        break;
      }
    }
  }
  VerifyIterable(base, limit, expected_sizes);
}
#endif // V8_HOST_ARCH_32_BIT
} // namespace heap
} // namespace internal
} // namespace v8