[cctest] Add tests for aborting compaction of pages
Tests for
* aborting a full page.
* partially aborting a page.
* partially aborting a page with pointers between aborted pages.
* partially aborting a page with store buffer entries.

Also introduces ShouldForceOOM(), which prohibits a PagedSpace from
expanding. Compaction spaces refer to the corresponding actual space.

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1511933002

Cr-Commit-Position: refs/heads/master@{#32783}
Commit: 161a0e0051 (parent: 7e5ff19ee2)
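In essence, all four new tests share one setup pattern. A condensed sketch, distilled from test/cctest/heap/test-compaction.cc below (not a standalone test; it assumes an initialized VM and the helpers introduced by this commit):

  // Make sweeping deterministic, then force a page to become an evacuation
  // candidate and make old space refuse to grow, so compaction hits OOM and
  // must abort the page.
  heap->concurrent_sweeping_enabled_ = false;
  std::vector<Handle<FixedArray>> handles =
      CreatePadding(heap, Page::kAllocatableMemory, TENURED);
  Page* page = Page::FromAddress(handles.front()->address());
  page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
  heap->set_force_oom(true);  // CanExpandOldGeneration() now returns false.
  heap->CollectAllGarbage();  // Compaction runs out of memory and aborts.
  CheckInvariantsOfAbortedPage(page);  // Markbits and page flags are reset.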
src/heap/heap.cc
@@ -164,7 +164,8 @@ Heap::Heap()
       deserialization_complete_(false),
       concurrent_sweeping_enabled_(false),
       strong_roots_list_(NULL),
-      array_buffer_tracker_(NULL) {
+      array_buffer_tracker_(NULL),
+      force_oom_(false) {
   // Allow build-time customization of the max semispace size. Building
   // V8 with snapshots and a non-default max semispace size is much
   // easier if you can define it as part of the build environment.
src/heap/heap.h
@@ -817,6 +817,7 @@ class Heap {
   // TODO(hpayer): There is still a missmatch between capacity and actual
   // committed memory size.
   bool CanExpandOldGeneration(int size) {
+    if (force_oom_) return false;
     return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize();
   }
@@ -2119,6 +2120,8 @@ class Heap {
   MUST_USE_RESULT AllocationResult InternalizeString(String* str);
 
+  void set_force_oom(bool value) { force_oom_ = value; }
+
   // The amount of external memory registered through the API kept alive
   // by global handles
   int64_t amount_of_external_allocated_memory_;
@@ -2369,6 +2372,9 @@ class Heap {
   ArrayBufferTracker* array_buffer_tracker_;
 
+  // Used for testing purposes.
+  bool force_oom_;
+
   // Classes in "heap" can be friends.
   friend class AlwaysAllocateScope;
   friend class GCCallbacksScope;
test/cctest/cctest.gyp
@@ -96,6 +96,7 @@
         'gay-shortest.cc',
         'heap/heap-tester.h',
         'heap/test-alloc.cc',
+        'heap/test-compaction.cc',
         'heap/test-heap.cc',
         'heap/test-incremental-marking.cc',
         'heap/test-mark-compact.cc',
test/cctest/heap/heap-tester.h
@@ -11,6 +11,10 @@
 // Tests that should have access to private methods of {v8::internal::Heap}.
 // Those tests need to be defined using HEAP_TEST(Name) { ... }.
 #define HEAP_TEST_METHODS(V)                                \
+  V(CompactionFullAbortedPage)                              \
+  V(CompactionPartiallyAbortedPage)                         \
+  V(CompactionPartiallyAbortedPageIntraAbortedPointers)     \
+  V(CompactionPartiallyAbortedPageWithStoreBufferEntries)   \
   V(CompactionSpaceDivideMultiplePages)                     \
   V(CompactionSpaceDivideSinglePage)                        \
   V(GCFlags)                                                \
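The expansion of HEAP_TEST_METHODS and HEAP_TEST lives elsewhere in heap-tester.h and is not part of this diff. Purely as an illustration of the X-macro pattern used here, a consumer might stamp out one declaration per listed name, assuming a tester class that Heap befriends:

  // Hypothetical consumer (not from this diff): each V(Name) becomes one
  // static method declaration; HEAP_TEST(Name) then supplies the body with
  // access to Heap's private members via the friend declaration.
  #define DECLARE_HEAP_TEST_METHOD(Name) static void Test##Name();
  class HeapTester {
   public:
    HEAP_TEST_METHODS(DECLARE_HEAP_TEST_METHOD)
  };
  #undef DECLARE_HEAP_TEST_METHOD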
test/cctest/heap/test-compaction.cc (new file, 344 lines)
@@ -0,0 +1,344 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/utils-inl.h"

namespace v8 {
namespace internal {

static std::vector<Handle<FixedArray>> FillUpFirstOldSpacePage(Heap* heap) {
  // This function assumes that old-space top is still on the first page.
  heap->old_space()->EmptyAllocationInfo();
  int free_on_first_page = static_cast<int>(heap->old_space()->Available());
  return CreatePadding(heap, free_on_first_page, TENURED);
}


static void CheckInvariantsOfAbortedPage(Page* page) {
  // Check invariants:
  // 1) Markbits are cleared.
  // 2) The page is not marked as an evacuation candidate anymore.
  // 3) The page is not marked as aborted compaction anymore.
  CHECK(page->markbits()->IsClean());
  CHECK(!page->IsEvacuationCandidate());
  CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}

HEAP_TEST(CompactionFullAbortedPage) {
  // Test the scenario where we reach OOM during compaction and the whole page
  // is aborted.

  FLAG_manual_evacuation_candidates_selection = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half-aborted page. We cannot just set
  // {FLAG_concurrent_sweeping} because the flag is cached in Heap, which is
  // initialized earlier.
  heap->concurrent_sweeping_enabled_ = false;
  {
    HandleScope scope1(isolate);
    // Fill up the first page since it cannot be evacuated.
    auto first_page_handles = FillUpFirstOldSpacePage(heap);

    {
      HandleScope scope2(isolate);
      heap->old_space()->EmptyAllocationInfo();
      auto second_page_handles =
          CreatePadding(heap, Page::kAllocatableMemory, TENURED);
      Page* to_be_aborted_page =
          Page::FromAddress(second_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      heap->set_force_oom(true);
      heap->CollectAllGarbage();

      // Check that all handles still point to the same page, i.e., compaction
      // has been aborted on the page.
      for (Handle<FixedArray> object : second_page_handles) {
        CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address()));
      }
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}

HEAP_TEST(CompactionPartiallyAbortedPage) {
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one.

  FLAG_manual_evacuation_candidates_selection = true;

  const int object_size = 128 * KB;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half-aborted page. We cannot just set
  // {FLAG_concurrent_sweeping} because the flag is cached in Heap, which is
  // initialized earlier.
  heap->concurrent_sweeping_enabled_ = false;
  {
    HandleScope scope1(isolate);
    // Fill up the first page since it cannot be evacuated.
    auto first_page_handles = FillUpFirstOldSpacePage(heap);

    {
      HandleScope scope2(isolate);
      // Fill the second page with objects of size {object_size} (the last one
      // is properly adjusted).
      heap->old_space()->EmptyAllocationInfo();
      auto second_page_handles =
          CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
      // Mark the second page for evacuation.
      Page* to_be_aborted_page =
          Page::FromAddress(second_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      {
        // Add a third page that is filled with {num_objects} objects of size
        // {object_size}.
        HandleScope scope3(isolate);
        heap->old_space()->EmptyAllocationInfo();
        const int num_objects = 3;
        std::vector<Handle<FixedArray>> third_page_handles = CreatePadding(
            heap, object_size * num_objects, TENURED, object_size);
        Page* third_page =
            Page::FromAddress(third_page_handles.front()->address());
        heap->set_force_oom(true);
        heap->CollectAllGarbage();

        bool migration_aborted = false;
        for (Handle<FixedArray> object : second_page_handles) {
          // Once compaction has been aborted, all following objects still
          // have to be on the initial page.
          CHECK(!migration_aborted ||
                (Page::FromAddress(object->address()) == to_be_aborted_page));
          if (Page::FromAddress(object->address()) == to_be_aborted_page) {
            // This object has not been migrated.
            migration_aborted = true;
          } else {
            CHECK_EQ(Page::FromAddress(object->address()), third_page);
          }
        }
        // Check that we actually created a scenario with a partially aborted
        // page.
        CHECK(migration_aborted);
        CheckInvariantsOfAbortedPage(to_be_aborted_page);
      }
    }
  }
}

HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together. This test makes sure that intra-aborted page
  // pointers get properly updated.

  FLAG_manual_evacuation_candidates_selection = true;

  const int object_size = 128 * KB;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half-aborted page. We cannot just set
  // {FLAG_concurrent_sweeping} because the flag is cached in Heap, which is
  // initialized earlier.
  heap->concurrent_sweeping_enabled_ = false;
  {
    HandleScope scope1(isolate);
    // Fill up the first page since it cannot be evacuated.
    auto first_page_handles = FillUpFirstOldSpacePage(heap);

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill the second page with objects of size {object_size} (the last one
      // is properly adjusted).
      heap->old_space()->EmptyAllocationInfo();
      const int free_on_second_page = Page::kAllocatableMemory;
      std::vector<Handle<FixedArray>> second_page_handles =
          CreatePadding(heap, free_on_second_page, TENURED, object_size);
      // Mark the second page for evacuation.
      to_be_aborted_page =
          Page::FromAddress(second_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      // Link the objects on the page together and anchor the chain in the
      // first page, so it stays reachable after {temporary_scope} closes.
      for (size_t i = second_page_handles.size() - 1; i > 0; i--) {
        second_page_handles[i]->set(0, *second_page_handles[i - 1]);
      }
      first_page_handles.front()->set(0, *second_page_handles.back());
    }

    {
      // Add a third page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      heap->old_space()->EmptyAllocationInfo();
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> third_page_handles =
          CreatePadding(heap, used_memory, TENURED, object_size);
      Page* third_page =
          Page::FromAddress(third_page_handles.front()->address());
      heap->set_force_oom(true);
      heap->CollectAllGarbage();

      // The following check makes sure that we compacted "some" objects,
      // while leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = first_page_handles.front();
      while (current->get(0) != heap->undefined_value()) {
        current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
        CHECK(current->IsFixedArray());
        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromAddress(current->address()) == to_be_aborted_page;
        bool on_third_page =
            Page::FromAddress(current->address()) == third_page;
        CHECK((in_place && on_aborted_page) || (!in_place && on_third_page));
      }
      // Check that we at least migrated one object, as otherwise the test
      // would not trigger.
      CHECK(!in_place);

      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}

HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together and the very first object on the aborted page points
  // into new space. The test verifies that the store buffer entries are
  // properly cleared and rebuilt after aborting a page. Failing to do so can
  // result in other objects being allocated in the free space where their
  // payload looks like a valid new-space pointer.

  FLAG_manual_evacuation_candidates_selection = true;

  const int object_size = 128 * KB;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half-aborted page. We cannot just set
  // {FLAG_concurrent_sweeping} because the flag is cached in Heap, which is
  // initialized earlier.
  heap->concurrent_sweeping_enabled_ = false;
  {
    HandleScope scope1(isolate);
    // Fill up the first page since it cannot be evacuated.
    auto first_page_handles = FillUpFirstOldSpacePage(heap);

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill the second page with objects of size {object_size} (the last one
      // is properly adjusted).
      heap->old_space()->EmptyAllocationInfo();
      auto second_page_handles =
          CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
      // Mark the second page for evacuation.
      to_be_aborted_page =
          Page::FromAddress(second_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      for (size_t i = second_page_handles.size() - 1; i > 0; i--) {
        second_page_handles[i]->set(0, *second_page_handles[i - 1]);
      }
      first_page_handles.front()->set(0, *second_page_handles.back());
      Handle<FixedArray> new_space_array =
          isolate->factory()->NewFixedArray(1, NOT_TENURED);
      CHECK(heap->InNewSpace(*new_space_array));
      second_page_handles.front()->set(1, *new_space_array);
    }

    {
      // Add a third page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      heap->old_space()->EmptyAllocationInfo();
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> third_page_handles =
          CreatePadding(heap, used_memory, TENURED, object_size);
      Page* third_page =
          Page::FromAddress(third_page_handles.front()->address());
      heap->set_force_oom(true);
      heap->CollectAllGarbage();

      // The following check makes sure that we compacted "some" objects,
      // while leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = first_page_handles.front();
      while (current->get(0) != heap->undefined_value()) {
        current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
        CHECK(!heap->InNewSpace(*current));
        CHECK(current->IsFixedArray());
        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromAddress(current->address()) == to_be_aborted_page;
        bool on_third_page =
            Page::FromAddress(current->address()) == third_page;
        CHECK((in_place && on_aborted_page) || (!in_place && on_third_page));
      }
      // Check that we at least migrated one object, as otherwise the test
      // would not trigger.
      CHECK(!in_place);

      CheckInvariantsOfAbortedPage(to_be_aborted_page);

      // Allocate a new object in new space.
      Handle<FixedArray> holder =
          isolate->factory()->NewFixedArray(10, NOT_TENURED);
      // Create a broken address that looks like a tagged pointer to a
      // new-space object.
      Address broken_address = holder->address() + 2 * kPointerSize + 1;
      // Convert it to a vector to create a string from it.
      Vector<const uint8_t> string_to_broken_address(
          reinterpret_cast<const uint8_t*>(&broken_address), 8);

      Handle<String> string;
      do {
        // We know that the interesting slot will be on the aborted page and
        // hence we allocate until we get our string on the aborted page.
        // We used slot 1 in the fixed-size array, which corresponds to the
        // first word in the string. Since the first object definitely
        // migrated, we can just allocate until we hit the aborted page.
        string = isolate->factory()
                     ->NewStringFromOneByte(string_to_broken_address, TENURED)
                     .ToHandleChecked();
      } while (Page::FromAddress(string->address()) != to_be_aborted_page);

      // If store buffer entries are not properly filtered/reset for aborted
      // pages, we now have a broken address at an object slot in old space
      // and the following scavenge will crash.
      heap->CollectGarbage(NEW_SPACE);
    }
  }
}

}  // namespace internal
}  // namespace v8
test/cctest/heap/utils-inl.h
@@ -16,16 +16,15 @@ namespace v8 {
 namespace internal {
 
 static int LenFromSize(int size) {
-  return (size - i::FixedArray::kHeaderSize) / i::kPointerSize;
+  return (size - FixedArray::kHeaderSize) / kPointerSize;
 }
 
 
-static inline void CreatePadding(i::Heap* heap, int padding_size,
-                                 i::PretenureFlag tenure) {
-  const int max_number_of_objects = 20;
-  v8::internal::Handle<v8::internal::FixedArray>
-      big_objects[max_number_of_objects];
-  i::Isolate* isolate = heap->isolate();
+static inline std::vector<Handle<FixedArray>> CreatePadding(
+    Heap* heap, int padding_size, PretenureFlag tenure,
+    int object_size = Page::kMaxRegularHeapObjectSize) {
+  std::vector<Handle<FixedArray>> handles;
+  Isolate* isolate = heap->isolate();
   int allocate_memory;
   int length;
   int free_memory = padding_size;
@@ -41,9 +40,10 @@ static inline void CreatePadding(i::Heap* heap, int padding_size,
         *heap->new_space()->allocation_top_address());
     CHECK(padding_size <= current_free_memory || current_free_memory == 0);
   }
-  for (int i = 0; i < max_number_of_objects && free_memory > 0; i++) {
-    if (free_memory > i::Page::kMaxRegularHeapObjectSize) {
-      allocate_memory = i::Page::kMaxRegularHeapObjectSize;
+  while (free_memory > 0) {
+    // for (int i = 0; i < max_number_of_objects && free_memory > 0; i++) {
+    if (free_memory > object_size) {
+      allocate_memory = object_size;
       length = LenFromSize(allocate_memory);
     } else {
       allocate_memory = free_memory;
@@ -55,11 +55,12 @@ static inline void CreatePadding(i::Heap* heap, int padding_size,
         break;
       }
     }
-    big_objects[i] = isolate->factory()->NewFixedArray(length, tenure);
-    CHECK((tenure == i::NOT_TENURED && heap->InNewSpace(*big_objects[i])) ||
-          (tenure == i::TENURED && heap->InOldSpace(*big_objects[i])));
+    handles.push_back(isolate->factory()->NewFixedArray(length, tenure));
+    CHECK((tenure == NOT_TENURED && heap->InNewSpace(*handles.back())) ||
+          (tenure == TENURED && heap->InOldSpace(*handles.back())));
     free_memory -= allocate_memory;
   }
+  return handles;
 }
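For reference, this is how the tests above invoke the extended helper; the new object_size parameter caps the size of each padding array, and the last array is shrunk to fit the remaining free memory:

  // E.g. from CompactionPartiallyAbortedPage: fill a whole page with 128 KB
  // fixed arrays and keep handles so each object can be tracked across GC.
  std::vector<Handle<FixedArray>> handles =
      CreatePadding(heap, Page::kAllocatableMemory, TENURED, 128 * KB);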