Reland of "[cctest] Add tests for aborting compaction of pages"

Tests for
* aborting a full page.
* partially aborting a page.
* partially aborting a page with pointers between aborted pages.
* partially aborting a page with store buffer entries.

Also introduces force_oom(), which prohibits the old space from expanding
(see the usage sketch below).
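
For reference, a condensed sketch of how the new hooks fit together in a
cctest (distilled from the new heap/test-compaction.cc added below; the test
name is illustrative and the scoping/partial-abort variants are omitted):

HEAP_TEST(CompactionAbortSketch) {
  // Illustrative name only; real tests are registered in heap-tester.h.
  // Disable concurrent sweeping so memory is in a predictable state.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  HandleScope scope(isolate);
  // 1) Prohibit allocation on all currently existing old space pages.
  PageIterator it(heap->old_space());
  while (it.has_next()) it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
  // 2) Fill a fresh page and force it to become an evacuation candidate.
  CHECK(heap->old_space()->Expand());
  auto handles = CreatePadding(heap, Page::kAllocatableMemory, TENURED);
  Page* page = Page::FromAddress(handles.front()->address());
  page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
  // 3) Prohibit old space expansion so compaction of the page is aborted.
  heap->set_force_oom(true);
  heap->CollectAllGarbage();
  // All objects must still be on the original (aborted) page.
  for (Handle<FixedArray> object : handles) {
    CHECK_EQ(page, Page::FromAddress(object->address()));
  }
}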

BUG=chromium:524425
LOG=N

CQ_EXTRA_TRYBOTS=tryserver.v8:v8_linux_nosnap_rel,v8_linux_nosnap_dbg,v8_win_nosnap_shared_rel,v8_win_nosnap_shared_compile_rel

Review URL: https://codereview.chromium.org/1518803005

Cr-Commit-Position: refs/heads/master@{#32899}
mlippautz 2015-12-16 06:06:33 -08:00 committed by Commit bot
parent 65d3009e03
commit 2bb51df9bd
9 changed files with 406 additions and 39 deletions

@@ -163,7 +163,8 @@ Heap::Heap()
gc_callbacks_depth_(0),
deserialization_complete_(false),
strong_roots_list_(NULL),
array_buffer_tracker_(NULL) {
array_buffer_tracker_(NULL),
force_oom_(false) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.

@@ -817,6 +817,7 @@ class Heap {
// TODO(hpayer): There is still a mismatch between capacity and actual
// committed memory size.
bool CanExpandOldGeneration(int size) {
if (force_oom_) return false;
return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize();
}
@@ -2117,6 +2118,8 @@ class Heap {
MUST_USE_RESULT AllocationResult InternalizeString(String* str);
void set_force_oom(bool value) { force_oom_ = value; }
// The amount of external memory registered through the API kept alive
// by global handles
int64_t amount_of_external_allocated_memory_;
@@ -2365,6 +2368,9 @@ class Heap {
ArrayBufferTracker* array_buffer_tracker_;
// Used for testing purposes.
bool force_oom_;
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
friend class GCCallbacksScope;

@@ -3774,6 +3774,16 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
continue;
}
if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
// We need to sweep the page to get it into an iterable state again. Note
// that this adds unusable memory into the free list that is later on
// (in the free list) dropped again. Since we only use the flag for
// testing this is fine.
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
continue;
}
// One unused page is kept, all further are released before sweeping them.
if (p->LiveBytes() == 0) {
if (unused_page_present) {

@@ -2291,7 +2291,7 @@ FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
if (node == nullptr) return nullptr;
Page* page = Page::FromAddress(node->address());
while ((node != nullptr) && page->IsEvacuationCandidate()) {
while ((node != nullptr) && !page->CanAllocate()) {
available_ -= node->size();
page->add_available_in_free_list(type_, -(node->Size()));
node = node->next();
@@ -2333,7 +2333,7 @@ FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
int size = cur_node->size();
Page* page_for_node = Page::FromAddress(cur_node->address());
if ((size >= size_in_bytes) || page_for_node->IsEvacuationCandidate()) {
if ((size >= size_in_bytes) || !page_for_node->CanAllocate()) {
// The node is either large enough or contained in an evacuation
// candidate. In both cases we need to unlink it from the list.
available_ -= size;
@@ -2347,7 +2347,7 @@ FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
prev_non_evac_node->set_next(cur_node->next());
}
// For evacuation candidates we continue.
if (page_for_node->IsEvacuationCandidate()) {
if (!page_for_node->CanAllocate()) {
page_for_node->add_available_in_free_list(type_, -size);
continue;
}
@@ -2758,8 +2758,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
if (allocation_info_.top() >= allocation_info_.limit()) return;
if (Page::FromAllocationTop(allocation_info_.top())
->IsEvacuationCandidate()) {
if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
// Create filler object to keep page iterable if it was iterable.
int remaining =
static_cast<int>(allocation_info_.limit() - allocation_info_.top());

@@ -323,6 +323,9 @@ class MemoryChunk {
// candidates selection cycle.
FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
// This flag is intended to be used for testing.
NEVER_ALLOCATE_ON_PAGE,
// The memory chunk is already logically freed, however the actual freeing
// still has to be performed.
PRE_FREED,
@@ -682,6 +685,10 @@ class MemoryChunk {
return IsFlagSet(EVACUATION_CANDIDATE);
}
bool CanAllocate() {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
}
bool ShouldSkipEvacuationSlotRecording() {
return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
}

@@ -96,6 +96,7 @@
'gay-shortest.cc',
'heap/heap-tester.h',
'heap/test-alloc.cc',
'heap/test-compaction.cc',
'heap/test-heap.cc',
'heap/test-incremental-marking.cc',
'heap/test-mark-compact.cc',

@@ -10,20 +10,24 @@
// Tests that should have access to private methods of {v8::internal::Heap}.
// Those tests need to be defined using HEAP_TEST(Name) { ... }.
#define HEAP_TEST_METHODS(V) \
V(CompactionSpaceDivideMultiplePages) \
V(CompactionSpaceDivideSinglePage) \
V(GCFlags) \
V(MarkCompactCollector) \
V(NoPromotion) \
V(NumberStringCacheSize) \
V(ObjectGroups) \
V(Promotion) \
V(Regression39128) \
V(ResetWeakHandle) \
V(StressHandles) \
V(TestMemoryReducerSampleJsCalls) \
V(TestSizeOfObjects) \
#define HEAP_TEST_METHODS(V) \
V(CompactionFullAbortedPage) \
V(CompactionPartiallyAbortedPage) \
V(CompactionPartiallyAbortedPageIntraAbortedPointers) \
V(CompactionPartiallyAbortedPageWithStoreBufferEntries) \
V(CompactionSpaceDivideMultiplePages) \
V(CompactionSpaceDivideSinglePage) \
V(GCFlags) \
V(MarkCompactCollector) \
V(NoPromotion) \
V(NumberStringCacheSize) \
V(ObjectGroups) \
V(Promotion) \
V(Regression39128) \
V(ResetWeakHandle) \
V(StressHandles) \
V(TestMemoryReducerSampleJsCalls) \
V(TestSizeOfObjects) \
V(WriteBarriersInCopyJSObject)

@@ -0,0 +1,340 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/utils-inl.h"
namespace v8 {
namespace internal {
static void CheckInvariantsOfAbortedPage(Page* page) {
// Check invariants:
// 1) Markbits are cleared
// 2) The page is not marked as evacuation candidate anymore
// 3) The page is not marked as aborted compaction anymore.
CHECK(page->markbits()->IsClean());
CHECK(!page->IsEvacuationCandidate());
CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}
HEAP_TEST(CompactionFullAbortedPage) {
// Test the scenario where we reach OOM during compaction and the whole page
// is aborted.
// Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
// we can reach the state of a half aborted page.
FLAG_concurrent_sweeping = false;
FLAG_manual_evacuation_candidates_selection = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
{
HandleScope scope1(isolate);
PageIterator it(heap->old_space());
while (it.has_next()) {
it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
}
{
HandleScope scope2(isolate);
CHECK(heap->old_space()->Expand());
auto compaction_page_handles =
CreatePadding(heap, Page::kAllocatableMemory, TENURED);
Page* to_be_aborted_page =
Page::FromAddress(compaction_page_handles.front()->address());
to_be_aborted_page->SetFlag(
MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
heap->set_force_oom(true);
heap->CollectAllGarbage();
// Check that all handles still point to the same page, i.e., compaction
// has been aborted on the page.
for (Handle<FixedArray> object : compaction_page_handles) {
CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address()));
}
CheckInvariantsOfAbortedPage(to_be_aborted_page);
}
}
}
HEAP_TEST(CompactionPartiallyAbortedPage) {
// Test the scenario where we reach OOM during compaction and parts of the
// page have already been migrated to a new one.
// Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
// we can reach the state of a half aborted page.
FLAG_concurrent_sweeping = false;
FLAG_manual_evacuation_candidates_selection = true;
const int object_size = 128 * KB;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
{
HandleScope scope1(isolate);
PageIterator it(heap->old_space());
while (it.has_next()) {
it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
}
{
HandleScope scope2(isolate);
// Fill another page with objects of size {object_size} (last one is
// properly adjusted).
CHECK(heap->old_space()->Expand());
auto compaction_page_handles =
CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
Page* to_be_aborted_page =
Page::FromAddress(compaction_page_handles.front()->address());
to_be_aborted_page->SetFlag(
MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
{
// Add another page that is filled with {num_objects} objects of size
// {object_size}.
HandleScope scope3(isolate);
CHECK(heap->old_space()->Expand());
const int num_objects = 3;
std::vector<Handle<FixedArray>> page_to_fill_handles = CreatePadding(
heap, object_size * num_objects, TENURED, object_size);
Page* page_to_fill =
Page::FromAddress(page_to_fill_handles.front()->address());
heap->set_force_oom(true);
heap->CollectAllGarbage();
bool migration_aborted = false;
for (Handle<FixedArray> object : compaction_page_handles) {
// Once compaction has been aborted, all following objects still have
// to be on the initial page.
CHECK(!migration_aborted ||
(Page::FromAddress(object->address()) == to_be_aborted_page));
if (Page::FromAddress(object->address()) == to_be_aborted_page) {
// This object has not been migrated.
migration_aborted = true;
} else {
CHECK_EQ(Page::FromAddress(object->address()), page_to_fill);
}
}
// Check that we actually created a scenario with a partially aborted
// page.
CHECK(migration_aborted);
CheckInvariantsOfAbortedPage(to_be_aborted_page);
}
}
}
}
HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
// Test the scenario where we reach OOM during compaction and parts of the
// page have already been migrated to a new one. Objects on the aborted page
// are linked together. This test makes sure that intra-aborted page pointers
// get properly updated.
// Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
// we can reach the state of a half aborted page.
FLAG_concurrent_sweeping = false;
FLAG_manual_evacuation_candidates_selection = true;
const int object_size = 128 * KB;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
{
HandleScope scope1(isolate);
Handle<FixedArray> root_array =
isolate->factory()->NewFixedArray(10, TENURED);
PageIterator it(heap->old_space());
while (it.has_next()) {
it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
}
Page* to_be_aborted_page = nullptr;
{
HandleScope temporary_scope(isolate);
// Fill a fresh page with objects of size {object_size} (last one is
// properly adjusted).
CHECK(heap->old_space()->Expand());
std::vector<Handle<FixedArray>> compaction_page_handles =
CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
to_be_aborted_page =
Page::FromAddress(compaction_page_handles.front()->address());
to_be_aborted_page->SetFlag(
MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
}
root_array->set(0, *compaction_page_handles.back());
}
{
// Add another page that is filled with {num_objects} objects of size
// {object_size}.
HandleScope scope3(isolate);
CHECK(heap->old_space()->Expand());
const int num_objects = 2;
int used_memory = object_size * num_objects;
std::vector<Handle<FixedArray>> page_to_fill_handles =
CreatePadding(heap, used_memory, TENURED, object_size);
Page* page_to_fill =
Page::FromAddress(page_to_fill_handles.front()->address());
heap->set_force_oom(true);
heap->CollectAllGarbage();
// The following check makes sure that we compacted "some" objects, while
// leaving others in place.
bool in_place = true;
Handle<FixedArray> current = root_array;
while (current->get(0) != heap->undefined_value()) {
current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
CHECK(current->IsFixedArray());
if (Page::FromAddress(current->address()) != to_be_aborted_page) {
in_place = false;
}
bool on_aborted_page =
Page::FromAddress(current->address()) == to_be_aborted_page;
bool on_fill_page =
Page::FromAddress(current->address()) == page_to_fill;
CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
}
// Check that we at least migrated one object, as otherwise the test would
// not trigger.
CHECK(!in_place);
CheckInvariantsOfAbortedPage(to_be_aborted_page);
}
}
}
HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
// Test the scenario where we reach OOM during compaction and parts of the
// page have already been migrated to a new one. Objects on the aborted page
// are linked together and the very first object on the aborted page points
// into new space. The test verifies that the store buffer entries are
// properly cleared and rebuilt after aborting a page. Failing to do so can
// result in other objects being allocated in the free space where their
// payload looks like a valid new space pointer.
// Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
// we can reach the state of a half aborted page.
FLAG_concurrent_sweeping = false;
FLAG_manual_evacuation_candidates_selection = true;
const int object_size = 128 * KB;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
{
HandleScope scope1(isolate);
Handle<FixedArray> root_array =
isolate->factory()->NewFixedArray(10, TENURED);
PageIterator it(heap->old_space());
while (it.has_next()) {
it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
}
Page* to_be_aborted_page = nullptr;
{
HandleScope temporary_scope(isolate);
// Fill another page with objects of size {object_size} (last one is
// properly adjusted).
CHECK(heap->old_space()->Expand());
auto compaction_page_handles =
CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
// Sanity check that we have enough space for linking up arrays.
CHECK_GE(compaction_page_handles.front()->length(), 2);
to_be_aborted_page =
Page::FromAddress(compaction_page_handles.front()->address());
to_be_aborted_page->SetFlag(
MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
}
root_array->set(0, *compaction_page_handles.back());
Handle<FixedArray> new_space_array =
isolate->factory()->NewFixedArray(1, NOT_TENURED);
CHECK(heap->InNewSpace(*new_space_array));
compaction_page_handles.front()->set(1, *new_space_array);
}
{
// Add another page that is filled with {num_objects} objects of size
// {object_size}.
HandleScope scope3(isolate);
CHECK(heap->old_space()->Expand());
const int num_objects = 2;
int used_memory = object_size * num_objects;
std::vector<Handle<FixedArray>> page_to_fill_handles =
CreatePadding(heap, used_memory, TENURED, object_size);
Page* page_to_fill =
Page::FromAddress(page_to_fill_handles.front()->address());
heap->set_force_oom(true);
heap->CollectAllGarbage();
// The following check makes sure that we compacted "some" objects, while
// leaving others in place.
bool in_place = true;
Handle<FixedArray> current = root_array;
while (current->get(0) != heap->undefined_value()) {
current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
CHECK(!heap->InNewSpace(*current));
CHECK(current->IsFixedArray());
if (Page::FromAddress(current->address()) != to_be_aborted_page) {
in_place = false;
}
bool on_aborted_page =
Page::FromAddress(current->address()) == to_be_aborted_page;
bool on_fill_page =
Page::FromAddress(current->address()) == page_to_fill;
CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
}
// Check that we at least migrated one object, as otherwise the test would
// not trigger.
CHECK(!in_place);
CheckInvariantsOfAbortedPage(to_be_aborted_page);
// Allocate a new object in new space.
Handle<FixedArray> holder =
isolate->factory()->NewFixedArray(10, NOT_TENURED);
// Create a broken address that looks like a tagged pointer to a new space
// object.
Address broken_address = holder->address() + 2 * kPointerSize + 1;
// Convert it to a vector to create a string from it.
Vector<const uint8_t> string_to_broken_addresss(
reinterpret_cast<const uint8_t*>(&broken_address), 8);
Handle<String> string;
do {
// We know that the interesting slot will be on the aborted page and
// hence we allocate until we get our string on the aborted page.
// We used slot 1 in the fixed size array which corresponds to the
// first word in the string. Since the first object definitely
// migrated we can just allocate until we hit the aborted page.
string = isolate->factory()
->NewStringFromOneByte(string_to_broken_addresss, TENURED)
.ToHandleChecked();
} while (Page::FromAddress(string->address()) != to_be_aborted_page);
// If store buffer entries are not properly filtered/reset for aborted
// pages we have now a broken address at an object slot in old space and
// the following scavenge will crash.
heap->CollectGarbage(NEW_SPACE);
}
}
}
} // namespace internal
} // namespace v8

@@ -16,34 +16,32 @@ namespace v8 {
namespace internal {
static int LenFromSize(int size) {
return (size - i::FixedArray::kHeaderSize) / i::kPointerSize;
return (size - FixedArray::kHeaderSize) / kPointerSize;
}
static inline void CreatePadding(i::Heap* heap, int padding_size,
i::PretenureFlag tenure) {
const int max_number_of_objects = 20;
v8::internal::Handle<v8::internal::FixedArray>
big_objects[max_number_of_objects];
i::Isolate* isolate = heap->isolate();
static inline std::vector<Handle<FixedArray>> CreatePadding(
Heap* heap, int padding_size, PretenureFlag tenure,
int object_size = Page::kMaxRegularHeapObjectSize) {
std::vector<Handle<FixedArray>> handles;
Isolate* isolate = heap->isolate();
int allocate_memory;
int length;
int free_memory = padding_size;
if (tenure == i::TENURED) {
int current_free_memory =
static_cast<int>(*heap->old_space()->allocation_limit_address() -
*heap->old_space()->allocation_top_address());
CHECK(padding_size <= current_free_memory || current_free_memory == 0);
heap->old_space()->EmptyAllocationInfo();
int overall_free_memory = static_cast<int>(heap->old_space()->Available());
CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
} else {
heap->new_space()->DisableInlineAllocationSteps();
int current_free_memory =
int overall_free_memory =
static_cast<int>(*heap->new_space()->allocation_limit_address() -
*heap->new_space()->allocation_top_address());
CHECK(padding_size <= current_free_memory || current_free_memory == 0);
CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
}
for (int i = 0; i < max_number_of_objects && free_memory > 0; i++) {
if (free_memory > i::Page::kMaxRegularHeapObjectSize) {
allocate_memory = i::Page::kMaxRegularHeapObjectSize;
while (free_memory > 0) {
if (free_memory > object_size) {
allocate_memory = object_size;
length = LenFromSize(allocate_memory);
} else {
allocate_memory = free_memory;
@@ -55,11 +53,12 @@ static inline void CreatePadding(i::Heap* heap, int padding_size,
break;
}
}
big_objects[i] = isolate->factory()->NewFixedArray(length, tenure);
CHECK((tenure == i::NOT_TENURED && heap->InNewSpace(*big_objects[i])) ||
(tenure == i::TENURED && heap->InOldSpace(*big_objects[i])));
handles.push_back(isolate->factory()->NewFixedArray(length, tenure));
CHECK((tenure == NOT_TENURED && heap->InNewSpace(*handles.back())) ||
(tenure == TENURED && heap->InOldSpace(*handles.back())));
free_memory -= allocate_memory;
}
return handles;
}
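
For reference, a minimal usage sketch of the updated helper, mirroring the
calls in the new compaction tests above (heap is an i::Heap*, obtained as in
those tests; 128 * KB is the object size they use):

// Fill the rest of the current old-space page with 128 KB fixed arrays and
// keep handles to them; the last array is shrunk to fit the remaining space.
std::vector<Handle<FixedArray>> handles =
    CreatePadding(heap, Page::kAllocatableMemory, TENURED, 128 * KB);

Returning the handles (instead of filling a fixed-size local array as before)
is what lets the compaction tests check, per object, which page it ended up on
after the aborted GC; the optional object_size parameter controls how the
padding is split up.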