diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index aa65e8d78a..674b03b6da 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -163,7 +163,8 @@ Heap::Heap()
       gc_callbacks_depth_(0),
       deserialization_complete_(false),
       strong_roots_list_(NULL),
-      array_buffer_tracker_(NULL) {
+      array_buffer_tracker_(NULL),
+      force_oom_(false) {
   // Allow build-time customization of the max semispace size. Building
   // V8 with snapshots and a non-default max semispace size is much
   // easier if you can define it as part of the build environment.
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 8eeb04f0cd..3919bdcc2c 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -817,6 +817,7 @@ class Heap {
   // TODO(hpayer): There is still a missmatch between capacity and actual
   // committed memory size.
   bool CanExpandOldGeneration(int size) {
+    if (force_oom_) return false;
     return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize();
   }
 
@@ -2117,6 +2118,8 @@ class Heap {
 
   MUST_USE_RESULT AllocationResult InternalizeString(String* str);
 
+  void set_force_oom(bool value) { force_oom_ = value; }
+
   // The amount of external memory registered through the API kept alive
   // by global handles
   int64_t amount_of_external_allocated_memory_;
@@ -2365,6 +2368,9 @@ class Heap {
 
   ArrayBufferTracker* array_buffer_tracker_;
 
+  // Used for testing purposes.
+  bool force_oom_;
+
   // Classes in "heap" can be friends.
   friend class AlwaysAllocateScope;
   friend class GCCallbacksScope;
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 4f16a3ae38..81fc924050 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -3774,6 +3774,16 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
       continue;
     }
 
+    if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
+      // We need to sweep the page to get it into an iterable state again. Note
+      // that this adds unusable memory into the free list that is later on
+      // (in the free list) dropped again. Since we only use the flag for
+      // testing this is fine.
+      Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+            IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
+      continue;
+    }
+
     // One unused page is kept, all further are released before sweeping them.
     if (p->LiveBytes() == 0) {
       if (unused_page_present) {
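Reviewer sketch (not part of the patch): force_oom_ turns every attempt to grow
the old generation into a simulated out-of-memory, because
CanExpandOldGeneration() guards the commitment of new old-space pages, so a
compacting GC can no longer acquire fresh target pages. A minimal sketch of how
a test drives it -- the test name is illustrative, and HEAP_TEST requires the
usual registration in heap-tester.h:

    HEAP_TEST(ForcedOomSketch) {
      CcTest::InitializeVM();
      Heap* heap = CcTest::i_isolate()->heap();
      heap->set_force_oom(true);   // CanExpandOldGeneration() now returns false.
      heap->CollectAllGarbage();   // Compaction gets no fresh pages and aborts.
      heap->set_force_oom(false);  // Reset so later allocations can grow again.
    }
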
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 988da6bc99..7e12d22c46 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -2291,7 +2291,7 @@ FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
   if (node == nullptr) return nullptr;
 
   Page* page = Page::FromAddress(node->address());
-  while ((node != nullptr) && page->IsEvacuationCandidate()) {
+  while ((node != nullptr) && !page->CanAllocate()) {
     available_ -= node->size();
     page->add_available_in_free_list(type_, -(node->Size()));
     node = node->next();
@@ -2333,7 +2333,7 @@ FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
 
     int size = cur_node->size();
     Page* page_for_node = Page::FromAddress(cur_node->address());
-    if ((size >= size_in_bytes) || page_for_node->IsEvacuationCandidate()) {
+    if ((size >= size_in_bytes) || !page_for_node->CanAllocate()) {
       // The node is either large enough or contained in an evacuation
       // candidate. In both cases we need to unlink it from the list.
       available_ -= size;
@@ -2347,7 +2347,7 @@ FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
         prev_non_evac_node->set_next(cur_node->next());
       }
       // For evacuation candidates we continue.
-      if (page_for_node->IsEvacuationCandidate()) {
+      if (!page_for_node->CanAllocate()) {
        page_for_node->add_available_in_free_list(type_, -size);
        continue;
      }
@@ -2758,8 +2758,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
 void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
   if (allocation_info_.top() >= allocation_info_.limit()) return;
 
-  if (Page::FromAllocationTop(allocation_info_.top())
-          ->IsEvacuationCandidate()) {
+  if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
     // Create filler object to keep page iterable if it was iterable.
     int remaining =
         static_cast<int>(allocation_info_.limit() - allocation_info_.top());
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 40604d424c..7a9b6915e7 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -323,6 +323,9 @@ class MemoryChunk {
     // candidates selection cycle.
     FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
 
+    // This flag is intended to be used for testing.
+    NEVER_ALLOCATE_ON_PAGE,
+
     // The memory chunk is already logically freed, however the actual freeing
     // still has to be performed.
     PRE_FREED,
@@ -682,6 +685,10 @@ class MemoryChunk {
     return IsFlagSet(EVACUATION_CANDIDATE);
   }
 
+  bool CanAllocate() {
+    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
+  }
+
   bool ShouldSkipEvacuationSlotRecording() {
     return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
   }
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index f144710e07..4e4674e6cf 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -96,6 +96,7 @@
         'gay-shortest.cc',
         'heap/heap-tester.h',
         'heap/test-alloc.cc',
+        'heap/test-compaction.cc',
         'heap/test-heap.cc',
         'heap/test-incremental-marking.cc',
         'heap/test-mark-compact.cc',
diff --git a/test/cctest/heap/heap-tester.h b/test/cctest/heap/heap-tester.h
index f2a1a243c9..0a0860bcc4 100644
--- a/test/cctest/heap/heap-tester.h
+++ b/test/cctest/heap/heap-tester.h
@@ -10,20 +10,24 @@
 // Tests that should have access to private methods of {v8::internal::Heap}.
 // Those tests need to be defined using HEAP_TEST(Name) { ... }.
-#define HEAP_TEST_METHODS(V)                   \
-  V(CompactionSpaceDivideMultiplePages)        \
-  V(CompactionSpaceDivideSinglePage)           \
-  V(GCFlags)                                   \
-  V(MarkCompactCollector)                      \
-  V(NoPromotion)                               \
-  V(NumberStringCacheSize)                     \
-  V(ObjectGroups)                              \
-  V(Promotion)                                 \
-  V(Regression39128)                           \
-  V(ResetWeakHandle)                           \
-  V(StressHandles)                             \
-  V(TestMemoryReducerSampleJsCalls)            \
-  V(TestSizeOfObjects)                         \
+#define HEAP_TEST_METHODS(V)                              \
+  V(CompactionFullAbortedPage)                            \
+  V(CompactionPartiallyAbortedPage)                       \
+  V(CompactionPartiallyAbortedPageIntraAbortedPointers)   \
+  V(CompactionPartiallyAbortedPageWithStoreBufferEntries) \
+  V(CompactionSpaceDivideMultiplePages)                   \
+  V(CompactionSpaceDivideSinglePage)                      \
+  V(GCFlags)                                              \
+  V(MarkCompactCollector)                                 \
+  V(NoPromotion)                                          \
+  V(NumberStringCacheSize)                                \
+  V(ObjectGroups)                                         \
+  V(Promotion)                                            \
+  V(Regression39128)                                      \
+  V(ResetWeakHandle)                                      \
+  V(StressHandles)                                        \
+  V(TestMemoryReducerSampleJsCalls)                       \
+  V(TestSizeOfObjects)                                    \
   V(WriteBarriersInCopyJSObject)
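Reviewer sketch (not part of the patch): each test in the new file below begins
by tagging every existing old-space page with NEVER_ALLOCATE_ON_PAGE, so that
subsequent TENURED allocations must land on a freshly expanded page, which is
then forced to become the evacuation candidate. A hypothetical helper showing
the pattern the tests inline:

    static void ForceAllocationsToFreshPages(Heap* heap) {
      // CanAllocate() (see spaces.h above) now returns false for all of these
      // pages: the free lists skip them, so new TENURED objects need new pages.
      PageIterator it(heap->old_space());
      while (it.has_next()) {
        it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
      }
    }
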
+ +#include "test/cctest/cctest.h" +#include "test/cctest/heap/heap-tester.h" +#include "test/cctest/heap/utils-inl.h" + +namespace v8 { +namespace internal { + +static void CheckInvariantsOfAbortedPage(Page* page) { + // Check invariants: + // 1) Markbits are cleared + // 2) The page is not marked as evacuation candidate anymore + // 3) The page is not marked as aborted compaction anymore. + CHECK(page->markbits()->IsClean()); + CHECK(!page->IsEvacuationCandidate()); + CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)); +} + + +HEAP_TEST(CompactionFullAbortedPage) { + // Test the scenario where we reach OOM during compaction and the whole page + // is aborted. + + // Disable concurrent sweeping to ensure memory is in an expected state, i.e., + // we can reach the state of a half aborted page. + FLAG_concurrent_sweeping = false; + FLAG_manual_evacuation_candidates_selection = true; + CcTest::InitializeVM(); + Isolate* isolate = CcTest::i_isolate(); + Heap* heap = isolate->heap(); + { + HandleScope scope1(isolate); + PageIterator it(heap->old_space()); + while (it.has_next()) { + it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE); + } + + { + HandleScope scope2(isolate); + CHECK(heap->old_space()->Expand()); + auto compaction_page_handles = + CreatePadding(heap, Page::kAllocatableMemory, TENURED); + Page* to_be_aborted_page = + Page::FromAddress(compaction_page_handles.front()->address()); + to_be_aborted_page->SetFlag( + MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); + + heap->set_force_oom(true); + heap->CollectAllGarbage(); + + // Check that all handles still point to the same page, i.e., compaction + // has been aborted on the page. + for (Handle object : compaction_page_handles) { + CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address())); + } + CheckInvariantsOfAbortedPage(to_be_aborted_page); + } + } +} + + +HEAP_TEST(CompactionPartiallyAbortedPage) { + // Test the scenario where we reach OOM during compaction and parts of the + // page have already been migrated to a new one. + + // Disable concurrent sweeping to ensure memory is in an expected state, i.e., + // we can reach the state of a half aborted page. + FLAG_concurrent_sweeping = false; + FLAG_manual_evacuation_candidates_selection = true; + + const int object_size = 128 * KB; + + CcTest::InitializeVM(); + Isolate* isolate = CcTest::i_isolate(); + Heap* heap = isolate->heap(); + { + HandleScope scope1(isolate); + PageIterator it(heap->old_space()); + while (it.has_next()) { + it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE); + } + + { + HandleScope scope2(isolate); + // Fill another page with objects of size {object_size} (last one is + // properly adjusted). + CHECK(heap->old_space()->Expand()); + auto compaction_page_handles = + CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size); + Page* to_be_aborted_page = + Page::FromAddress(compaction_page_handles.front()->address()); + to_be_aborted_page->SetFlag( + MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); + + { + // Add another page that is filled with {num_objects} objects of size + // {object_size}. 
+        HandleScope scope3(isolate);
+        CHECK(heap->old_space()->Expand());
+        const int num_objects = 3;
+        std::vector<Handle<FixedArray>> page_to_fill_handles = CreatePadding(
+            heap, object_size * num_objects, TENURED, object_size);
+        Page* page_to_fill =
+            Page::FromAddress(page_to_fill_handles.front()->address());
+
+        heap->set_force_oom(true);
+        heap->CollectAllGarbage();
+
+        bool migration_aborted = false;
+        for (Handle<FixedArray> object : compaction_page_handles) {
+          // Once compaction has been aborted, all following objects still have
+          // to be on the initial page.
+          CHECK(!migration_aborted ||
+                (Page::FromAddress(object->address()) == to_be_aborted_page));
+          if (Page::FromAddress(object->address()) == to_be_aborted_page) {
+            // This object has not been migrated.
+            migration_aborted = true;
+          } else {
+            CHECK_EQ(Page::FromAddress(object->address()), page_to_fill);
+          }
+        }
+        // Check that we actually created a scenario with a partially aborted
+        // page.
+        CHECK(migration_aborted);
+        CheckInvariantsOfAbortedPage(to_be_aborted_page);
+      }
+    }
+  }
+}
+
+
+HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
+  // Test the scenario where we reach OOM during compaction and parts of the
+  // page have already been migrated to a new one. Objects on the aborted page
+  // are linked together. This test makes sure that intra-aborted page pointers
+  // get properly updated.
+
+  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
+  // we can reach the state of a half aborted page.
+  FLAG_concurrent_sweeping = false;
+  FLAG_manual_evacuation_candidates_selection = true;
+
+  const int object_size = 128 * KB;
+
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  {
+    HandleScope scope1(isolate);
+    Handle<FixedArray> root_array =
+        isolate->factory()->NewFixedArray(10, TENURED);
+
+    PageIterator it(heap->old_space());
+    while (it.has_next()) {
+      it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
+    }
+
+    Page* to_be_aborted_page = nullptr;
+    {
+      HandleScope temporary_scope(isolate);
+      // Fill a fresh page with objects of size {object_size} (last one is
+      // properly adjusted).
+      CHECK(heap->old_space()->Expand());
+      std::vector<Handle<FixedArray>> compaction_page_handles =
+          CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
+      to_be_aborted_page =
+          Page::FromAddress(compaction_page_handles.front()->address());
+      to_be_aborted_page->SetFlag(
+          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
+        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
+      }
+      root_array->set(0, *compaction_page_handles.back());
+    }
+
+    {
+      // Add another page that is filled with {num_objects} objects of size
+      // {object_size}.
+      HandleScope scope3(isolate);
+      CHECK(heap->old_space()->Expand());
+      const int num_objects = 2;
+      int used_memory = object_size * num_objects;
+      std::vector<Handle<FixedArray>> page_to_fill_handles =
+          CreatePadding(heap, used_memory, TENURED, object_size);
+      Page* page_to_fill =
+          Page::FromAddress(page_to_fill_handles.front()->address());
+
+      heap->set_force_oom(true);
+      heap->CollectAllGarbage();
+
+      // The following check makes sure that we compacted "some" objects, while
+      // leaving others in place.
+      bool in_place = true;
+      Handle<FixedArray> current = root_array;
+      while (current->get(0) != heap->undefined_value()) {
+        current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
+        CHECK(current->IsFixedArray());
+        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
+          in_place = false;
+        }
+        bool on_aborted_page =
+            Page::FromAddress(current->address()) == to_be_aborted_page;
+        bool on_fill_page =
+            Page::FromAddress(current->address()) == page_to_fill;
+        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
+      }
+      // Check that we at least migrated one object, as otherwise the test would
+      // not trigger.
+      CHECK(!in_place);
+      CheckInvariantsOfAbortedPage(to_be_aborted_page);
+    }
+  }
+}
+
+
+HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
+  // Test the scenario where we reach OOM during compaction and parts of the
+  // page have already been migrated to a new one. Objects on the aborted page
+  // are linked together and the very first object on the aborted page points
+  // into new space. The test verifies that the store buffer entries are
+  // properly cleared and rebuilt after aborting a page. Failing to do so can
+  // result in other objects being allocated in the free space where their
+  // payload looks like a valid new space pointer.
+
+  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
+  // we can reach the state of a half aborted page.
+  FLAG_concurrent_sweeping = false;
+  FLAG_manual_evacuation_candidates_selection = true;
+
+  const int object_size = 128 * KB;
+
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  {
+    HandleScope scope1(isolate);
+    Handle<FixedArray> root_array =
+        isolate->factory()->NewFixedArray(10, TENURED);
+    PageIterator it(heap->old_space());
+    while (it.has_next()) {
+      it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
+    }
+
+    Page* to_be_aborted_page = nullptr;
+    {
+      HandleScope temporary_scope(isolate);
+      // Fill another page with objects of size {object_size} (last one is
+      // properly adjusted).
+      CHECK(heap->old_space()->Expand());
+      auto compaction_page_handles =
+          CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
+      // Sanity check that we have enough space for linking up arrays.
+      CHECK_GE(compaction_page_handles.front()->length(), 2);
+      to_be_aborted_page =
+          Page::FromAddress(compaction_page_handles.front()->address());
+      to_be_aborted_page->SetFlag(
+          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+
+      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
+        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
+      }
+      root_array->set(0, *compaction_page_handles.back());
+      Handle<FixedArray> new_space_array =
+          isolate->factory()->NewFixedArray(1, NOT_TENURED);
+      CHECK(heap->InNewSpace(*new_space_array));
+      compaction_page_handles.front()->set(1, *new_space_array);
+    }
+
+    {
+      // Add another page that is filled with {num_objects} objects of size
+      // {object_size}.
+      HandleScope scope3(isolate);
+      CHECK(heap->old_space()->Expand());
+      const int num_objects = 2;
+      int used_memory = object_size * num_objects;
+      std::vector<Handle<FixedArray>> page_to_fill_handles =
+          CreatePadding(heap, used_memory, TENURED, object_size);
+      Page* page_to_fill =
+          Page::FromAddress(page_to_fill_handles.front()->address());
+
+      heap->set_force_oom(true);
+      heap->CollectAllGarbage();
+
+      // The following check makes sure that we compacted "some" objects, while
+      // leaving others in place.
+      bool in_place = true;
+      Handle<FixedArray> current = root_array;
+      while (current->get(0) != heap->undefined_value()) {
+        current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
+        CHECK(!heap->InNewSpace(*current));
+        CHECK(current->IsFixedArray());
+        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
+          in_place = false;
+        }
+        bool on_aborted_page =
+            Page::FromAddress(current->address()) == to_be_aborted_page;
+        bool on_fill_page =
+            Page::FromAddress(current->address()) == page_to_fill;
+        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
+      }
+      // Check that we at least migrated one object, as otherwise the test would
+      // not trigger.
+      CHECK(!in_place);
+      CheckInvariantsOfAbortedPage(to_be_aborted_page);
+
+      // Allocate a new object in new space.
+      Handle<FixedArray> holder =
+          isolate->factory()->NewFixedArray(10, NOT_TENURED);
+      // Create a broken address that looks like a tagged pointer to a new space
+      // object.
+      Address broken_address = holder->address() + 2 * kPointerSize + 1;
+      // Convert it to a vector to create a string from it.
+      Vector<const uint8_t> string_to_broken_address(
+          reinterpret_cast<const uint8_t*>(&broken_address), 8);
+
+      Handle<String> string;
+      do {
+        // We know that the interesting slot will be on the aborted page and
+        // hence we allocate until we get our string on the aborted page.
+        // We used slot 1 in the fixed size array which corresponds to the
+        // first word in the string. Since the first object definitely
+        // migrated we can just allocate until we hit the aborted page.
+        string = isolate->factory()
+                     ->NewStringFromOneByte(string_to_broken_address, TENURED)
+                     .ToHandleChecked();
+      } while (Page::FromAddress(string->address()) != to_be_aborted_page);
+
+      // If store buffer entries are not properly filtered/reset for aborted
+      // pages we have now a broken address at an object slot in old space and
+      // the following scavenge will crash.
+      heap->CollectGarbage(NEW_SPACE);
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
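Reviewer sketch (not part of the patch): the string trick in
CompactionPartiallyAbortedPageWithStoreBufferEntries relies on a layout
coincidence: slot 1 of a FixedArray and the character payload of a
SeqOneByteString both begin three words into the object in this tree, so the
string's first characters land exactly on the recorded new-space slot. A check
one could add to document that assumption -- constant names follow objects.h;
byte values are for a 64-bit build:

    // Slot 1 of a FixedArray: 2-word header (map, length) + 1 element = 24.
    // First characters of a SeqOneByteString: 3-word header = 24.
    CHECK_EQ(FixedArray::kHeaderSize + kPointerSize,  // 16 + 8
             SeqOneByteString::kHeaderSize);          // 3 * kPointerSize
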
diff --git a/test/cctest/heap/utils-inl.h b/test/cctest/heap/utils-inl.h
index 8c2508fcb6..f255bb6c03 100644
--- a/test/cctest/heap/utils-inl.h
+++ b/test/cctest/heap/utils-inl.h
@@ -16,34 +16,32 @@
 namespace v8 {
 namespace internal {
 
 static int LenFromSize(int size) {
-  return (size - i::FixedArray::kHeaderSize) / i::kPointerSize;
+  return (size - FixedArray::kHeaderSize) / kPointerSize;
 }
 
-static inline void CreatePadding(i::Heap* heap, int padding_size,
-                                 i::PretenureFlag tenure) {
-  const int max_number_of_objects = 20;
-  v8::internal::Handle<v8::internal::FixedArray>
-      big_objects[max_number_of_objects];
-  i::Isolate* isolate = heap->isolate();
+static inline std::vector<Handle<FixedArray>> CreatePadding(
+    Heap* heap, int padding_size, PretenureFlag tenure,
+    int object_size = Page::kMaxRegularHeapObjectSize) {
+  std::vector<Handle<FixedArray>> handles;
+  Isolate* isolate = heap->isolate();
   int allocate_memory;
   int length;
   int free_memory = padding_size;
   if (tenure == i::TENURED) {
-    int current_free_memory =
-        static_cast<int>(*heap->old_space()->allocation_limit_address() -
-                         *heap->old_space()->allocation_top_address());
-    CHECK(padding_size <= current_free_memory || current_free_memory == 0);
+    heap->old_space()->EmptyAllocationInfo();
+    int overall_free_memory = static_cast<int>(heap->old_space()->Available());
+    CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
   } else {
     heap->new_space()->DisableInlineAllocationSteps();
-    int current_free_memory =
+    int overall_free_memory =
         static_cast<int>(*heap->new_space()->allocation_limit_address() -
                          *heap->new_space()->allocation_top_address());
-    CHECK(padding_size <= current_free_memory || current_free_memory == 0);
+    CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
   }
-  for (int i = 0; i < max_number_of_objects && free_memory > 0; i++) {
-    if (free_memory > i::Page::kMaxRegularHeapObjectSize) {
-      allocate_memory = i::Page::kMaxRegularHeapObjectSize;
+  while (free_memory > 0) {
+    if (free_memory > object_size) {
+      allocate_memory = object_size;
       length = LenFromSize(allocate_memory);
     } else {
       allocate_memory = free_memory;
@@ -55,11 +53,12 @@ static inline void CreatePadding(i::Heap* heap, int padding_size,
       break;
     }
   }
-    big_objects[i] = isolate->factory()->NewFixedArray(length, tenure);
-    CHECK((tenure == i::NOT_TENURED && heap->InNewSpace(*big_objects[i])) ||
-          (tenure == i::TENURED && heap->InOldSpace(*big_objects[i])));
+    handles.push_back(isolate->factory()->NewFixedArray(length, tenure));
+    CHECK((tenure == NOT_TENURED && heap->InNewSpace(*handles.back())) ||
+          (tenure == TENURED && heap->InOldSpace(*handles.back())));
     free_memory -= allocate_memory;
   }
+  return handles;
 }
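Reviewer sketch (not part of the patch): with CreatePadding() now returning its
handles, a caller can keep the padding objects alive across a GC and locate the
page they were allocated on. A usage sketch under the same assumptions as the
tests above (an initialized cctest VM; the 128 KB object size is illustrative):

    // Fill a fresh old-space page with 128 KB fixed arrays; the returned
    // handles keep the arrays alive across the forced-OOM GC.
    Heap* heap = CcTest::i_isolate()->heap();
    CHECK(heap->old_space()->Expand());
    std::vector<Handle<FixedArray>> padding =
        CreatePadding(heap, Page::kAllocatableMemory, TENURED, 128 * KB);
    Page* filled_page = Page::FromAddress(padding.front()->address());
    filled_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);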