// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/factory.h"
#include "src/heap/mark-compact.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"

namespace v8 {
namespace internal {
namespace heap {

namespace {

void CheckInvariantsOfAbortedPage(Page* page) {
  // Check invariants:
  // 1) Markbits are cleared.
  // 2) The page is no longer marked as an evacuation candidate.
  // 3) The page no longer carries the aborted-compaction flag.
  CHECK(page->heap()
            ->mark_compact_collector()
            ->non_atomic_marking_state()
            ->bitmap(page)
            ->IsClean());
  CHECK(!page->IsEvacuationCandidate());
  CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}
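
// Verifies that every handle in {handles} refers to an object located on
// {page}.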
void CheckAllObjectsOnPage(std::vector<Handle<FixedArray>>& handles,
                           Page* page) {
  for (auto& fixed_array : handles) {
    CHECK(Page::FromAddress(fixed_array->address()) == page);
  }
}

}  // namespace

HEAP_TEST(CompactionFullAbortedPage) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and the whole page
  // is aborted.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a fully aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);

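    // Seal the current heap state: after this, new allocations go to fresh
    // pages rather than into the pages that already hold objects.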
    heap::SealCurrentObjects(heap);

    {
      HandleScope scope2(isolate);
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles =
          heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED);
      Page* to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);

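      // Fail the next old-space allocation so that evacuating the candidate
      // page hits OOM and compaction of the entire page is aborted.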
      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // Check that all handles still point to the same page, i.e., compaction
      // has been aborted on the page.
      for (Handle<FixedArray> object : compaction_page_handles) {
        CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address()));
      }
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}

HEAP_TEST(CompactionPartiallyAbortedPage) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half-aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;

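  // Using several equally sized objects per page lets the evacuation abort
  // partway through, with some objects migrated and some left in place.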
  const int objects_per_page = 10;
  const int object_size = Page::kAllocatableMemory / objects_per_page;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);

    heap::SealCurrentObjects(heap);

    {
      HandleScope scope2(isolate);
      // Fill another page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap, Page::kAllocatableMemory, TENURED, object_size);
      Page* to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);

      {
        // Add another page that is filled with {num_objects} objects of size
        // {object_size}.
        HandleScope scope3(isolate);
        CHECK(heap->old_space()->Expand());
        const int num_objects = 3;
        std::vector<Handle<FixedArray>> page_to_fill_handles =
            heap::CreatePadding(heap, object_size * num_objects, TENURED,
                                object_size);
        Page* page_to_fill =
            Page::FromAddress(page_to_fill_handles.front()->address());

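        // Fail the next old-space allocation during evacuation; only the
        // objects that fit on {page_to_fill} migrate before the page aborts.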
        heap->set_force_oom(true);
        CcTest::CollectAllGarbage();
        heap->mark_compact_collector()->EnsureSweepingCompleted();

        bool migration_aborted = false;
        for (Handle<FixedArray> object : compaction_page_handles) {
          // Once compaction has been aborted, all following objects still have
          // to be on the initial page.
          CHECK(!migration_aborted ||
                (Page::FromAddress(object->address()) == to_be_aborted_page));
          if (Page::FromAddress(object->address()) == to_be_aborted_page) {
            // This object has not been migrated.
            migration_aborted = true;
          } else {
            CHECK_EQ(Page::FromAddress(object->address()), page_to_fill);
          }
        }
        // Check that we actually created a scenario with a partially aborted
        // page.
        CHECK(migration_aborted);
        CheckInvariantsOfAbortedPage(to_be_aborted_page);
      }
    }
  }
}

HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together. This test makes sure that intra-aborted-page
  // pointers get properly updated.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half-aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size = Page::kAllocatableMemory / objects_per_page;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, TENURED);

    heap::SealCurrentObjects(heap);

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill a fresh page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      std::vector<Handle<FixedArray>> compaction_page_handles =
          heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED,
                              object_size);
      to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
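      // Chain the objects together through slot 0, last to first, so that the
      // aborted page contains pointers into itself.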
      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
    }
    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          heap::CreatePadding(heap, used_memory, TENURED, object_size);
      Page* page_to_fill =
          Page::FromAddress(page_to_fill_handles.front()->address());

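      // Fail the next old-space allocation during evacuation; part of the
      // linked chain migrates to {page_to_fill} before the page aborts.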
      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // The following check makes sure that we compacted "some" objects, while
      // leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
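      // Walk the chain from the root: objects still on the aborted page come
      // first, and once the walk crosses the abort point every remaining
      // object must have been migrated to {page_to_fill}.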
      while (current->get(0) != ReadOnlyRoots(heap).undefined_value()) {
        current =
            Handle<FixedArray>(FixedArray::cast(current->get(0)), isolate);
        CHECK(current->IsFixedArray());
        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromAddress(current->address()) == to_be_aborted_page;
        bool on_fill_page =
            Page::FromAddress(current->address()) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we at least migrated one object, as otherwise the test
      // would not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}

HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together and the very first object on the aborted page points
  // into new space. The test verifies that the store buffer entries are
  // properly cleared and rebuilt after aborting a page. Failing to do so can
  // result in other objects being allocated in the free space where their
  // payload looks like a valid new-space pointer.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half-aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size = Page::kAllocatableMemory / objects_per_page;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, TENURED);
    heap::SealCurrentObjects(heap);

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill another page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap, Page::kAllocatableMemory, TENURED, object_size);
      // Sanity check that we have enough space for linking up arrays.
      CHECK_GE(compaction_page_handles.front()->length(), 2);
      to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
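      // Point slot 1 of the first (lowest) object on the candidate page at a
      // new-space object; this creates the old-to-new store buffer entry the
      // test is about.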
      Handle<FixedArray> new_space_array =
          isolate->factory()->NewFixedArray(1, NOT_TENURED);
      CHECK(Heap::InNewSpace(*new_space_array));
      compaction_page_handles.front()->set(1, *new_space_array);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
    }

    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          heap::CreatePadding(heap, used_memory, TENURED, object_size);
      Page* page_to_fill =
          Page::FromAddress(page_to_fill_handles.front()->address());

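      // Fail the next old-space allocation during evacuation so that the page
      // holding the store buffer entry is only partially evacuated.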
      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // The following check makes sure that we compacted "some" objects, while
      // leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
      while (current->get(0) != ReadOnlyRoots(heap).undefined_value()) {
        current =
            Handle<FixedArray>(FixedArray::cast(current->get(0)), isolate);
        CHECK(!Heap::InNewSpace(*current));
        CHECK(current->IsFixedArray());
        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromAddress(current->address()) == to_be_aborted_page;
        bool on_fill_page =
            Page::FromAddress(current->address()) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we at least migrated one object, as otherwise the test
      // would not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);

      // Allocate a new object in new space.
      Handle<FixedArray> holder =
          isolate->factory()->NewFixedArray(10, NOT_TENURED);
      // Create a broken address that looks like a tagged pointer to a new
      // space object.
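      // The low bit set by "+ 1" is the heap object tag, making the raw word
      // indistinguishable from a tagged pointer into new space.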
      Address broken_address = holder->address() + 2 * kPointerSize + 1;
      // Convert it to a vector to create a string from it.
      Vector<const uint8_t> string_to_broken_address(
          reinterpret_cast<const uint8_t*>(&broken_address), kPointerSize);

      Handle<String> string;
      do {
        // We know that the interesting slot will be on the aborted page and
        // hence we allocate until we get our string on the aborted page.
        // We used slot 1 in the fixed size array, which corresponds to the
        // first word in the string. Since the first object definitely
        // migrated we can just allocate until we hit the aborted page.
        string = isolate->factory()
                     ->NewStringFromOneByte(string_to_broken_address, TENURED)
                     .ToHandleChecked();
      } while (Page::FromAddress(string->address()) != to_be_aborted_page);

      // If store buffer entries are not properly filtered/reset for aborted
      // pages, we now have a broken address at an object slot in old space and
      // the following scavenge will crash.
      CcTest::CollectGarbage(NEW_SPACE);
    }
  }
}

}  // namespace heap
}  // namespace internal
}  // namespace v8