v8/test/cctest/heap/heap-utils.cc

// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "test/cctest/heap/heap-utils.h"

#include "src/base/platform/mutex.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/safepoint.h"
#include "test/cctest/cctest.h"

namespace v8 {
namespace internal {
namespace heap {

void InvokeScavenge(Isolate* isolate) {
  CcTest::CollectGarbage(i::NEW_SPACE, isolate);
}

void InvokeMarkSweep(Isolate* isolate) { CcTest::CollectAllGarbage(isolate); }
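// Illustrative usage sketch (not part of the original file): a cctest would
// typically invoke these helpers against the shared test isolate, e.g.:
//
//   TEST(ExampleForcedGCs) {  // hypothetical test name
//     CcTest::InitializeVM();
//     heap::InvokeScavenge(CcTest::i_isolate());   // young-generation GC
//     heap::InvokeMarkSweep(CcTest::i_isolate());  // full mark-compact GC
//   }
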
void SealCurrentObjects(Heap* heap) {
  // If you see this check failing, disable the flag at the start of your test:
  // FLAG_stress_concurrent_allocation = false;
  // Background thread allocating concurrently interferes with this function.
  CHECK(!FLAG_stress_concurrent_allocation);
  CcTest::CollectAllGarbage();
  CcTest::CollectAllGarbage();
  heap->mark_compact_collector()->EnsureSweepingCompleted();
  heap->old_space()->FreeLinearAllocationArea();
  for (Page* page : *heap->old_space()) {
    page->MarkNeverAllocateForTesting();
  }
}

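// Illustrative usage sketch: a test calls SealCurrentObjects() so that all
// subsequent old-space allocations land on fresh pages, e.g.:
//
//   heap::SealCurrentObjects(CcTest::heap());
//   // From here on, no allocation can reuse the sealed pages.
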
int FixedArrayLenFromSize(int size) {
  return std::min({(size - FixedArray::kHeaderSize) / kTaggedSize,
                   FixedArray::kMaxRegularLength});
}

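// Worked example (assumes a 64-bit build without pointer compression, i.e.
// kTaggedSize == 8 and FixedArray::kHeaderSize == 16): for size == 128,
// (128 - 16) / 8 == 14, so FixedArrayLenFromSize(128) returns 14. With
// pointer compression the constants differ, but the formula is the same.
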
std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
                                                                int remainder) {
  PauseAllocationObserversScope pause_observers(heap);
  std::vector<Handle<FixedArray>> handles;
  Isolate* isolate = heap->isolate();
  const int kArraySize = 128;
  const int kArrayLen = heap::FixedArrayLenFromSize(kArraySize);
  Handle<FixedArray> array;
  int allocated = 0;
  do {
    if (allocated + kArraySize * 2 >
        static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) {
      int size =
          kArraySize * 2 -
          ((allocated + kArraySize * 2) -
           static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) -
          remainder;
      int last_array_len = heap::FixedArrayLenFromSize(size);
      array = isolate->factory()->NewFixedArray(last_array_len,
                                                AllocationType::kOld);
      CHECK_EQ(size, array->Size());
      allocated += array->Size() + remainder;
    } else {
      array =
          isolate->factory()->NewFixedArray(kArrayLen, AllocationType::kOld);
      allocated += array->Size();
      CHECK_EQ(kArraySize, array->Size());
    }
    if (handles.empty()) {
      // Check that allocations started on a new page.
      CHECK_EQ(array->address(), Page::FromHeapObject(*array)->area_start());
    }
    handles.push_back(array);
  } while (allocated <
           static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()));
  return handles;
}

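// Illustrative usage sketch: fill the current old-space page so that exactly
// 64 bytes at its end remain unallocated (the returned handles keep the
// arrays alive):
//
//   std::vector<Handle<FixedArray>> arrays =
//       heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), 64);
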
std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
                                              AllocationType allocation,
                                              int object_size) {
  std::vector<Handle<FixedArray>> handles;
  Isolate* isolate = heap->isolate();
  int allocate_memory;
  int length;
  int free_memory = padding_size;
  if (allocation == i::AllocationType::kOld) {
    heap->old_space()->FreeLinearAllocationArea();
    int overall_free_memory = static_cast<int>(heap->old_space()->Available());
    CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
  } else {
    int overall_free_memory = static_cast<int>(heap->new_space()->Available());
    CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
  }
  while (free_memory > 0) {
    if (free_memory > object_size) {
      allocate_memory = object_size;
      length = FixedArrayLenFromSize(allocate_memory);
    } else {
      allocate_memory = free_memory;
      length = FixedArrayLenFromSize(allocate_memory);
      if (length <= 0) {
        // Not enough room to create another FixedArray, so create a filler.
        if (allocation == i::AllocationType::kOld) {
          heap->CreateFillerObjectAt(
              *heap->old_space()->allocation_top_address(), free_memory,
              ClearRecordedSlots::kNo);
        } else {
          heap->CreateFillerObjectAt(
              *heap->new_space()->allocation_top_address(), free_memory,
              ClearRecordedSlots::kNo);
        }
        break;
      }
    }
    handles.push_back(isolate->factory()->NewFixedArray(length, allocation));
    CHECK((allocation == AllocationType::kYoung &&
           heap->new_space()->Contains(*handles.back())) ||
          (allocation == AllocationType::kOld &&
           heap->InOldSpace(*handles.back())));
    free_memory -= handles.back()->Size();
  }
  return handles;
}

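// Illustrative usage sketch (example values only): pad new space with 4 KB
// of FixedArrays, none larger than 256 bytes:
//
//   std::vector<Handle<FixedArray>> padding = heap::CreatePadding(
//       CcTest::heap(), 4 * KB, AllocationType::kYoung, 256);
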
bool FillCurrentPage(v8::internal::NewSpace* space,
                     std::vector<Handle<FixedArray>>* out_handles) {
  return heap::FillCurrentPageButNBytes(space, 0, out_handles);
}

bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
                              std::vector<Handle<FixedArray>>* out_handles) {
  PauseAllocationObserversScope pause_observers(space->heap());
  // We cannot rely on `space->limit()` to point to the end of the current page
  // in the case where inline allocations are disabled; it actually points to
  // the current allocation pointer.
  DCHECK_IMPLIES(space->heap()->inline_allocation_disabled(),
                 space->limit() == space->top());
  int space_remaining =
      static_cast<int>(space->to_space().page_high() - space->top());
  CHECK(space_remaining >= extra_bytes);
  int new_linear_size = space_remaining - extra_bytes;
  if (new_linear_size == 0) return false;
  std::vector<Handle<FixedArray>> handles = heap::CreatePadding(
      space->heap(), new_linear_size, i::AllocationType::kYoung);
  if (out_handles != nullptr) {
    out_handles->insert(out_handles->end(), handles.begin(), handles.end());
  }
  return true;
}

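// Illustrative usage sketch: leave exactly 128 bytes of linear allocation
// space on the current new-space page, so the next small allocation still
// fits but anything larger must move to a fresh page:
//
//   heap::FillCurrentPageButNBytes(CcTest::heap()->new_space(), 128, nullptr);
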
void SimulateFullSpace(v8::internal::NewSpace* space,
                       std::vector<Handle<FixedArray>>* out_handles) {
  // If you see this check failing, disable the flag at the start of your test:
  // FLAG_stress_concurrent_allocation = false;
  // Background thread allocating concurrently interferes with this function.
  CHECK(!FLAG_stress_concurrent_allocation);
  while (heap::FillCurrentPage(space, out_handles) || space->AddFreshPage()) {
  }
}

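// Illustrative usage sketch: exhaust new space so the next young-generation
// allocation has to trigger a scavenge:
//
//   v8::HandleScope scope(CcTest::isolate());
//   heap::SimulateFullSpace(CcTest::heap()->new_space());
//   heap::InvokeScavenge(CcTest::i_isolate());
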
void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
  const double kStepSizeInMs = 100;
  CHECK(FLAG_incremental_marking);
  i::IncrementalMarking* marking = heap->incremental_marking();
  i::MarkCompactCollector* collector = heap->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    SafepointScope scope(heap);
    collector->EnsureSweepingCompleted();
  }
  CHECK(marking->IsMarking() || marking->IsStopped() || marking->IsComplete());
  if (marking->IsStopped()) {
    heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
                                  i::GarbageCollectionReason::kTesting);
  }
  CHECK(marking->IsMarking() || marking->IsComplete());
  if (!force_completion) return;

  while (!marking->IsComplete()) {
    marking->Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                  i::StepOrigin::kV8);
    if (marking->IsReadyToOverApproximateWeakClosure()) {
      SafepointScope scope(heap);
      marking->FinalizeIncrementally();
    }
  }
  CHECK(marking->IsComplete());
}

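// Illustrative usage sketch: drive incremental marking to completion so that
// a following full GC finishes the cycle deterministically (tests usually
// enable --incremental-marking before the isolate is created):
//
//   heap::SimulateIncrementalMarking(CcTest::heap(), true);
//   CcTest::CollectAllGarbage();
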
void SimulateFullSpace(v8::internal::PagedSpace* space) {
  // If you see this check failing, disable the flag at the start of your test:
  // FLAG_stress_concurrent_allocation = false;
  // Background thread allocating concurrently interferes with this function.
  CHECK(!FLAG_stress_concurrent_allocation);
  CodeSpaceMemoryModificationScope modification_scope(space->heap());
  i::MarkCompactCollector* collector = space->heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    collector->EnsureSweepingCompleted();
  }
  space->FreeLinearAllocationArea();
  space->ResetFreeList();
}

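// Illustrative usage sketch: make old space look full so the next old-space
// allocation must expand the space or trigger a GC:
//
//   heap::SimulateFullSpace(CcTest::heap()->old_space());
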
void AbandonCurrentlyFreeMemory(PagedSpace* space) {
  space->FreeLinearAllocationArea();
  for (Page* page : *space) {
    page->MarkNeverAllocateForTesting();
  }
}

void GcAndSweep(Heap* heap, AllocationSpace space) {
  heap->CollectGarbage(space, GarbageCollectionReason::kTesting);
  if (heap->mark_compact_collector()->sweeping_in_progress()) {
    SafepointScope scope(heap);
    heap->mark_compact_collector()->EnsureSweepingCompleted();
  }
}

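// Illustrative usage sketch: collect the old generation and block until
// concurrent sweeping is done before inspecting the heap:
//
//   heap::GcAndSweep(CcTest::heap(), OLD_SPACE);
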
void ForceEvacuationCandidate(Page* page) {
  CHECK(FLAG_manual_evacuation_candidates_selection);
  page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
  PagedSpace* space = static_cast<PagedSpace*>(page->owner());
  DCHECK_NOT_NULL(space);
  Address top = space->top();
  Address limit = space->limit();
  if (top < limit && Page::FromAllocationAreaAddress(top) == page) {
    // Create filler object to keep page iterable if it was iterable.
    int remaining = static_cast<int>(limit - top);
    space->heap()->CreateFillerObjectAt(top, remaining,
                                        ClearRecordedSlots::kNo);
    base::MutexGuard guard(space->mutex());
    space->FreeLinearAllocationArea();
  }
}

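// Illustrative usage sketch: with --manual-evacuation-candidates-selection
// enabled, a test can force the page of an old-space object to be evacuated
// by the next mark-compact GC (`array` is a hypothetical old-space handle):
//
//   heap::ForceEvacuationCandidate(Page::FromHeapObject(*array));
//   CcTest::CollectAllGarbage();
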
bool InCorrectGeneration(HeapObject object) {
  return FLAG_single_generation ? !i::Heap::InYoungGeneration(object)
                                : i::Heap::InYoungGeneration(object);
}

void GrowNewSpace(Heap* heap) {
  SafepointScope scope(heap);
  if (!heap->new_space()->IsAtMaximumCapacity()) {
    heap->new_space()->Grow();
  }
}

void GrowNewSpaceToMaximumCapacity(Heap* heap) {
  SafepointScope scope(heap);
  while (!heap->new_space()->IsAtMaximumCapacity()) {
    heap->new_space()->Grow();
  }
}

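// Illustrative usage sketch: a test that needs a large young generation can
// grow it up front:
//
//   heap::GrowNewSpaceToMaximumCapacity(CcTest::heap());
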
} // namespace heap
} // namespace internal
} // namespace v8