v8/test/cctest/heap/heap-utils.cc
Leon Bettscheider 924cf85e00 [heap] IsMarkingComplete only for MajorMC
ShouldFinalize should only be called while major incremental marking is
active; it can crash while minor incremental marking is active if
MajorMC's local_marking_worklists_ has been reset.

The only caller is IsMarkingComplete. This CL changes the IsMarking
check to IsMajorMarking to solve this issue, and renames
IsMarkingComplete to IsMajorMarkingComplete.

Bug: v8:13012
Change-Id: Iba6bd5b7977ec8566c3ab0f047646d8cafd45038
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3879485
Commit-Queue: Leon Bettscheider <bettscheider@google.com>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83064}
2022-09-08 11:15:29 +00:00
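
The snippet below is a minimal standalone sketch of the guard this CL
introduces, not actual V8 code: IncrementalMarkingSketch, its Mode enum, and
the stand-in worklist type are invented here purely to illustrate why the
completion predicate must check IsMajorMarking() rather than IsMarking()
before consulting ShouldFinalize().

#include <cassert>

// Stand-in for MajorMC's local marking worklists mentioned in the commit
// message; in this sketch it is only valid while major marking is active.
struct MarkingWorklists {};

class IncrementalMarkingSketch {
 public:
  enum class Mode { kStopped, kMinor, kMajor };

  void Start(Mode mode, MarkingWorklists* worklists) {
    mode_ = mode;
    local_marking_worklists_ = worklists;
  }

  bool IsMarking() const { return mode_ != Mode::kStopped; }
  bool IsMajorMarking() const { return mode_ == Mode::kMajor; }

  // Touches major-marking state; calling it while only minor marking runs
  // (worklists reset to null) models the crash described above.
  bool ShouldFinalize() const {
    assert(local_marking_worklists_ != nullptr);
    return true;
  }

  // The fixed predicate: ShouldFinalize() is consulted only under major
  // marking, so minor marking short-circuits to false.
  bool IsMajorMarkingComplete() const {
    return IsMajorMarking() && ShouldFinalize();
  }

 private:
  Mode mode_ = Mode::kStopped;
  MarkingWorklists* local_marking_worklists_ = nullptr;
};

int main() {
  MarkingWorklists worklists;
  IncrementalMarkingSketch marking;

  // Minor marking with reset (null) worklists: the old IsMarking() guard
  // would have reached ShouldFinalize(); the new guard returns false safely.
  marking.Start(IncrementalMarkingSketch::Mode::kMinor, nullptr);
  assert(!marking.IsMajorMarkingComplete());

  // Major marking: ShouldFinalize() is only evaluated here.
  marking.Start(IncrementalMarkingSketch::Mode::kMajor, &worklists);
  assert(marking.IsMajorMarkingComplete());
  return 0;
}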


// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "test/cctest/heap/heap-utils.h"

#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/safepoint.h"
#include "test/cctest/cctest.h"

namespace v8 {
namespace internal {
namespace heap {

void InvokeScavenge(Isolate* isolate) {
  CcTest::CollectGarbage(i::NEW_SPACE, isolate);
}

void InvokeMarkSweep(Isolate* isolate) { CcTest::CollectAllGarbage(isolate); }
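
// A hypothetical test that wants all subsequent old-space allocations to land
// on fresh pages could call the helper below first:
//   heap::SealCurrentObjects(CcTest::heap());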
void SealCurrentObjects(Heap* heap) {
  // If you see this check failing, disable the flag at the start of your test:
  // v8_flags.stress_concurrent_allocation = false;
  // Background thread allocating concurrently interferes with this function.
  CHECK(!v8_flags.stress_concurrent_allocation);
  CcTest::CollectAllGarbage();
  CcTest::CollectAllGarbage();
  heap->mark_compact_collector()->EnsureSweepingCompleted(
      MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
  heap->old_space()->FreeLinearAllocationArea();
  for (Page* page : *heap->old_space()) {
    page->MarkNeverAllocateForTesting();
  }
}

int FixedArrayLenFromSize(int size) {
  return std::min({(size - FixedArray::kHeaderSize) / kTaggedSize,
                   FixedArray::kMaxRegularLength});
}

std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
                                                                int remainder) {
  PauseAllocationObserversScope pause_observers(heap);
  std::vector<Handle<FixedArray>> handles;
  Isolate* isolate = heap->isolate();
  const int kArraySize = 128;
  const int kArrayLen = heap::FixedArrayLenFromSize(kArraySize);
  Handle<FixedArray> array;
  int allocated = 0;
  do {
    if (allocated + kArraySize * 2 >
        static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) {
      int size =
          kArraySize * 2 -
          ((allocated + kArraySize * 2) -
           static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) -
          remainder;
      int last_array_len = heap::FixedArrayLenFromSize(size);
      array = isolate->factory()->NewFixedArray(last_array_len,
                                                AllocationType::kOld);
      CHECK_EQ(size, array->Size());
      allocated += array->Size() + remainder;
    } else {
      array =
          isolate->factory()->NewFixedArray(kArrayLen, AllocationType::kOld);
      allocated += array->Size();
      CHECK_EQ(kArraySize, array->Size());
    }
    if (handles.empty()) {
      // Check that allocations started on a new page.
      CHECK_EQ(array->address(), Page::FromHeapObject(*array)->area_start());
    }
    handles.push_back(array);
  } while (allocated <
           static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()));
  return handles;
}
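
// CreatePadding below consumes |padding_size| bytes of the requested space by
// allocating FixedArrays (plus a trailing filler object once the remainder is
// too small for another array). A hypothetical caller that wants to exhaust
// the currently available new space would pass
// static_cast<int>(heap->new_space()->Available()) as |padding_size| together
// with AllocationType::kYoung.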
std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
                                              AllocationType allocation,
                                              int object_size) {
  std::vector<Handle<FixedArray>> handles;
  Isolate* isolate = heap->isolate();
  int allocate_memory;
  int length;
  int free_memory = padding_size;
  if (allocation == i::AllocationType::kOld) {
    heap->old_space()->FreeLinearAllocationArea();
    int overall_free_memory = static_cast<int>(heap->old_space()->Available());
    CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
  } else {
    int overall_free_memory = static_cast<int>(heap->new_space()->Available());
    CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
  }
  while (free_memory > 0) {
    if (free_memory > object_size) {
      allocate_memory = object_size;
      length = FixedArrayLenFromSize(allocate_memory);
    } else {
      allocate_memory = free_memory;
      length = FixedArrayLenFromSize(allocate_memory);
      if (length <= 0) {
        // Not enough room to create another FixedArray, so create a filler.
        if (allocation == i::AllocationType::kOld) {
          heap->CreateFillerObjectAt(
              *heap->old_space()->allocation_top_address(), free_memory);
        } else {
          heap->CreateFillerObjectAt(
              *heap->new_space()->allocation_top_address(), free_memory);
        }
        break;
      }
    }
    handles.push_back(isolate->factory()->NewFixedArray(length, allocation));
    CHECK((allocation == AllocationType::kYoung &&
           heap->new_space()->Contains(*handles.back())) ||
          (allocation == AllocationType::kOld &&
           heap->InOldSpace(*handles.back())) ||
          v8_flags.single_generation);
    free_memory -= handles.back()->Size();
  }
  return handles;
}
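
// FillCurrentPage/FillCurrentPageButNBytes fill up the page currently used
// for new-space allocation; |out_handles| optionally receives the created
// arrays so a test can keep them alive. Hypothetical usage:
//   std::vector<Handle<FixedArray>> handles;
//   heap::FillCurrentPage(CcTest::heap()->new_space(), &handles);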
bool FillCurrentPage(v8::internal::NewSpace* space,
                     std::vector<Handle<FixedArray>>* out_handles) {
  return heap::FillCurrentPageButNBytes(space, 0, out_handles);
}

namespace {

int GetSpaceRemainingOnCurrentPage(v8::internal::NewSpace* space) {
  Address top = space->top();
  if ((top & kPageAlignmentMask) == 0) {
    // `top` pointing to the start of a page signifies that there is no room
    // left on the current page.
    return 0;
  }
  return static_cast<int>(Page::FromAddress(space->top())->area_end() - top);
}

}  // namespace
bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
                              std::vector<Handle<FixedArray>>* out_handles) {
  PauseAllocationObserversScope pause_observers(space->heap());
  // We cannot rely on `space->limit()` to point to the end of the current
  // page in the case where inline allocations are disabled; in that case it
  // actually points to the current allocation pointer.
  DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
                 space->limit() == space->top());
  int space_remaining = GetSpaceRemainingOnCurrentPage(space);
  CHECK(space_remaining >= extra_bytes);
  int new_linear_size = space_remaining - extra_bytes;
  if (new_linear_size == 0) return false;
  std::vector<Handle<FixedArray>> handles = heap::CreatePadding(
      space->heap(), space_remaining, i::AllocationType::kYoung);
  if (out_handles != nullptr) {
    out_handles->insert(out_handles->end(), handles.begin(), handles.end());
  }
  return true;
}
void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
  const double kStepSizeInMs = 100;
  CHECK(v8_flags.incremental_marking);
  i::IncrementalMarking* marking = heap->incremental_marking();
  i::MarkCompactCollector* collector = heap->mark_compact_collector();

  if (collector->sweeping_in_progress()) {
    SafepointScope scope(heap);
    collector->EnsureSweepingCompleted(
        MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
  }

  if (marking->IsMinorMarking()) {
    // If minor incremental marking is running, we need to finalize it first
    // because of the AdvanceForTesting call in this function which is
    // currently only possible for MajorMC.
    heap->CollectGarbage(NEW_SPACE, GarbageCollectionReason::kFinalizeMinorMC);
  }

  if (marking->IsStopped()) {
    heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
                                  i::GarbageCollectionReason::kTesting);
  }
  CHECK(marking->IsMarking());
  if (!force_completion) return;

  SafepointScope scope(heap);
  MarkingBarrier::PublishAll(heap);
  marking->MarkRootsForTesting();

  while (!marking->IsMajorMarkingComplete()) {
    marking->AdvanceForTesting(kStepSizeInMs);
  }
}
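
// Hypothetical usage of the helper below: drop the free lists so that the
// next old-space allocation has to expand the space onto a new page:
//   heap::SimulateFullSpace(CcTest::heap()->old_space());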
void SimulateFullSpace(v8::internal::PagedSpace* space) {
  // If you see this check failing, disable the flag at the start of your test:
  // v8_flags.stress_concurrent_allocation = false;
  // Background thread allocating concurrently interferes with this function.
  CHECK(!v8_flags.stress_concurrent_allocation);
  CodePageCollectionMemoryModificationScopeForTesting code_scope(space->heap());
  i::MarkCompactCollector* collector = space->heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    collector->EnsureSweepingCompleted(
        MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
  }
  space->FreeLinearAllocationArea();
  space->ResetFreeList();
}

void AbandonCurrentlyFreeMemory(PagedSpace* space) {
  space->FreeLinearAllocationArea();
  for (Page* page : *space) {
    page->MarkNeverAllocateForTesting();
  }
}

void GcAndSweep(Heap* heap, AllocationSpace space) {
  heap->CollectGarbage(space, GarbageCollectionReason::kTesting);
  if (heap->mark_compact_collector()->sweeping_in_progress()) {
    SafepointScope scope(heap);
    heap->mark_compact_collector()->EnsureSweepingCompleted(
        MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
  }
}
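
// Hypothetical usage of the helper below (requires
// v8_flags.manual_evacuation_candidates_selection to be enabled): force the
// page holding |obj| to be evacuated by the next full GC so the object moves:
//   heap::ForceEvacuationCandidate(Page::FromHeapObject(*obj));
//   CcTest::CollectAllGarbage();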
void ForceEvacuationCandidate(Page* page) {
  CHECK(v8_flags.manual_evacuation_candidates_selection);
  page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
  PagedSpace* space = static_cast<PagedSpace*>(page->owner());
  DCHECK_NOT_NULL(space);
  Address top = space->top();
  Address limit = space->limit();
  if (top < limit && Page::FromAllocationAreaAddress(top) == page) {
    // Create filler object to keep page iterable if it was iterable.
    int remaining = static_cast<int>(limit - top);
    space->heap()->CreateFillerObjectAt(top, remaining);
    base::MutexGuard guard(space->mutex());
    space->FreeLinearAllocationArea();
  }
}

bool InCorrectGeneration(HeapObject object) {
  return v8_flags.single_generation ? !i::Heap::InYoungGeneration(object)
                                    : i::Heap::InYoungGeneration(object);
}

void GrowNewSpace(Heap* heap) {
  SafepointScope scope(heap);
  if (!heap->new_space()->IsAtMaximumCapacity()) {
    heap->new_space()->Grow();
  }
}

void GrowNewSpaceToMaximumCapacity(Heap* heap) {
  SafepointScope scope(heap);
  while (!heap->new_space()->IsAtMaximumCapacity()) {
    heap->new_space()->Grow();
  }
}

}  // namespace heap
}  // namespace internal
}  // namespace v8