// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "test/cctest/heap/heap-utils.h"

#include "src/base/platform/mutex.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/safepoint.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
namespace heap {

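// Triggers a young generation GC (scavenge) on the given isolate.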
void InvokeScavenge(Isolate* isolate) {
  CcTest::CollectGarbage(i::NEW_SPACE, isolate);
}

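// Triggers a full mark-compact GC on the given isolate.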
void InvokeMarkSweep(Isolate* isolate) { CcTest::CollectAllGarbage(isolate); }

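// Performs two full GCs, finishes sweeping, and marks every old-space page as
// never-allocate, so that objects allocated afterwards end up on fresh pages.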
void SealCurrentObjects(Heap* heap) {
  // If you see this check failing, disable the flag at the start of your test:
  //   FLAG_stress_concurrent_allocation = false;
  // Background thread allocating concurrently interferes with this function.
  CHECK(!FLAG_stress_concurrent_allocation);
  CcTest::CollectAllGarbage();
  CcTest::CollectAllGarbage();
  heap->mark_compact_collector()->EnsureSweepingCompleted();
  heap->old_space()->FreeLinearAllocationArea();
  for (Page* page : *heap->old_space()) {
    page->MarkNeverAllocateForTesting();
  }
}

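// Returns the length of a FixedArray whose total object size is `size` bytes,
// capped at FixedArray::kMaxRegularLength.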
int FixedArrayLenFromSize(int size) {
  return std::min({(size - FixedArray::kHeaderSize) / kTaggedSize,
                   FixedArray::kMaxRegularLength});
}

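// Fills the current old-space page with 128-byte FixedArrays until only
// `remainder` bytes are left free, returning the handles that keep the arrays
// alive.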
std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
                                                                int remainder) {
  PauseAllocationObserversScope pause_observers(heap);
  std::vector<Handle<FixedArray>> handles;
  Isolate* isolate = heap->isolate();
  const int kArraySize = 128;
  const int kArrayLen = heap::FixedArrayLenFromSize(kArraySize);
  Handle<FixedArray> array;
  int allocated = 0;
  do {
    if (allocated + kArraySize * 2 >
        static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) {
      int size =
          kArraySize * 2 -
          ((allocated + kArraySize * 2) -
           static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) -
          remainder;
      int last_array_len = heap::FixedArrayLenFromSize(size);
      array = isolate->factory()->NewFixedArray(last_array_len,
                                                AllocationType::kOld);
      CHECK_EQ(size, array->Size());
      allocated += array->Size() + remainder;
    } else {
      array =
          isolate->factory()->NewFixedArray(kArrayLen, AllocationType::kOld);
      allocated += array->Size();
      CHECK_EQ(kArraySize, array->Size());
    }
    if (handles.empty()) {
      // Check that allocations started on a new page.
      CHECK_EQ(array->address(), Page::FromHeapObject(*array)->area_start());
    }
    handles.push_back(array);
  } while (allocated <
           static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()));
  return handles;
}

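// Allocates FixedArrays of at most `object_size` bytes each until
// `padding_size` bytes of the chosen space are used up; a leftover gap too
// small for a FixedArray is covered with a filler object.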
std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
                                              AllocationType allocation,
                                              int object_size) {
  std::vector<Handle<FixedArray>> handles;
  Isolate* isolate = heap->isolate();
  int allocate_memory;
  int length;
  int free_memory = padding_size;
  if (allocation == i::AllocationType::kOld) {
    heap->old_space()->FreeLinearAllocationArea();
    int overall_free_memory = static_cast<int>(heap->old_space()->Available());
    CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
  } else {
    int overall_free_memory = static_cast<int>(heap->new_space()->Available());
    CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
  }
  while (free_memory > 0) {
    if (free_memory > object_size) {
      allocate_memory = object_size;
      length = FixedArrayLenFromSize(allocate_memory);
    } else {
      allocate_memory = free_memory;
      length = FixedArrayLenFromSize(allocate_memory);
      if (length <= 0) {
        // Not enough room to create another FixedArray, so create a filler.
        if (allocation == i::AllocationType::kOld) {
          heap->CreateFillerObjectAt(
              *heap->old_space()->allocation_top_address(), free_memory,
              ClearRecordedSlots::kNo);
        } else {
          heap->CreateFillerObjectAt(
              *heap->new_space()->allocation_top_address(), free_memory,
              ClearRecordedSlots::kNo);
        }
        break;
      }
    }
    handles.push_back(isolate->factory()->NewFixedArray(length, allocation));
    CHECK((allocation == AllocationType::kYoung &&
           heap->new_space()->Contains(*handles.back())) ||
          (allocation == AllocationType::kOld &&
           heap->InOldSpace(*handles.back())));
    free_memory -= handles.back()->Size();
  }
  return handles;
}

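// Fills up the remainder of the current new-space page. Returns false if the
// page is already full.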
bool FillCurrentPage(v8::internal::NewSpace* space,
                     std::vector<Handle<FixedArray>>* out_handles) {
  return heap::FillCurrentPageButNBytes(space, 0, out_handles);
}

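// Helper for FillCurrentPage(). Pads the space remaining on the current
// new-space page with FixedArrays; returns false if only `extra_bytes` bytes
// are left on the page.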
bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
                              std::vector<Handle<FixedArray>>* out_handles) {
  PauseAllocationObserversScope pause_observers(space->heap());
  // We cannot rely on `space->limit()` to point to the end of the current
  // page: when inline allocations are disabled, it actually points to the
  // current allocation pointer.
  DCHECK_IMPLIES(space->heap()->inline_allocation_disabled(),
                 space->limit() == space->top());
  int space_remaining =
      static_cast<int>(space->to_space().page_high() - space->top());
  CHECK(space_remaining >= extra_bytes);
  int new_linear_size = space_remaining - extra_bytes;
  if (new_linear_size == 0) return false;
  std::vector<Handle<FixedArray>> handles = heap::CreatePadding(
      space->heap(), space_remaining, i::AllocationType::kYoung);
  if (out_handles != nullptr) {
    out_handles->insert(out_handles->end(), handles.begin(), handles.end());
  }
  return true;
}

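// Fills the new space page by page until no room is left for further young
// allocations. Handles to the padding objects are appended to `out_handles`
// if provided, e.g. so a test can keep them alive across a GC.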
void SimulateFullSpace(v8::internal::NewSpace* space,
                       std::vector<Handle<FixedArray>>* out_handles) {
  // If you see this check failing, disable the flag at the start of your test:
  //   FLAG_stress_concurrent_allocation = false;
  // Background thread allocating concurrently interferes with this function.
  CHECK(!FLAG_stress_concurrent_allocation);
  while (heap::FillCurrentPage(space, out_handles) || space->AddFreshPage()) {
  }
}

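// Starts incremental marking if it is not already running and, when
// `force_completion` is set, drives it in 100 ms steps until marking is
// complete (e.g. heap::SimulateIncrementalMarking(CcTest::heap(), true)).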
void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
  const double kStepSizeInMs = 100;
  CHECK(FLAG_incremental_marking);
  i::IncrementalMarking* marking = heap->incremental_marking();
  i::MarkCompactCollector* collector = heap->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    SafepointScope scope(heap);
    collector->EnsureSweepingCompleted();
  }
  CHECK(marking->IsMarking() || marking->IsStopped() || marking->IsComplete());
  if (marking->IsStopped()) {
    heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
                                  i::GarbageCollectionReason::kTesting);
  }
  CHECK(marking->IsMarking() || marking->IsComplete());
  if (!force_completion) return;

  while (!marking->IsComplete()) {
    marking->Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                  i::StepOrigin::kV8);
    if (marking->IsReadyToOverApproximateWeakClosure()) {
      SafepointScope scope(heap);
      marking->FinalizeIncrementally();
    }
  }
  CHECK(marking->IsComplete());
}

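// Makes the given paged space appear full by dropping the linear allocation
// area and resetting the free list.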
void SimulateFullSpace(v8::internal::PagedSpace* space) {
  // If you see this check failing, disable the flag at the start of your test:
  //   FLAG_stress_concurrent_allocation = false;
  // Background thread allocating concurrently interferes with this function.
  CHECK(!FLAG_stress_concurrent_allocation);
  CodeSpaceMemoryModificationScope modification_scope(space->heap());
  i::MarkCompactCollector* collector = space->heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    collector->EnsureSweepingCompleted();
  }
  space->FreeLinearAllocationArea();
  space->ResetFreeList();
}

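// Gives up the linear allocation area and marks every page of the space as
// never-allocate, so the currently free memory is not reused.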
void AbandonCurrentlyFreeMemory(PagedSpace* space) {
  space->FreeLinearAllocationArea();
  for (Page* page : *space) {
    page->MarkNeverAllocateForTesting();
  }
}

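// Collects garbage in the given space and waits for any concurrent sweeping
// to finish.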
void GcAndSweep(Heap* heap, AllocationSpace space) {
  heap->CollectGarbage(space, GarbageCollectionReason::kTesting);
  if (heap->mark_compact_collector()->sweeping_in_progress()) {
    SafepointScope scope(heap);
    heap->mark_compact_collector()->EnsureSweepingCompleted();
  }
}

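// Marks the page as an evacuation candidate for the next mark-compact GC
// (requires --manual-evacuation-candidates-selection). If the page holds the
// active allocation area, that area is retired behind a filler object first.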
void ForceEvacuationCandidate(Page* page) {
  CHECK(FLAG_manual_evacuation_candidates_selection);
  page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
  PagedSpace* space = static_cast<PagedSpace*>(page->owner());
  DCHECK_NOT_NULL(space);
  Address top = space->top();
  Address limit = space->limit();
  if (top < limit && Page::FromAllocationAreaAddress(top) == page) {
    // Create filler object to keep page iterable if it was iterable.
    int remaining = static_cast<int>(limit - top);
    space->heap()->CreateFillerObjectAt(top, remaining,
                                        ClearRecordedSlots::kNo);
    base::MutexGuard guard(space->mutex());
    space->FreeLinearAllocationArea();
  }
}

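// Returns true if `object` sits in the generation where a fresh allocation is
// expected: the young generation normally, the old generation when
// --single-generation is enabled.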
bool InCorrectGeneration(HeapObject object) {
  return FLAG_single_generation ? !i::Heap::InYoungGeneration(object)
                                : i::Heap::InYoungGeneration(object);
}

void EnsureFlagLocalHeapsEnabled() {
  // Avoid a data race with concurrent threads by only setting the flag to
  // true if it is not already enabled.
  if (!FLAG_local_heaps) FLAG_local_heaps = true;
}

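// Grows the new space by one step unless it is already at maximum capacity.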
void GrowNewSpace(Heap* heap) {
  SafepointScope scope(heap);
  if (!heap->new_space()->IsAtMaximumCapacity()) {
    heap->new_space()->Grow();
  }
}

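// Grows the new space until it reaches its maximum capacity.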
void GrowNewSpaceToMaximumCapacity(Heap* heap) {
  SafepointScope scope(heap);
  while (!heap->new_space()->IsAtMaximumCapacity()) {
    heap->new_space()->Grow();
  }
}

}  // namespace heap
}  // namespace internal
}  // namespace v8