[heap] Implement SimulateFullSpace for PagedNewSpace

The existing version for paged spaces simply resets the freelist, which
doesn't work for tests that require actual objects in the space.
The existing version for new space doesn't work either, because it
assumes that everything after top is free space.

Fill the space with FixedArrays by iterating over the freelist and
creating an object in place of each freelist entry.
This method actually fills the space, so we can also use it to force
page promotion.

Bug: v8:12612
Change-Id: Ie0d73e846bbf688ea52030be29e0587b2f37ed4e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3823135
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82437}
Omer Katz authored on 2022-08-12 13:37:18 +02:00; committed by V8 LUCI CQ
parent 12099544c8
commit 0cd0e4bb76
14 changed files with 579 additions and 417 deletions
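Before the per-file diffs, here is a condensed sketch of the freelist-filling loop described above. It mirrors the SimulateFullSpace(PagedNewSpace*) overload added to test/unittests/heap/heap-utils.cc in this change (shown in full further down), but omits the ensure-sweeping-completed prologue; the standalone function name is illustrative only, and it assumes the same V8-internal headers that heap-utils.cc includes.

// Sketch only; needs the includes used by test/unittests/heap/heap-utils.cc.
void FillPagedNewSpaceFromFreeList(
    Heap* heap, PagedSpaceBase* paged_space,
    std::vector<Handle<FixedArray>>* out_handles) {
  Map fixed_array_map = ReadOnlyRoots(heap).unchecked_fixed_array_map();
  // Return the current LAB to the free list so every free byte is reachable
  // through free list categories.
  paged_space->FreeLinearAllocationArea();
  FreeList* free_list = paged_space->free_list();
  free_list->ForAllFreeListCategories([&](FreeListCategory* category) {
    // Detach the category so its bytes no longer count as available.
    free_list->RemoveCategory(category);
    while (!category->is_empty()) {
      size_t node_size;
      FreeSpace node = category->PickNodeFromList(0, &node_size);
      // Zero the memory to "initialize" it for the FixedArrays.
      memset(reinterpret_cast<void*>(node.address()), 0, node_size);
      Address address = node.address();
      Page* page = Page::FromAddress(address);
      // Carve the free node into FixedArrays no larger than a regular object.
      while (node_size >= 2 * kTaggedSize) {
        int array_size =
            std::min(static_cast<int>(node_size), kMaxRegularHeapObjectSize);
        HeapObject object = HeapObject::FromAddress(address);
        object.set_map_after_allocation(fixed_array_map, SKIP_WRITE_BARRIER);
        FixedArray arr = FixedArray::cast(object);
        arr.set_length((array_size - FixedArray::SizeFor(0)) / kTaggedSize);
        if (out_handles) out_handles->push_back(handle(arr, heap->isolate()));
        // Keep per-page and per-space allocated-bytes accounting in sync.
        page->IncreaseAllocatedBytes(array_size);
        paged_space->IncreaseAllocatedBytes(array_size, page);
        node_size -= array_size;
        address += array_size;
      }
      // A remainder smaller than a minimal FixedArray becomes a filler.
      if (node_size > 0) {
        heap->CreateFillerObjectAt(address, static_cast<int>(node_size));
      }
    }
  });
  paged_space->ResetFreeList();
}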


@@ -64,7 +64,8 @@ class FreeListCategory {
// Performs a single try to pick a node of at least |minimum_size| from the
// category. Stores the actual size in |node_size|. Returns nullptr if no
// node is found.
FreeSpace PickNodeFromList(size_t minimum_size, size_t* node_size);
V8_EXPORT_PRIVATE FreeSpace PickNodeFromList(size_t minimum_size,
size_t* node_size);
// Picks a node of at least |minimum_size| from the category. Stores the
// actual size in |node_size|. Returns nullptr if no node is found.


@@ -148,7 +148,6 @@ v8_source_set("cctest_sources") {
"heap/test-iterators.cc",
"heap/test-mark-compact.cc",
"heap/test-memory-measurement.cc",
"heap/test-page-promotion.cc",
"heap/test-shared-heap.cc",
"heap/test-spaces.cc",
"heap/test-unmapper.cc",


@@ -167,16 +167,6 @@ bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
return true;
}
void SimulateFullSpace(v8::internal::NewSpace* space,
std::vector<Handle<FixedArray>>* out_handles) {
// If you see this check failing, disable the flag at the start of your test:
// FLAG_stress_concurrent_allocation = false;
// Background thread allocating concurrently interferes with this function.
CHECK(!FLAG_stress_concurrent_allocation);
while (heap::FillCurrentPage(space, out_handles) || space->AddFreshPage()) {
}
}
void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
const double kStepSizeInMs = 100;
CHECK(FLAG_incremental_marking);


@@ -49,10 +49,6 @@ bool FillCurrentPageButNBytes(
v8::internal::NewSpace* space, int extra_bytes,
std::vector<Handle<FixedArray>>* out_handles = nullptr);
// Helper function that simulates a full new-space in the heap.
void SimulateFullSpace(v8::internal::NewSpace* space,
std::vector<Handle<FixedArray>>* out_handles = nullptr);
// Helper function that simulates many incremental marking steps until
// marking is completed.
void SimulateIncrementalMarking(i::Heap* heap, bool force_completion = true);


@@ -2224,90 +2224,6 @@ TEST(TestSizeOfObjectsVsHeapObjectIteratorPrecision) {
}
}
TEST(GrowAndShrinkNewSpace) {
if (FLAG_single_generation) return;
// Avoid shrinking new space in GC epilogue. This can happen if allocation
// throughput samples have been taken while executing the benchmark.
FLAG_predictable = true;
FLAG_stress_concurrent_allocation = false; // For SimulateFullSpace.
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
NewSpace* new_space = heap->new_space();
if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
return;
}
// Make sure we're in a consistent state to start out.
CcTest::CollectAllGarbage();
CcTest::CollectAllGarbage();
new_space->Shrink();
// Explicitly growing should double the space capacity.
size_t old_capacity, new_capacity;
old_capacity = new_space->TotalCapacity();
GrowNewSpace(heap);
new_capacity = new_space->TotalCapacity();
CHECK_EQ(2 * old_capacity, new_capacity);
old_capacity = new_space->TotalCapacity();
{
v8::HandleScope temporary_scope(CcTest::isolate());
heap::SimulateFullSpace(new_space);
}
new_capacity = new_space->TotalCapacity();
CHECK_EQ(old_capacity, new_capacity);
// Explicitly shrinking should not affect space capacity.
old_capacity = new_space->TotalCapacity();
new_space->Shrink();
new_capacity = new_space->TotalCapacity();
CHECK_EQ(old_capacity, new_capacity);
// Let the scavenger empty the new space.
CcTest::CollectGarbage(NEW_SPACE);
CHECK_LE(new_space->Size(), old_capacity);
// Explicitly shrinking should halve the space capacity.
old_capacity = new_space->TotalCapacity();
new_space->Shrink();
new_capacity = new_space->TotalCapacity();
CHECK_EQ(old_capacity, 2 * new_capacity);
// Consecutive shrinking should not affect space capacity.
old_capacity = new_space->TotalCapacity();
new_space->Shrink();
new_space->Shrink();
new_space->Shrink();
new_capacity = new_space->TotalCapacity();
CHECK_EQ(old_capacity, new_capacity);
}
TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
if (FLAG_single_generation) return;
FLAG_stress_concurrent_allocation = false; // For SimulateFullSpace.
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
return;
}
v8::HandleScope scope(CcTest::isolate());
NewSpace* new_space = heap->new_space();
size_t old_capacity, new_capacity;
old_capacity = new_space->TotalCapacity();
GrowNewSpace(heap);
new_capacity = new_space->TotalCapacity();
CHECK_EQ(2 * old_capacity, new_capacity);
{
v8::HandleScope temporary_scope(CcTest::isolate());
heap::SimulateFullSpace(new_space);
}
CcTest::CollectAllAvailableGarbage();
new_capacity = new_space->TotalCapacity();
CHECK_EQ(old_capacity, new_capacity);
}
static int NumberOfGlobalObjects() {
int count = 0;
HeapObjectIterator iterator(CcTest::heap());
@@ -2318,7 +2234,6 @@ static int NumberOfGlobalObjects() {
return count;
}
// Test that we don't embed maps from foreign contexts into
// optimized code.
TEST(LeakNativeContextViaMap) {
@@ -2677,47 +2592,6 @@ TEST(IdleNotificationFinishMarking) {
CHECK_EQ(CcTest::heap()->gc_count(), initial_gc_count + 1);
}
// Test that HAllocateObject will always return an object in new-space.
TEST(OptimizedAllocationAlwaysInNewSpace) {
if (FLAG_single_generation) return;
FLAG_allow_natives_syntax = true;
FLAG_stress_concurrent_allocation = false; // For SimulateFullSpace.
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_optimizer() || FLAG_always_turbofan) return;
if (FLAG_gc_global || FLAG_stress_compaction ||
FLAG_stress_incremental_marking)
return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
heap::SimulateFullSpace(CcTest::heap()->new_space());
AlwaysAllocateScopeForTesting always_allocate(CcTest::heap());
v8::Local<v8::Value> res = CompileRun(
"function c(x) {"
" this.x = x;"
" for (var i = 0; i < 32; i++) {"
" this['x' + i] = x;"
" }"
"}"
"function f(x) { return new c(x); };"
"%PrepareFunctionForOptimization(f);"
"f(1); f(2); f(3);"
"%OptimizeFunctionOnNextCall(f);"
"f(4);");
CHECK_EQ(4, res.As<v8::Object>()
->GetRealNamedProperty(ctx, v8_str("x"))
.ToLocalChecked()
->Int32Value(ctx)
.FromJust());
i::Handle<JSReceiver> o =
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res));
CHECK(Heap::InYoungGeneration(*o));
}
TEST(OptimizedPretenuringAllocationFolding) {
FLAG_allow_natives_syntax = true;
FLAG_expose_gc = true;
@@ -6455,48 +6329,6 @@ TEST(RememberedSet_InsertInLargePage) {
CHECK_EQ(2, GetRememberedSetSize<OLD_TO_NEW>(*arr));
}
TEST(RememberedSet_InsertOnPromotingObjectToOld) {
if (FLAG_single_generation || FLAG_stress_incremental_marking) return;
FLAG_stress_concurrent_allocation = false; // For SealCurrentObjects.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
heap::SealCurrentObjects(heap);
HandleScope scope(isolate);
// Create a young object and age it one generation inside the new space.
Handle<FixedArray> arr = factory->NewFixedArray(1);
std::vector<Handle<FixedArray>> handles;
if (FLAG_minor_mc) {
NewSpace* new_space = heap->new_space();
CHECK(!new_space->IsAtMaximumCapacity());
// Fill current pages to force MinorMC to promote them.
SimulateFullSpace(new_space, &handles);
SafepointScope scope(heap);
// New empty pages should remain in new space.
new_space->Grow();
} else {
CcTest::CollectGarbage(i::NEW_SPACE);
}
CHECK(Heap::InYoungGeneration(*arr));
// Add into 'arr' a reference to an object one generation younger.
{
HandleScope scope_inner(isolate);
Handle<Object> number = factory->NewHeapNumber(42);
arr->set(0, *number);
}
// Promote 'arr' into old, its element is still in new, the old to new
// refs are inserted into the remembered sets during GC.
CcTest::CollectGarbage(i::NEW_SPACE);
CHECK(heap->InOldSpace(*arr));
CHECK(heap->InYoungGeneration(arr->get(0)));
CHECK_EQ(1, GetRememberedSetSize<OLD_TO_NEW>(*arr));
}
TEST(RememberedSet_RemoveStaleOnScavenge) {
if (FLAG_single_generation || FLAG_stress_incremental_marking) return;
FLAG_stress_concurrent_allocation = false; // For SealCurrentObjects.
@@ -7259,46 +7091,6 @@ TEST(NoCodeRangeInJitlessMode) {
CHECK(CcTest::i_isolate()->heap()->code_region().is_empty());
}
TEST(Regress978156) {
if (!FLAG_incremental_marking) return;
if (FLAG_single_generation) return;
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
HandleScope handle_scope(CcTest::i_isolate());
Heap* heap = CcTest::i_isolate()->heap();
// 1. Ensure that the new space is empty.
CcTest::CollectGarbage(NEW_SPACE);
CcTest::CollectGarbage(NEW_SPACE);
// 2. Fill the new space with FixedArrays.
std::vector<Handle<FixedArray>> arrays;
i::heap::SimulateFullSpace(heap->new_space(), &arrays);
// 3. Trim the last array by one word thus creating a one-word filler.
Handle<FixedArray> last = arrays.back();
CHECK_GT(last->length(), 0);
heap->RightTrimFixedArray(*last, 1);
// 4. Get the last filler on the page.
HeapObject filler = HeapObject::FromAddress(
MemoryChunk::FromHeapObject(*last)->area_end() - kTaggedSize);
HeapObject::FromAddress(last->address() + last->Size());
CHECK(filler.IsFiller());
// 5. Start incremental marking.
i::IncrementalMarking* marking = heap->incremental_marking();
if (marking->IsStopped()) {
SafepointScope scope(heap);
heap->tracer()->StartCycle(
GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
"collector cctest", GCTracer::MarkingType::kIncremental);
marking->Start(i::GarbageCollectionReason::kTesting);
}
MarkingState* marking_state = marking->marking_state();
// 6. Mark the filler black to access its two markbits. This triggers
// an out-of-bounds access of the marking bitmap in a bad case.
marking_state->WhiteToGrey(filler);
marking_state->GreyToBlack(filler);
}
TEST(GarbageCollectionWithLocalHeap) {
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();


@@ -17766,48 +17766,6 @@ void EpilogueCallbackNew(v8::Isolate* isolate, v8::GCType,
++*static_cast<int*>(data);
}
void PrologueCallbackAlloc(v8::Isolate* isolate,
v8::GCType,
v8::GCCallbackFlags flags) {
v8::HandleScope scope(isolate);
CHECK_EQ(flags, v8::kNoGCCallbackFlags);
CHECK_EQ(gc_callbacks_isolate, isolate);
++prologue_call_count_alloc;
if (!v8::internal::FLAG_single_generation) {
// Simulate full heap to see if we will reenter this callback
i::heap::SimulateFullSpace(CcTest::heap()->new_space());
}
Local<Object> obj = Object::New(isolate);
CHECK(!obj.IsEmpty());
CcTest::PreciseCollectAllGarbage();
}
void EpilogueCallbackAlloc(v8::Isolate* isolate,
v8::GCType,
v8::GCCallbackFlags flags) {
v8::HandleScope scope(isolate);
CHECK_EQ(flags, v8::kNoGCCallbackFlags);
CHECK_EQ(gc_callbacks_isolate, isolate);
++epilogue_call_count_alloc;
if (!v8::internal::FLAG_single_generation) {
// Simulate full heap to see if we will reenter this callback
i::heap::SimulateFullSpace(CcTest::heap()->new_space());
}
Local<Object> obj = Object::New(isolate);
CHECK(!obj.IsEmpty());
CcTest::PreciseCollectAllGarbage();
}
TEST(GCCallbacksOld) {
LocalContext context;
@@ -17890,52 +17848,6 @@ TEST(GCCallbacksWithData) {
CHECK_EQ(2, epilogue2);
}
TEST(GCCallbacks) {
// For SimulateFullSpace in PrologueCallbackAlloc and EpilogueCallbackAlloc.
i::FLAG_stress_concurrent_allocation = false;
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
gc_callbacks_isolate = isolate;
isolate->AddGCPrologueCallback(PrologueCallback);
isolate->AddGCEpilogueCallback(EpilogueCallback);
CHECK_EQ(0, prologue_call_count);
CHECK_EQ(0, epilogue_call_count);
CcTest::CollectAllGarbage();
CHECK_EQ(1, prologue_call_count);
CHECK_EQ(1, epilogue_call_count);
isolate->AddGCPrologueCallback(PrologueCallbackSecond);
isolate->AddGCEpilogueCallback(EpilogueCallbackSecond);
CcTest::CollectAllGarbage();
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(1, prologue_call_count_second);
CHECK_EQ(1, epilogue_call_count_second);
isolate->RemoveGCPrologueCallback(PrologueCallback);
isolate->RemoveGCEpilogueCallback(EpilogueCallback);
CcTest::CollectAllGarbage();
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
CHECK_EQ(2, epilogue_call_count_second);
isolate->RemoveGCPrologueCallback(PrologueCallbackSecond);
isolate->RemoveGCEpilogueCallback(EpilogueCallbackSecond);
CcTest::CollectAllGarbage();
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
CHECK_EQ(2, epilogue_call_count_second);
CHECK_EQ(0, prologue_call_count_alloc);
CHECK_EQ(0, epilogue_call_count_alloc);
isolate->AddGCPrologueCallback(PrologueCallbackAlloc);
isolate->AddGCEpilogueCallback(EpilogueCallbackAlloc);
CcTest::PreciseCollectAllGarbage();
CHECK_EQ(1, prologue_call_count_alloc);
CHECK_EQ(1, epilogue_call_count_alloc);
isolate->RemoveGCPrologueCallback(PrologueCallbackAlloc);
isolate->RemoveGCEpilogueCallback(EpilogueCallbackAlloc);
}
namespace {
void AssertOneByteConsContainsTwoByteExternal(i::Handle<i::String> maybe_cons,


@@ -1747,69 +1747,6 @@ TEST(ExternalStringIndexOf) {
.FromJust());
}
#define GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING(NAME, STRING) \
TEST(GCInsideNewStringFromUtf8SubStringWith##NAME) { \
FLAG_stress_concurrent_allocation = false; /* For SimulateFullSpace. */ \
CcTest::InitializeVM(); \
LocalContext context; \
v8::HandleScope scope(CcTest::isolate()); \
Factory* factory = CcTest::i_isolate()->factory(); \
/* Length must be bigger than the buffer size of the Utf8Decoder. */ \
const char* buf = STRING; \
size_t len = strlen(buf); \
Handle<String> main_string = \
factory \
->NewStringFromOneByte(v8::base::Vector<const uint8_t>( \
reinterpret_cast<const uint8_t*>(buf), len)) \
.ToHandleChecked(); \
if (FLAG_single_generation) { \
CHECK(!Heap::InYoungGeneration(*main_string)); \
heap::SimulateFullSpace(CcTest::i_isolate()->heap()->old_space()); \
} else { \
CHECK(Heap::InYoungGeneration(*main_string)); \
heap::SimulateFullSpace(CcTest::i_isolate()->heap()->new_space()); \
} \
/* Offset by two to check substring-ing. */ \
Handle<String> s = factory \
->NewStringFromUtf8SubString( \
Handle<SeqOneByteString>::cast(main_string), 2, \
static_cast<int>(len - 2)) \
.ToHandleChecked(); \
Handle<String> expected_string = \
factory \
->NewStringFromUtf8( \
v8::base::Vector<const char>(buf + 2, len - 2)) \
.ToHandleChecked(); \
CHECK(s->Equals(*expected_string)); \
}
GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING(
OneByte,
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ")
GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING(
TwoByte,
"QQ\xF0\x9F\x98\x8D\xF0\x9F\x98\x8D"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQ\xF0\x9F\x98\x8D\xF0\x9F\x98\x8D")
#undef GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING
namespace {
struct IndexData {


@@ -227,6 +227,7 @@ v8_source_set("unittests_sources") {
"api/api-icu-unittest.cc",
"api/deserialize-unittest.cc",
"api/exception-unittest.cc",
"api/gc-callbacks-unittest.cc",
"api/interceptor-unittest.cc",
"api/isolate-unittest.cc",
"api/remote-object-unittest.cc",
@@ -385,6 +386,7 @@ v8_source_set("unittests_sources") {
"heap/marking-worklist-unittest.cc",
"heap/memory-reducer-unittest.cc",
"heap/object-stats-unittest.cc",
"heap/page-promotion-unittest.cc",
"heap/persistent-handles-unittest.cc",
"heap/progressbar-unittest.cc",
"heap/safepoint-unittest.cc",


@@ -0,0 +1,167 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "test/unittests/heap/heap-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace {
namespace {
class GCCallbacksTest : public internal::TestWithHeapInternalsAndContext {
public:
static void PrologueCallbackAlloc(v8::Isolate* isolate, v8::GCType,
v8::GCCallbackFlags flags) {
v8::HandleScope scope(isolate);
CHECK_EQ(flags, v8::kNoGCCallbackFlags);
CHECK_EQ(current_test_->gc_callbacks_isolate_, isolate);
++current_test_->prologue_call_count_alloc_;
if (!v8::internal::FLAG_single_generation) {
// Simulate full heap to see if we will reenter this callback
current_test_->SimulateFullSpace(current_test_->heap()->new_space());
}
Local<Object> obj = Object::New(isolate);
CHECK(!obj.IsEmpty());
current_test_->PreciseCollectAllGarbage();
}
static void EpilogueCallbackAlloc(v8::Isolate* isolate, v8::GCType,
v8::GCCallbackFlags flags) {
v8::HandleScope scope(isolate);
CHECK_EQ(flags, v8::kNoGCCallbackFlags);
CHECK_EQ(current_test_->gc_callbacks_isolate_, isolate);
++current_test_->epilogue_call_count_alloc_;
if (!v8::internal::FLAG_single_generation) {
// Simulate full heap to see if we will reenter this callback
current_test_->SimulateFullSpace(current_test_->heap()->new_space());
}
Local<Object> obj = Object::New(isolate);
CHECK(!obj.IsEmpty());
current_test_->PreciseCollectAllGarbage();
}
static void PrologueCallback(v8::Isolate* isolate, v8::GCType,
v8::GCCallbackFlags flags) {
CHECK_EQ(flags, v8::kNoGCCallbackFlags);
CHECK_EQ(current_test_->gc_callbacks_isolate_, isolate);
++current_test_->prologue_call_count_;
}
static void EpilogueCallback(v8::Isolate* isolate, v8::GCType,
v8::GCCallbackFlags flags) {
CHECK_EQ(flags, v8::kNoGCCallbackFlags);
CHECK_EQ(current_test_->gc_callbacks_isolate_, isolate);
++current_test_->epilogue_call_count_;
}
static void PrologueCallbackSecond(v8::Isolate* isolate, v8::GCType,
v8::GCCallbackFlags flags) {
CHECK_EQ(flags, v8::kNoGCCallbackFlags);
CHECK_EQ(current_test_->gc_callbacks_isolate_, isolate);
++current_test_->prologue_call_count_second_;
}
static void EpilogueCallbackSecond(v8::Isolate* isolate, v8::GCType,
v8::GCCallbackFlags flags) {
CHECK_EQ(flags, v8::kNoGCCallbackFlags);
CHECK_EQ(current_test_->gc_callbacks_isolate_, isolate);
++current_test_->epilogue_call_count_second_;
}
static void PrologueCallbackNew(v8::Isolate* isolate, v8::GCType,
v8::GCCallbackFlags flags, void* data) {
CHECK_EQ(flags, v8::kNoGCCallbackFlags);
CHECK_EQ(current_test_->gc_callbacks_isolate_, isolate);
++*static_cast<int*>(data);
}
static void EpilogueCallbackNew(v8::Isolate* isolate, v8::GCType,
v8::GCCallbackFlags flags, void* data) {
CHECK_EQ(flags, v8::kNoGCCallbackFlags);
CHECK_EQ(current_test_->gc_callbacks_isolate_, isolate);
++*static_cast<int*>(data);
}
protected:
void SetUp() override {
internal::TestWithHeapInternalsAndContext::SetUp();
DCHECK_NULL(current_test_);
current_test_ = this;
}
void TearDown() override {
DCHECK_NOT_NULL(current_test_);
current_test_ = nullptr;
internal::TestWithHeapInternalsAndContext::TearDown();
}
static GCCallbacksTest* current_test_;
v8::Isolate* gc_callbacks_isolate_ = nullptr;
int prologue_call_count_ = 0;
int epilogue_call_count_ = 0;
int prologue_call_count_second_ = 0;
int epilogue_call_count_second_ = 0;
int prologue_call_count_alloc_ = 0;
int epilogue_call_count_alloc_ = 0;
};
GCCallbacksTest* GCCallbacksTest::current_test_ = nullptr;
} // namespace
TEST_F(GCCallbacksTest, GCCallbacks) {
// For SimulateFullSpace in PrologueCallbackAlloc and EpilogueCallbackAlloc.
i::FLAG_stress_concurrent_allocation = false;
v8::Isolate* isolate = context()->GetIsolate();
gc_callbacks_isolate_ = isolate;
isolate->AddGCPrologueCallback(PrologueCallback);
isolate->AddGCEpilogueCallback(EpilogueCallback);
CHECK_EQ(0, prologue_call_count_);
CHECK_EQ(0, epilogue_call_count_);
CollectAllGarbage();
CHECK_EQ(1, prologue_call_count_);
CHECK_EQ(1, epilogue_call_count_);
isolate->AddGCPrologueCallback(PrologueCallbackSecond);
isolate->AddGCEpilogueCallback(EpilogueCallbackSecond);
CollectAllGarbage();
CHECK_EQ(2, prologue_call_count_);
CHECK_EQ(2, epilogue_call_count_);
CHECK_EQ(1, prologue_call_count_second_);
CHECK_EQ(1, epilogue_call_count_second_);
isolate->RemoveGCPrologueCallback(PrologueCallback);
isolate->RemoveGCEpilogueCallback(EpilogueCallback);
CollectAllGarbage();
CHECK_EQ(2, prologue_call_count_);
CHECK_EQ(2, epilogue_call_count_);
CHECK_EQ(2, prologue_call_count_second_);
CHECK_EQ(2, epilogue_call_count_second_);
isolate->RemoveGCPrologueCallback(PrologueCallbackSecond);
isolate->RemoveGCEpilogueCallback(EpilogueCallbackSecond);
CollectAllGarbage();
CHECK_EQ(2, prologue_call_count_);
CHECK_EQ(2, epilogue_call_count_);
CHECK_EQ(2, prologue_call_count_second_);
CHECK_EQ(2, epilogue_call_count_second_);
CHECK_EQ(0, prologue_call_count_alloc_);
CHECK_EQ(0, epilogue_call_count_alloc_);
isolate->AddGCPrologueCallback(PrologueCallbackAlloc);
isolate->AddGCEpilogueCallback(EpilogueCallbackAlloc);
PreciseCollectAllGarbage();
CHECK_EQ(1, prologue_call_count_alloc_);
CHECK_EQ(1, epilogue_call_count_alloc_);
isolate->RemoveGCPrologueCallback(PrologueCallbackAlloc);
isolate->RemoveGCEpilogueCallback(EpilogueCallbackAlloc);
}
} // namespace
} // namespace v8


@@ -8,18 +8,23 @@
#include <iostream>
#include <limits>
#include "include/v8-isolate.h"
#include "include/v8-object.h"
#include "src/handles/handles-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/remembered-set.h"
#include "src/heap/safepoint.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/heap/heap-utils.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
using HeapTest = TestWithContext;
using HeapTest = TestWithHeapInternalsAndContext;
TEST(Heap, YoungGenerationSizeFromOldGenerationSize) {
const size_t pm = i::Heap::kPointerMultiplier;
@@ -174,5 +179,220 @@ TEST_F(HeapTest, HeapLayout) {
}
#endif // V8_COMPRESS_POINTERS
TEST_F(HeapTest, GrowAndShrinkNewSpace) {
if (FLAG_single_generation) return;
// Avoid shrinking new space in GC epilogue. This can happen if allocation
// throughput samples have been taken while executing the benchmark.
FLAG_predictable = true;
FLAG_stress_concurrent_allocation = false; // For SimulateFullSpace.
NewSpace* new_space = heap()->new_space();
if (heap()->MaxSemiSpaceSize() == heap()->InitialSemiSpaceSize()) {
return;
}
// Make sure we're in a consistent state to start out.
CollectAllGarbage();
CollectAllGarbage();
new_space->Shrink();
// Explicitly growing should double the space capacity.
size_t old_capacity, new_capacity;
old_capacity = new_space->TotalCapacity();
GrowNewSpace();
new_capacity = new_space->TotalCapacity();
CHECK_EQ(2 * old_capacity, new_capacity);
old_capacity = new_space->TotalCapacity();
{
v8::HandleScope temporary_scope(reinterpret_cast<v8::Isolate*>(isolate()));
SimulateFullSpace(new_space);
}
new_capacity = new_space->TotalCapacity();
CHECK_EQ(old_capacity, new_capacity);
// Explicitly shrinking should not affect space capacity.
old_capacity = new_space->TotalCapacity();
new_space->Shrink();
new_capacity = new_space->TotalCapacity();
CHECK_EQ(old_capacity, new_capacity);
// Let the scavenger empty the new space.
CollectGarbage(NEW_SPACE);
CHECK_LE(new_space->Size(), old_capacity);
// Explicitly shrinking should halve the space capacity.
old_capacity = new_space->TotalCapacity();
new_space->Shrink();
new_capacity = new_space->TotalCapacity();
CHECK_EQ(old_capacity, 2 * new_capacity);
// Consecutive shrinking should not affect space capacity.
old_capacity = new_space->TotalCapacity();
new_space->Shrink();
new_space->Shrink();
new_space->Shrink();
new_capacity = new_space->TotalCapacity();
CHECK_EQ(old_capacity, new_capacity);
}
TEST_F(HeapTest, CollectingAllAvailableGarbageShrinksNewSpace) {
if (FLAG_single_generation) return;
FLAG_stress_concurrent_allocation = false; // For SimulateFullSpace.
if (heap()->MaxSemiSpaceSize() == heap()->InitialSemiSpaceSize()) {
return;
}
v8::Isolate* iso = reinterpret_cast<v8::Isolate*>(isolate());
v8::HandleScope scope(iso);
NewSpace* new_space = heap()->new_space();
size_t old_capacity, new_capacity;
old_capacity = new_space->TotalCapacity();
GrowNewSpace();
new_capacity = new_space->TotalCapacity();
CHECK_EQ(2 * old_capacity, new_capacity);
{
v8::HandleScope temporary_scope(iso);
SimulateFullSpace(new_space);
}
CollectAllAvailableGarbage();
new_capacity = new_space->TotalCapacity();
CHECK_EQ(old_capacity, new_capacity);
}
// Test that HAllocateObject will always return an object in new-space.
TEST_F(HeapTest, OptimizedAllocationAlwaysInNewSpace) {
if (FLAG_single_generation) return;
FLAG_allow_natives_syntax = true;
FLAG_stress_concurrent_allocation = false; // For SimulateFullSpace.
if (!isolate()->use_optimizer() || FLAG_always_turbofan) return;
if (FLAG_gc_global || FLAG_stress_compaction ||
FLAG_stress_incremental_marking)
return;
v8::Isolate* iso = reinterpret_cast<v8::Isolate*>(isolate());
v8::HandleScope scope(iso);
v8::Local<v8::Context> ctx = iso->GetCurrentContext();
SimulateFullSpace(heap()->new_space());
AlwaysAllocateScopeForTesting always_allocate(heap());
v8::Local<v8::Value> res = WithIsolateScopeMixin::RunJS(
"function c(x) {"
" this.x = x;"
" for (var i = 0; i < 32; i++) {"
" this['x' + i] = x;"
" }"
"}"
"function f(x) { return new c(x); };"
"%PrepareFunctionForOptimization(f);"
"f(1); f(2); f(3);"
"%OptimizeFunctionOnNextCall(f);"
"f(4);");
CHECK_EQ(4, res.As<v8::Object>()
->GetRealNamedProperty(ctx, NewString("x"))
.ToLocalChecked()
->Int32Value(ctx)
.FromJust());
i::Handle<JSReceiver> o =
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res));
CHECK(Heap::InYoungGeneration(*o));
}
namespace {
template <RememberedSetType direction>
static size_t GetRememberedSetSize(HeapObject obj) {
size_t count = 0;
auto chunk = MemoryChunk::FromHeapObject(obj);
RememberedSet<direction>::Iterate(
chunk,
[&count](MaybeObjectSlot slot) {
count++;
return KEEP_SLOT;
},
SlotSet::KEEP_EMPTY_BUCKETS);
return count;
}
} // namespace
TEST_F(HeapTest, RememberedSet_InsertOnPromotingObjectToOld) {
if (FLAG_single_generation || FLAG_stress_incremental_marking) return;
FLAG_stress_concurrent_allocation = false; // For SealCurrentObjects.
Factory* factory = isolate()->factory();
Heap* heap = isolate()->heap();
SealCurrentObjects();
HandleScope scope(isolate());
// Create a young object and age it one generation inside the new space.
Handle<FixedArray> arr = factory->NewFixedArray(1);
std::vector<Handle<FixedArray>> handles;
if (FLAG_minor_mc) {
NewSpace* new_space = heap->new_space();
CHECK(!new_space->IsAtMaximumCapacity());
// Fill current pages to force MinorMC to promote them.
SimulateFullSpace(new_space, &handles);
SafepointScope scope(heap);
// New empty pages should remain in new space.
new_space->Grow();
} else {
CollectGarbage(i::NEW_SPACE);
}
CHECK(Heap::InYoungGeneration(*arr));
// Add into 'arr' a reference to an object one generation younger.
{
HandleScope scope_inner(isolate());
Handle<Object> number = factory->NewHeapNumber(42);
arr->set(0, *number);
}
// Promote 'arr' into old, its element is still in new, the old to new
// refs are inserted into the remembered sets during GC.
CollectGarbage(i::NEW_SPACE);
CHECK(heap->InOldSpace(*arr));
CHECK(heap->InYoungGeneration(arr->get(0)));
CHECK_EQ(1, GetRememberedSetSize<OLD_TO_NEW>(*arr));
}
TEST_F(HeapTest, Regress978156) {
if (!FLAG_incremental_marking) return;
if (FLAG_single_generation) return;
ManualGCScope manual_gc_scope(isolate());
HandleScope handle_scope(isolate());
Heap* heap = isolate()->heap();
// 1. Ensure that the new space is empty.
CollectGarbage(NEW_SPACE);
CollectGarbage(NEW_SPACE);
// 2. Fill the new space with FixedArrays.
std::vector<Handle<FixedArray>> arrays;
SimulateFullSpace(heap->new_space(), &arrays);
// 3. Trim the last array by one word thus creating a one-word filler.
Handle<FixedArray> last = arrays.back();
CHECK_GT(last->length(), 0);
heap->RightTrimFixedArray(*last, 1);
// 4. Get the last filler on the page.
HeapObject filler = HeapObject::FromAddress(
MemoryChunk::FromHeapObject(*last)->area_end() - kTaggedSize);
HeapObject::FromAddress(last->address() + last->Size());
CHECK(filler.IsFiller());
// 5. Start incremental marking.
i::IncrementalMarking* marking = heap->incremental_marking();
if (marking->IsStopped()) {
SafepointScope scope(heap);
heap->tracer()->StartCycle(
GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
"collector cctest", GCTracer::MarkingType::kIncremental);
marking->Start(i::GarbageCollectionReason::kTesting);
}
MarkingState* marking_state = marking->marking_state();
// 6. Mark the filler black to access its two markbits. This triggers
// an out-of-bounds access of the marking bitmap in a bad case.
marking_state->WhiteToGrey(filler);
marking_state->GreyToBlack(filler);
}
} // namespace internal
} // namespace v8


@@ -36,6 +36,71 @@ void HeapInternalsBase::SimulateIncrementalMarking(Heap* heap,
CHECK(marking->IsComplete());
}
void HeapInternalsBase::SimulateFullSpace(
v8::internal::PagedNewSpace* space,
std::vector<Handle<FixedArray>>* out_handles) {
// If you see this check failing, disable the flag at the start of your test:
// FLAG_stress_concurrent_allocation = false;
// Background thread allocating concurrently interferes with this function.
CHECK(!FLAG_stress_concurrent_allocation);
Heap* heap = space->heap();
if (heap->mark_compact_collector()->sweeping_in_progress()) {
heap->mark_compact_collector()->EnsureSweepingCompleted(
MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
}
// MinorMC is atomic, so we need to ensure it is completed.
Map unchecked_fixed_array_map =
ReadOnlyRoots(heap).unchecked_fixed_array_map();
PagedSpaceBase* paged_space = space->paged_space();
paged_space->FreeLinearAllocationArea();
FreeList* free_list = paged_space->free_list();
free_list->ForAllFreeListCategories(
[heap, paged_space, free_list, unchecked_fixed_array_map,
out_handles](FreeListCategory* category) {
// Remove category from the free list to remove it from the available
// bytes count.
free_list->RemoveCategory(category);
// Create FixedArray objects in all free list entries.
while (!category->is_empty()) {
size_t node_size;
FreeSpace node = category->PickNodeFromList(0, &node_size);
DCHECK_LT(0, node_size);
DCHECK_LE(node_size, std::numeric_limits<int>::max());
// Zero the memory to "initialize" it for the FixedArray.
memset(reinterpret_cast<void*>(node.address()), 0, node_size);
Address address = node.address();
Page* page = Page::FromAddress(address);
// FixedArray requires at least 2*kTaggedSize memory.
while (node_size >= 2 * kTaggedSize) {
// Don't create FixedArrays bigger than max normal object size.
int array_size = std::min(static_cast<int>(node_size),
kMaxRegularHeapObjectSize);
// Convert the free space to a FixedArray
HeapObject heap_object(HeapObject::FromAddress(address));
heap_object.set_map_after_allocation(unchecked_fixed_array_map,
SKIP_WRITE_BARRIER);
FixedArray arr(FixedArray::cast(heap_object));
arr.set_length((array_size - FixedArray::SizeFor(0)) / kTaggedSize);
DCHECK_EQ(array_size, arr.AllocatedSize());
if (out_handles)
out_handles->push_back(handle(arr, heap->isolate()));
// Update allocated bytes statistics for the page and the space.
page->IncreaseAllocatedBytes(array_size);
paged_space->IncreaseAllocatedBytes(array_size, page);
node_size -= array_size;
address += array_size;
}
if (node_size > 0) {
// Create a filler in any remaining memory.
DCHECK_GT(2 * kTaggedSize, node_size);
heap->CreateFillerObjectAt(address, static_cast<int>(node_size));
}
}
});
paged_space->ResetFreeList();
}
void HeapInternalsBase::SimulateFullSpace(
v8::internal::NewSpace* space,
std::vector<Handle<FixedArray>>* out_handles) {
@@ -43,9 +108,13 @@ void HeapInternalsBase::SimulateFullSpace(
// FLAG_stress_concurrent_allocation = false;
// Background thread allocating concurrently interferes with this function.
CHECK(!FLAG_stress_concurrent_allocation);
if (FLAG_minor_mc) {
SimulateFullSpace(PagedNewSpace::From(space), out_handles);
} else {
while (FillCurrentPage(space, out_handles) || space->AddFreshPage()) {
}
}
}
void HeapInternalsBase::SimulateFullSpace(v8::internal::PagedSpace* space) {
// If you see this check failing, disable the flag at the start of your test:


@@ -30,6 +30,11 @@ class HeapInternalsBase {
Heap* heap, int padding_size, AllocationType allocation,
int object_size = kMaxRegularHeapObjectSize);
int FixedArrayLenFromSize(int size);
private:
void SimulateFullSpace(
v8::internal::PagedNewSpace* space,
std::vector<Handle<FixedArray>>* out_handles = nullptr);
};
template <typename TMixin>
@@ -51,6 +56,10 @@ class WithHeapInternals : public TMixin, HeapInternalsBase {
heap()->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
}
void CollectAllAvailableGarbage() {
heap()->CollectAllAvailableGarbage(i::GarbageCollectionReason::kTesting);
}
Heap* heap() const { return this->i_isolate()->heap(); }
void SimulateIncrementalMarking(bool force_completion = true) {
@@ -66,6 +75,28 @@ class WithHeapInternals : public TMixin, HeapInternalsBase {
void SimulateFullSpace(v8::internal::PagedSpace* space) {
return HeapInternalsBase::SimulateFullSpace(space);
}
void GrowNewSpace() {
SafepointScope scope(heap());
if (!heap()->new_space()->IsAtMaximumCapacity()) {
heap()->new_space()->Grow();
}
}
void SealCurrentObjects() {
// If you see this check failing, disable the flag at the start of your
// test: FLAG_stress_concurrent_allocation = false; Background thread
// allocating concurrently interferes with this function.
CHECK(!FLAG_stress_concurrent_allocation);
FullGC();
FullGC();
heap()->mark_compact_collector()->EnsureSweepingCompleted(
MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
heap()->old_space()->FreeLinearAllocationArea();
for (Page* page : *heap()->old_space()) {
page->MarkNeverAllocateForTesting();
}
}
};
using TestWithHeapInternals = //


@@ -3,12 +3,8 @@
// found in the LICENSE file.
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
#include "test/unittests/heap/heap-utils.h"
#include "v8-isolate.h"
namespace v8 {
namespace internal {
@@ -19,28 +15,7 @@ namespace heap {
namespace {
v8::Isolate* NewIsolateForPagePromotion(int min_semi_space_size = 8,
int max_semi_space_size = 8) {
// Parallel evacuation messes with fragmentation in a way that objects that
// should be copied in semi space are promoted to old space because of
// fragmentation.
FLAG_parallel_compaction = false;
FLAG_page_promotion = true;
FLAG_page_promotion_threshold = 0;
// Parallel scavenge introduces too much fragmentation.
FLAG_parallel_scavenge = false;
FLAG_min_semi_space_size = min_semi_space_size;
// We cannot optimize for size as we require a new space with more than one
// page.
FLAG_optimize_for_size = false;
// Set max_semi_space_size because it could've been initialized by an
// implication of optimize_for_size.
FLAG_max_semi_space_size = max_semi_space_size;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
return isolate;
}
class PagePromotionTest : public TestWithHeapInternalsAndContext {};
Page* FindLastPageInNewSpace(const std::vector<Handle<FixedArray>>& handles) {
for (auto rit = handles.rbegin(); rit != handles.rend(); ++rit) {
@@ -53,32 +28,39 @@ Page* FindLastPageInNewSpace(const std::vector<Handle<FixedArray>>& handles) {
} // namespace
UNINITIALIZED_TEST(PagePromotion_NewToOld) {
TEST_F(PagePromotionTest, PagePromotion_NewToOld) {
if (i::FLAG_single_generation) return;
if (!i::FLAG_incremental_marking) return;
if (!i::FLAG_page_promotion) return;
ManualGCScope manual_gc_scope;
FLAG_page_promotion_threshold = 0;
// Parallel evacuation messes with fragmentation in a way that objects that
// should be copied in semi space are promoted to old space because of
// fragmentation.
FLAG_parallel_compaction = false;
// Parallel scavenge introduces too much fragmentation.
FLAG_parallel_scavenge = false;
// We cannot optimize for size as we require a new space with more than one
// page.
FLAG_optimize_for_size = false;
ManualGCScope manual_gc_scope(isolate());
v8::Isolate* isolate = NewIsolateForPagePromotion();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate)->Enter();
Heap* heap = i_isolate->heap();
v8::HandleScope handle_scope(reinterpret_cast<v8::Isolate*>(isolate()));
Heap* heap = isolate()->heap();
// Ensure that the new space is empty so that the page to be promoted
// does not contain the age mark.
heap->CollectGarbage(OLD_SPACE, i::GarbageCollectionReason::kTesting);
std::vector<Handle<FixedArray>> handles;
heap::SimulateFullSpace(heap->new_space(), &handles);
SimulateFullSpace(heap->new_space(), &handles);
CHECK_GT(handles.size(), 0u);
Page* const to_be_promoted_page = FindLastPageInNewSpace(handles);
CHECK_NOT_NULL(to_be_promoted_page);
CHECK(heap->new_space()->IsPromotionCandidate(to_be_promoted_page));
// To perform a sanity check on live bytes we need to mark the heap.
heap::SimulateIncrementalMarking(heap, true);
SimulateIncrementalMarking(true);
// Sanity check that the page meets the requirements for promotion.
const int threshold_bytes = static_cast<int>(
FLAG_page_promotion_threshold *
@@ -91,11 +73,10 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
// during a full GC.
CHECK(heap->new_space()->ContainsSlow(to_be_promoted_page->address()));
CHECK(!heap->old_space()->ContainsSlow(to_be_promoted_page->address()));
heap::GcAndSweep(heap, OLD_SPACE);
CollectGarbage(OLD_SPACE);
CHECK(!heap->new_space()->ContainsSlow(to_be_promoted_page->address()));
CHECK(heap->old_space()->ContainsSlow(to_be_promoted_page->address()));
}
isolate->Dispose();
}
#endif // V8_LITE_MODE


@@ -6,9 +6,11 @@
#include <string>
#include <vector>
#include "include/v8-isolate.h"
#include "src/base/vector.h"
#include "src/strings/unicode-decoder.h"
#include "src/strings/unicode-inl.h"
#include "test/unittests/heap/heap-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
@@ -493,5 +495,68 @@ TEST(UnicodeTest, IncrementalUTF8DecodingVsNonIncrementalUtf8Decoding) {
}
}
class UnicodeWithGCTest : public TestWithHeapInternals {};
#define GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING(NAME, STRING) \
TEST_F(UnicodeWithGCTest, GCInsideNewStringFromUtf8SubStringWith##NAME) { \
FLAG_stress_concurrent_allocation = false; /* For SimulateFullSpace. */ \
v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate())); \
Factory* factory = isolate()->factory(); \
/* Length must be bigger than the buffer size of the Utf8Decoder. */ \
const char* buf = STRING; \
size_t len = strlen(buf); \
Handle<String> main_string = \
factory \
->NewStringFromOneByte(v8::base::Vector<const uint8_t>( \
reinterpret_cast<const uint8_t*>(buf), len)) \
.ToHandleChecked(); \
if (FLAG_single_generation) { \
CHECK(!Heap::InYoungGeneration(*main_string)); \
SimulateFullSpace(heap()->old_space()); \
} else { \
CHECK(Heap::InYoungGeneration(*main_string)); \
SimulateFullSpace(heap()->new_space()); \
} \
/* Offset by two to check substring-ing. */ \
Handle<String> s = factory \
->NewStringFromUtf8SubString( \
Handle<SeqOneByteString>::cast(main_string), 2, \
static_cast<int>(len - 2)) \
.ToHandleChecked(); \
Handle<String> expected_string = \
factory \
->NewStringFromUtf8( \
v8::base::Vector<const char>(buf + 2, len - 2)) \
.ToHandleChecked(); \
CHECK(s->Equals(*expected_string)); \
}
GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING(
OneByte,
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ")
GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING(
TwoByte,
"QQ\xF0\x9F\x98\x8D\xF0\x9F\x98\x8D"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
"QQ\xF0\x9F\x98\x8D\xF0\x9F\x98\x8D")
#undef GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING
} // namespace internal
} // namespace v8