[heap] Small fixes for young large objects

This replaces Heap::InNewSpace with Heap::InYoungGeneration and
fixes tests that are sensitive to page size.

Bug: chromium:852420
Change-Id: I32b1eafb45813ea3bdcbda075f9e6156aaf4c5e3
Reviewed-on: https://chromium-review.googlesource.com/c/1475766
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59647}
Author: Ulan Degenbaev <ulan@chromium.org>
Date: 2019-02-18 09:58:18 +01:00 (committed by Commit Bot)
Parent: ec68d97db8
Commit: d56da5467b

6 changed files with 19 additions and 44 deletions
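For orientation: the predicate being removed, Heap::InNewSpace, is true only for objects on semi-space pages, while its replacement, Heap::InYoungGeneration, also covers objects in the young (new) large object space. A minimal standalone sketch of the distinction, using toy types rather than V8's real classes:

#include <cassert>

// Toy model of the spaces involved, not V8's real types: the young
// generation is the semi-space new space *plus* the new large object space.
enum class Space { kNewSpace, kNewLargeObjectSpace, kOldSpace };

// The removed predicate recognized only semi-space pages...
bool InNewSpace(Space s) { return s == Space::kNewSpace; }

// ...its replacement also recognizes young large objects.
bool InYoungGeneration(Space s) {
  return s == Space::kNewSpace || s == Space::kNewLargeObjectSpace;
}

int main() {
  // A young large object is missed by the old check but caught by the new one.
  assert(!InNewSpace(Space::kNewLargeObjectSpace));
  assert(InYoungGeneration(Space::kNewLargeObjectSpace));
}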

src/heap/heap-inl.h

@@ -316,34 +316,6 @@ void Heap::FinalizeExternalString(String string) {
 Address Heap::NewSpaceTop() { return new_space_->top(); }
 
-// static
-bool Heap::InNewSpace(Object object) {
-  DCHECK(!HasWeakHeapObjectTag(object));
-  return object->IsHeapObject() && InNewSpace(HeapObject::cast(object));
-}
-
-// static
-bool Heap::InNewSpace(MaybeObject object) {
-  HeapObject heap_object;
-  return object->GetHeapObject(&heap_object) && InNewSpace(heap_object);
-}
-
-// static
-bool Heap::InNewSpace(HeapObject heap_object) {
-  // Inlined check from NewSpace::Contains.
-  bool result = MemoryChunk::FromHeapObject(heap_object)->InNewSpace();
-#ifdef DEBUG
-  // If in NEW_SPACE, then check we're either not in the middle of GC or the
-  // object is in to-space.
-  if (result) {
-    // If the object is in NEW_SPACE, then it's not in RO_SPACE so this is safe.
-    Heap* heap = Heap::FromWritableHeapObject(heap_object);
-    DCHECK_IMPLIES(heap->gc_state_ == NOT_IN_GC, InToPage(heap_object));
-  }
-#endif
-  return result;
-}
-
 // static
 bool Heap::InYoungGeneration(Object object) {
   DCHECK(!HasWeakHeapObjectTag(object));
   return object->IsHeapObject() && InYoungGeneration(HeapObject::cast(object));
@@ -528,7 +500,7 @@ void Heap::ExternalStringTable::AddString(String string) {
   DCHECK(string->IsExternalString());
   DCHECK(!Contains(string));
 
-  if (InNewSpace(string)) {
+  if (InYoungGeneration(string)) {
     young_strings_.push_back(string);
   } else {
     old_strings_.push_back(string);

src/heap/heap.cc

@@ -256,7 +256,8 @@ size_t Heap::CommittedMemoryOfUnmapper() {
 size_t Heap::CommittedMemory() {
   if (!HasBeenSetUp()) return 0;
 
-  return new_space_->CommittedMemory() + CommittedOldGenerationMemory();
+  return new_space_->CommittedMemory() + new_lo_space_->Size() +
+         CommittedOldGenerationMemory();
 }
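The accounting fix above folds the new large object space into the reported committed memory. A tiny sketch of the invariant with assumed names (the real method reads the spaces directly):

#include <cstddef>

// Assumed-name sketch: once young large objects exist, committed memory has
// three components; dropping the new large object space under-reports usage.
size_t TotalCommittedMemory(size_t new_space_committed,
                            size_t new_lo_space_size,
                            size_t old_generation_committed) {
  return new_space_committed + new_lo_space_size + old_generation_committed;
}

int main() {
  // Illustrative numbers only: 1 MB semi-spaces, one 2 MB large object,
  // 8 MB old generation.
  return TotalCommittedMemory(1u << 20, 2u << 20, 8u << 20) == 11u << 20 ? 0 : 1;
}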

src/heap/heap.h

@@ -912,11 +912,6 @@ class Heap {
   static inline bool InYoungGeneration(Object object);
   static inline bool InYoungGeneration(MaybeObject object);
   static inline bool InYoungGeneration(HeapObject heap_object);
-  // TODO(ulan): Remove once all call sites are changed to use
-  // InYoungGeneration.
-  static inline bool InNewSpace(Object object);
-  static inline bool InNewSpace(MaybeObject object);
-  static inline bool InNewSpace(HeapObject heap_object);
   static inline bool InFromPage(Object object);
   static inline bool InFromPage(MaybeObject object);
   static inline bool InFromPage(HeapObject heap_object);

src/heap/mark-compact.cc

@@ -172,6 +172,7 @@ class FullMarkingVerifier : public MarkingVerifier {
   void Run() override {
     VerifyRoots(VISIT_ONLY_STRONG);
     VerifyMarking(heap_->new_space());
+    VerifyMarking(heap_->new_lo_space());
     VerifyMarking(heap_->old_space());
     VerifyMarking(heap_->code_space());
     VerifyMarking(heap_->map_space());
@@ -1141,7 +1142,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
   inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
                                          Address slot) {
     if (value->IsStrongOrWeak()) {
-      Page* p = Page::FromAddress(value.ptr());
+      MemoryChunk* p = MemoryChunk::FromAddress(value.ptr());
       if (p->InYoungGeneration()) {
         DCHECK_IMPLIES(
             p->IsToPage(),
@@ -4042,16 +4043,16 @@ class YoungGenerationRecordMigratedSlotVisitor final
   inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
                                  Address slot) final {
     if (value->IsStrongOrWeak()) {
-      Page* p = Page::FromAddress(value.ptr());
+      MemoryChunk* p = MemoryChunk::FromAddress(value.ptr());
       if (p->InYoungGeneration()) {
         DCHECK_IMPLIES(
             p->IsToPage(),
             p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
         RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
-            Page::FromHeapObject(host), slot);
+            MemoryChunk::FromHeapObject(host), slot);
       } else if (p->IsEvacuationCandidate() && IsLive(host)) {
         RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
-            Page::FromHeapObject(host), slot);
+            MemoryChunk::FromHeapObject(host), slot);
       }
     }
   }
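Both RecordMigratedSlot changes replace Page::FromAddress/FromHeapObject with the MemoryChunk equivalents because a slot value may now reference a young large object, which lives on a large page rather than a regular one (note the p->IsLargePage() allowance in the DCHECK). A toy model of why resolving through the common base type is the safe choice; the shapes here are assumptions, not V8's real hierarchy:

// Toy hierarchy, not V8's real one: regular pages and large pages are both
// memory chunks, so mapping an address to its containing region must resolve
// to the common base type once large objects can be young.
struct MemoryChunk {
  bool in_young_generation = false;
  bool is_large_page = false;
};
struct Page : MemoryChunk {};       // fixed-size chunk holding many objects
struct LargePage : MemoryChunk {};  // chunk backing a single large object

int main() {
  LargePage young_large;            // young large objects exist now
  young_large.in_young_generation = true;
  young_large.is_large_page = true;
  // Code that assumed every slot value lives on a Page would misclassify
  // this object; going through MemoryChunk handles both page kinds.
  MemoryChunk& chunk = young_large;
  return chunk.in_young_generation ? 0 : 1;
}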

test/cctest/heap/test-array-buffer-tracker.cc

@@ -308,10 +308,11 @@ TEST(ArrayBuffer_SemiSpaceCopyThenPagePromotion) {
 UNINITIALIZED_TEST(ArrayBuffer_SemiSpaceCopyMultipleTasks) {
   if (FLAG_optimize_for_size) return;
   ManualGCScope manual_gc_scope;
   // Test allocates JSArrayBuffer on different pages before triggering a
   // full GC that performs the semispace copy. If parallelized, this test
   // ensures proper synchronization in TSAN configurations.
-  FLAG_min_semi_space_size = 2 * Page::kPageSize / MB;
+  FLAG_min_semi_space_size = Max(2 * Page::kPageSize / MB, 1);
   v8::Isolate::CreateParams create_params;
   create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
   v8::Isolate* isolate = v8::Isolate::New(create_params);
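The Max(..., 1) clamp is one of the page-size-sensitive test fixes from the commit message: FLAG_min_semi_space_size is expressed in MB, so for page sizes below half a megabyte the integer division truncates to zero and the flag would be effectively disabled. A quick compile-time check of that arithmetic (the 512 KB and 256 KB page sizes are illustrative assumptions, and std::max stands in for V8's Max helper):

#include <algorithm>
#include <cstddef>

constexpr size_t KB = 1024;
constexpr size_t MB = KB * KB;

// With 512 KB pages, 2 * kPageSize / MB == 1, so the flag stays meaningful.
static_assert(2 * (512 * KB) / MB == 1, "two 512 KB pages round to 1 MB");

// With 256 KB pages, the same expression truncates to 0 MB, which would
// drop the minimum semi-space size entirely; clamping restores 1 MB.
static_assert(2 * (256 * KB) / MB == 0, "two 256 KB pages truncate to 0 MB");
static_assert(std::max(2 * (256 * KB) / MB, size_t{1}) == 1, "guard kicks in");

int main() {}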

test/cctest/heap/test-heap.cc

@@ -1927,7 +1927,7 @@ TEST(HeapNumberAlignment) {
     AlignNewSpace(required_alignment, offset);
     Handle<Object> number_new = factory->NewNumber(1.000123);
     CHECK(number_new->IsHeapNumber());
-    CHECK(Heap::InNewSpace(*number_new));
+    CHECK(Heap::InYoungGeneration(*number_new));
     CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_new)->address(),
                                      required_alignment));
@@ -1956,7 +1956,7 @@ TEST(MutableHeapNumberAlignment) {
     AlignNewSpace(required_alignment, offset);
     Handle<Object> number_new = factory->NewMutableHeapNumber(1.000123);
     CHECK(number_new->IsMutableHeapNumber());
-    CHECK(Heap::InNewSpace(*number_new));
+    CHECK(Heap::InYoungGeneration(*number_new));
     CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_new)->address(),
                                      required_alignment));
@@ -2021,6 +2021,8 @@ TEST(GrowAndShrinkNewSpace) {
   // Make sure we're in a consistent state to start out.
   CcTest::CollectAllGarbage();
+  CcTest::CollectAllGarbage();
+  new_space->Shrink();
 
   // Explicitly growing should double the space capacity.
   size_t old_capacity, new_capacity;
@@ -3150,6 +3152,9 @@ TEST(ReleaseOverReservedPages) {
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
   v8::HandleScope scope(CcTest::isolate());
+  // Ensure that the young generation is empty.
+  CcTest::CollectGarbage(NEW_SPACE);
+  CcTest::CollectGarbage(NEW_SPACE);
   static const int number_of_test_pages = 20;
   // Prepare many pages with low live-bytes count.
@@ -3183,7 +3188,7 @@
   // boots, but if the 20 small arrays don't fit on the first page then that's
   // an indication that it is too small.
   CcTest::CollectAllAvailableGarbage();
-  CHECK_EQ(initial_page_count, old_space->CountTotalPages());
+  CHECK_GE(initial_page_count, old_space->CountTotalPages());
 }
 
 static int forced_gc_counter = 0;
@@ -6133,7 +6138,7 @@ HEAP_TEST(Regress670675) {
   if (marking->IsStopped()) {
     marking->Start(i::GarbageCollectionReason::kTesting);
   }
-  size_t array_length = Page::kPageSize / kTaggedSize + 100;
+  size_t array_length = 128 * KB;
   size_t n = heap->OldGenerationSpaceAvailable() / array_length;
   for (size_t i = 0; i < n + 40; i++) {
     {