From a45048e20582e7fb1f1f5c7d4b77110dab1f9cba Mon Sep 17 00:00:00 2001
From: Michael Lippautz
Date: Thu, 20 Jul 2017 15:34:04 +0200
Subject: [PATCH] [heap] Parallel Scavenge

Bug: chromium:738865
Change-Id: Ie18574bb067438816238e2cf930e6d2a7bc5ecef
Reviewed-on: https://chromium-review.googlesource.com/570579
Commit-Queue: Michael Lippautz
Reviewed-by: Ulan Degenbaev
Cr-Commit-Position: refs/heads/master@{#46798}
---
 src/heap/heap.cc                        |  23 ++++--
 src/heap/heap.h                         |   6 +-
 src/heap/local-allocator.h              |  31 ++++
 src/heap/scavenger-inl.h                |  96 +++++++++++++++++------
 src/heap/scavenger.cc                   |  36 ++++++---
 src/heap/scavenger.h                    | 100 ++++++++++++------------
 src/heap/spaces-inl.h                   |  22 ++++++
 src/heap/spaces.h                       |   4 +
 src/objects/string-inl.h                |   2 +-
 test/cctest/heap/test-page-promotion.cc |   2 +
 test/cctest/test-api.cc                 |   3 +
 11 files changed, 227 insertions(+), 98 deletions(-)

diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 56895e1a1d..f8fb0667b9 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -1804,10 +1804,11 @@ class ScavengingItem : public ItemParallelJob::Item {
 class ScavengingTask final : public ItemParallelJob::Task {
  public:
-  ScavengingTask(Heap* heap, Scavenger* scavenger)
+  ScavengingTask(Heap* heap, Scavenger* scavenger, Scavenger::Barrier* barrier)
       : ItemParallelJob::Task(heap->isolate()),
         heap_(heap),
-        scavenger_(scavenger) {}
+        scavenger_(scavenger),
+        barrier_(barrier) {}

   void RunInParallel() final {
     double scavenging_time = 0.0;
@@ -1818,6 +1819,10 @@ class ScavengingTask final : public ItemParallelJob::Task {
         item->Process(scavenger_);
         item->MarkFinished();
       }
+      while (!barrier_->Done()) {
+        scavenger_->Process(barrier_);
+        barrier_->Wait();
+      }
       scavenger_->Process();
     }
     if (FLAG_trace_parallel_scavenge) {
@@ -1831,6 +1836,7 @@ class ScavengingTask final : public ItemParallelJob::Task {
  private:
   Heap* const heap_;
   Scavenger* const scavenger_;
+  Scavenger::Barrier* const barrier_;
 };

 class PageScavengingItem final : public ScavengingItem {
@@ -1868,8 +1874,14 @@ class PageScavengingItem final : public ScavengingItem {
 };

 int Heap::NumberOfScavengeTasks() {
-  CHECK(!FLAG_parallel_scavenge);
-  return 1;
+  if (!FLAG_parallel_scavenge) return 1;
+  const int num_scavenge_tasks =
+      static_cast<int>(new_space()->TotalCapacity()) / MB;
+  return Max(
+      1,
+      Min(Min(num_scavenge_tasks, kMaxScavengerTasks),
+          static_cast<int>(
+              V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())));
 }

 void Heap::Scavenge() {
@@ -1910,12 +1922,13 @@ void Heap::Scavenge() {
   const bool is_logging = IsLogging(isolate());
   const bool is_incremental_marking = incremental_marking()->IsMarking();
   const int num_scavenge_tasks = NumberOfScavengeTasks();
+  Scavenger::Barrier barrier(num_scavenge_tasks);
   CopiedList copied_list(num_scavenge_tasks);
   PromotionList promotion_list(num_scavenge_tasks);
   for (int i = 0; i < num_scavenge_tasks; i++) {
     scavengers[i] = new Scavenger(this, is_logging, is_incremental_marking,
                                   &copied_list, &promotion_list, i);
-    job.AddTask(new ScavengingTask(this, scavengers[i]));
+    job.AddTask(new ScavengingTask(this, scavengers[i], &barrier));
   }

   RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
diff --git a/src/heap/heap.h b/src/heap/heap.h
index fd77d46483..f33df448c0 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -1631,7 +1631,11 @@ class Heap {

   static const int kInitialFeedbackCapacity = 256;

-  static const int kMaxScavengerTasks = 1;
+#ifdef V8_TARGET_ARCH_ARM
+  static const int kMaxScavengerTasks = 2;
+#else
+  static const int kMaxScavengerTasks = 8;
+#endif

   Heap();
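
Note: Heap::NumberOfScavengeTasks() above sizes the job at roughly one task
per MB of new-space capacity, clamped by kMaxScavengerTasks and by the
platform's available background threads. A minimal standalone sketch of the
same clamping, using std::min/std::max instead of V8's Min/Max helpers (all
names here are illustrative, not the real implementation):

    #include <algorithm>
    #include <cstddef>

    // One task per MB of new-space capacity, clamped to the configured
    // maximum and to the number of available background threads.
    int NumberOfScavengeTasksSketch(size_t new_space_capacity_bytes,
                                    int max_scavenger_tasks,
                                    int available_background_threads) {
      const size_t kMB = 1024 * 1024;
      const int by_capacity =
          static_cast<int>(new_space_capacity_bytes / kMB);
      return std::max(1, std::min(std::min(by_capacity, max_scavenger_tasks),
                                  available_background_threads));
    }
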
diff --git a/src/heap/local-allocator.h b/src/heap/local-allocator.h
index 685d911e89..5628b413f8 100644
--- a/src/heap/local-allocator.h
+++ b/src/heap/local-allocator.h
@@ -55,6 +55,21 @@ class LocalAllocator {
     }
   }

+  void FreeLast(AllocationSpace space, HeapObject* object, int object_size) {
+    switch (space) {
+      case NEW_SPACE:
+        FreeLastInNewSpace(object, object_size);
+        return;
+      case OLD_SPACE:
+        FreeLastInOldSpace(object, object_size);
+        return;
+      default:
+        // Only new and old space supported.
+        UNREACHABLE();
+        break;
+    }
+  }
+
  private:
   AllocationResult AllocateInNewSpace(int object_size,
                                       AllocationAlignment alignment) {
@@ -97,6 +112,22 @@ class LocalAllocator {
     return allocation;
   }

+  void FreeLastInNewSpace(HeapObject* object, int object_size) {
+    if (!new_space_lab_.TryFreeLast(object, object_size)) {
+      // We couldn't free the last object so we have to write a proper filler.
+      heap_->CreateFillerObjectAt(object->address(), object_size,
+                                  ClearRecordedSlots::kNo);
+    }
+  }
+
+  void FreeLastInOldSpace(HeapObject* object, int object_size) {
+    if (!compaction_spaces_.Get(OLD_SPACE)->TryFreeLast(object, object_size)) {
+      // We couldn't free the last object so we have to write a proper filler.
+      heap_->CreateFillerObjectAt(object->address(), object_size,
+                                  ClearRecordedSlots::kNo);
+    }
+  }
+
   Heap* const heap_;
   NewSpace* const new_space_;
   CompactionSpaceCollection compaction_spaces_;
diff --git a/src/heap/scavenger-inl.h b/src/heap/scavenger-inl.h
index 59c8fe325e..a7f573c4d7 100644
--- a/src/heap/scavenger-inl.h
+++ b/src/heap/scavenger-inl.h
@@ -34,13 +34,32 @@ bool ContainsOnlyData(VisitorId visitor_id) {

 }  // namespace

-void Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
+void Scavenger::PageMemoryFence(Object* object) {
+#ifdef THREAD_SANITIZER
+  // Perform a dummy acquire load to tell TSAN that there is no data race
+  // with page initialization.
+  if (object->IsHeapObject()) {
+    MemoryChunk* chunk =
+        MemoryChunk::FromAddress(HeapObject::cast(object)->address());
+    CHECK_NOT_NULL(chunk->synchronized_heap());
+  }
+#endif
+}
+
+bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
                               int size) {
   // Copy the content of source to target.
-  heap()->CopyBlock(target->address(), source->address(), size);
+  target->set_map_word(MapWord::FromMap(map));
+  heap()->CopyBlock(target->address() + kPointerSize,
+                    source->address() + kPointerSize, size - kPointerSize);

-  // Set the forwarding address.
-  source->set_map_word(MapWord::FromForwardingAddress(target));
+  HeapObject* old = base::AsAtomicWord::Release_CompareAndSwap(
+      reinterpret_cast<HeapObject**>(source->address()), map,
+      MapWord::FromForwardingAddress(target).ToMap());
+  if (old != map) {
+    // Other task migrated the object.
+    return false;
+  }

   if (V8_UNLIKELY(is_logging_)) {
     // Update NewSpace stats if necessary.
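
Note: the MigrateObject() change above is the heart of the parallelization:
the forwarding pointer is installed with a release compare-and-swap on the
map word, so exactly one task wins the migration of any given object. A
minimal sketch of the same idea using std::atomic (the types and the
MapWordValue alias are illustrative stand-ins; V8 uses its own
base::AsAtomicWord wrapper, not std::atomic):

    #include <atomic>
    #include <cstdint>

    using MapWordValue = uintptr_t;  // stand-in for V8's MapWord

    // Returns true iff this task won the race to migrate the object.
    // Release ordering publishes the copied payload before the forwarding
    // word becomes visible; on failure, |expected| holds the winner's word.
    bool TryInstallForwardingWord(std::atomic<MapWordValue>* map_slot,
                                  MapWordValue old_map_word,
                                  MapWordValue forwarding_word) {
      MapWordValue expected = old_map_word;
      return map_slot->compare_exchange_strong(expected, forwarding_word,
                                               std::memory_order_release,
                                               std::memory_order_relaxed);
    }

A task that loses this race uses the winner's forwarding address instead of
its own copy, which is why LocalAllocator::FreeLast() above exists.
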
@@ -49,10 +68,12 @@ void Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
   }

   if (is_incremental_marking_) {
-    heap()->incremental_marking()->TransferColor(source, target);
+    heap()->incremental_marking()->TransferColor(source,
+                                                 target);
   }

   heap()->UpdateAllocationSite(map, source, &local_pretenuring_feedback_);
+  return true;
 }

 bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
@@ -66,10 +87,16 @@ bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
   if (allocation.To(&target)) {
     DCHECK(ObjectMarking::IsWhite(
         target, heap()->mark_compact_collector()->marking_state(target)));
-    MigrateObject(map, object, target, object_size);
+    const bool self_success = MigrateObject(map, object, target, object_size);
+    if (!self_success) {
+      allocator_.FreeLast(NEW_SPACE, target, object_size);
+      MapWord map_word = object->map_word();
+      *slot = map_word.ToForwardingAddress();
+      return true;
+    }
     *slot = target;

-    copied_list_.Insert(target, object_size);
+    copied_list_.Push(ObjectAndSize(target, object_size));
     copied_size_ += object_size;
     return true;
   }
@@ -86,7 +113,13 @@ bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
   if (allocation.To(&target)) {
     DCHECK(ObjectMarking::IsWhite(
         target, heap()->mark_compact_collector()->marking_state(target)));
-    MigrateObject(map, object, target, object_size);
+    const bool self_success = MigrateObject(map, object, target, object_size);
+    if (!self_success) {
+      allocator_.FreeLast(OLD_SPACE, target, object_size);
+      MapWord map_word = object->map_word();
+      *slot = map_word.ToForwardingAddress();
+      return true;
+    }
     *slot = target;

     if (!ContainsOnlyData(static_cast<VisitorId>(map->visitor_id()))) {
@@ -106,14 +139,10 @@ void Scavenger::EvacuateObjectDefault(Map* map, HeapObject** slot,
   if (!heap()->ShouldBePromoted(object->address())) {
     // A semi-space copy may fail due to fragmentation. In that case, we
     // try to promote the object.
-    if (SemiSpaceCopyObject(map, slot, object, object_size)) {
-      return;
-    }
+    if (SemiSpaceCopyObject(map, slot, object, object_size)) return;
   }

-  if (PromoteObject(map, slot, object, object_size)) {
-    return;
-  }
+  if (PromoteObject(map, slot, object, object_size)) return;

   // If promotion failed, we try to copy the object to the other semi-space
   if (SemiSpaceCopyObject(map, slot, object, object_size)) return;
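
Note: SemiSpaceCopyObject()/PromoteObject() above show the loser path: the
task has already allocated and filled a speculative copy, discovers that
another task installed its forwarding pointer first, hands the copy back
via allocator_.FreeLast(), and adopts the winner's forwarding address. A
self-contained toy model of that flow (ToyObject, ToyBumpAllocator and
CopyOrAdopt are hypothetical; they only mirror the control flow):

    #include <atomic>

    struct ToyObject {
      std::atomic<ToyObject*> forwarding{nullptr};
      int payload = 0;
    };

    struct ToyBumpAllocator {
      ToyObject slots[64];
      int top = 0;
      ToyObject* Allocate() { return &slots[top++]; }
      // Mirrors FreeLast(): only the most recent allocation can be undone.
      void FreeLast(ToyObject* object) {
        if (top > 0 && object == &slots[top - 1]) top--;
      }
    };

    ToyObject* CopyOrAdopt(ToyObject* object, ToyBumpAllocator* allocator) {
      ToyObject* copy = allocator->Allocate();
      copy->payload = object->payload;  // speculative copy of the contents
      ToyObject* expected = nullptr;
      if (object->forwarding.compare_exchange_strong(
              expected, copy, std::memory_order_release,
              std::memory_order_acquire)) {
        return copy;  // This task won; its copy is the canonical one.
      }
      allocator->FreeLast(copy);  // Lost the race: roll back our copy...
      return expected;            // ...and use the winner's forwarding address.
    }
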
@@ -124,12 +153,15 @@ void Scavenger::EvacuateObjectDefault(Map* map, HeapObject** slot,
 void Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
                                    ThinString* object, int object_size) {
   if (!is_incremental_marking_) {
+    // Loading actual is fine in a parallel setting as there is no write.
     HeapObject* actual = object->actual();
     *slot = actual;
     // ThinStrings always refer to internalized strings, which are
     // always in old space.
     DCHECK(!heap()->InNewSpace(actual));
-    object->set_map_word(MapWord::FromForwardingAddress(actual));
+    base::AsAtomicWord::Relaxed_Store(
+        reinterpret_cast<Map**>(object->address()),
+        MapWord::FromForwardingAddress(actual).ToMap());
     return;
   }

@@ -146,7 +178,9 @@ void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
     *slot = first;

     if (!heap()->InNewSpace(first)) {
-      object->set_map_word(MapWord::FromForwardingAddress(first));
+      base::AsAtomicWord::Relaxed_Store(
+          reinterpret_cast<Map**>(object->address()),
+          MapWord::FromForwardingAddress(first).ToMap());
       return;
     }

@@ -155,12 +189,16 @@ void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
       HeapObject* target = first_word.ToForwardingAddress();

       *slot = target;
-      object->set_map_word(MapWord::FromForwardingAddress(target));
+      base::AsAtomicWord::Relaxed_Store(
+          reinterpret_cast<Map**>(object->address()),
+          MapWord::FromForwardingAddress(target).ToMap());
       return;
     }
-
-    EvacuateObject(slot, first_word.ToMap(), first);
-    object->set_map_word(MapWord::FromForwardingAddress(*slot));
+    Map* map = first_word.ToMap();
+    EvacuateObjectDefault(map, slot, first, first->SizeFromMap(map));
+    base::AsAtomicWord::Relaxed_Store(
+        reinterpret_cast<Map**>(object->address()),
+        MapWord::FromForwardingAddress(*slot).ToMap());
     return;
   }

@@ -172,12 +210,16 @@ void Scavenger::EvacuateObject(HeapObject** slot, Map* map,
   SLOW_DCHECK(heap_->InFromSpace(source));
   SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
   int size = source->SizeFromMap(map);
+  // Cannot use ::cast() below because that would add checks in debug mode
+  // that require re-reading the map.
   switch (static_cast<VisitorId>(map->visitor_id())) {
     case kVisitThinString:
-      EvacuateThinString(map, slot, ThinString::cast(source), size);
+      EvacuateThinString(map, slot, reinterpret_cast<ThinString*>(source),
+                         size);
       break;
     case kVisitShortcutCandidate:
-      EvacuateShortcutCandidate(map, slot, ConsString::cast(source), size);
+      EvacuateShortcutCandidate(map, slot,
+                                reinterpret_cast<ConsString*>(source), size);
       break;
     default:
       EvacuateObjectDefault(map, slot, source, size);
@@ -188,10 +230,7 @@ void Scavenger::EvacuateObject(HeapObject** slot, Map* map,

 void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
   DCHECK(heap()->InFromSpace(object));

-  // We use the first word (where the map pointer usually is) of a heap
-  // object to record the forwarding pointer. A forwarding pointer can
-  // point to an old space, the code space, or the to space of the new
-  // generation.
+  // Relaxed load here. We either load a forwarding pointer or the map.
   MapWord first_word = object->map_word();

   // If the first word is a forwarding address, the object has already been
@@ -225,9 +264,14 @@ SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
     // callback in to space, the object is still live.
     // Unfortunately, we do not know about the slot. It could be in a
     // just freed free space object.
+    PageMemoryFence(object);
     if (heap->InToSpace(object)) {
       return KEEP_SLOT;
     }
+  } else if (heap->InToSpace(object)) {
+    // Already updated slot. This can happen when processing of the work list
+    // is interleaved with processing roots.
+    return KEEP_SLOT;
   }
   // Slots can point to "to" space if the slot has been recorded multiple
   // times in the remembered set. We remove the redundant slot now.
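
Note: PageMemoryFence() (introduced earlier in this file) documents, for
TSAN, that reading objects on a page allocated by another task is safe
because the page's owner field is published with release/acquire
semantics. A sketch of that publication pattern with std::atomic
(ToyPage/ToyHeap are illustrative stand-ins, not V8's MemoryChunk):

    #include <atomic>

    struct ToyHeap {};

    struct ToyPage {
      std::atomic<ToyHeap*> heap{nullptr};
      char payload[4096];  // object storage, initialized before publishing

      void Publish(ToyHeap* owner) {
        // All prior initialization of |payload| happens-before this store.
        heap.store(owner, std::memory_order_release);
      }

      ToyHeap* SynchronizedHeap() {
        // Pairs with Publish(); once this returns non-null, the page
        // contents are guaranteed visible to the calling thread.
        return heap.load(std::memory_order_acquire);
      }
    };
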
diff --git a/src/heap/scavenger.cc b/src/heap/scavenger.cc
index 41c6176ee3..99d5027419 100644
--- a/src/heap/scavenger.cc
+++ b/src/heap/scavenger.cc
@@ -26,12 +26,15 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
          slot_address += kPointerSize) {
       Object** slot = reinterpret_cast<Object**>(slot_address);
       Object* target = *slot;
+      scavenger_->PageMemoryFence(target);

       if (target->IsHeapObject()) {
         if (heap_->InFromSpace(target)) {
           scavenger_->ScavengeObject(reinterpret_cast<HeapObject**>(slot),
                                      HeapObject::cast(target));
           target = *slot;
+          scavenger_->PageMemoryFence(target);
+
           if (heap_->InNewSpace(target)) {
             SLOW_DCHECK(target->IsHeapObject());
             SLOW_DCHECK(heap_->InToSpace(target));
@@ -73,9 +76,9 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
   // objects. Grey object's slots would be rescanned.
   // White object might not survive until the end of collection
   // it would be a violation of the invariant to record its slots.
-  const bool record_slots =
-      heap()->incremental_marking()->IsCompacting() &&
-      ObjectMarking::IsBlack(target, MarkingState::Internal(target));
+  const bool record_slots = heap()->incremental_marking()->IsCompacting() &&
+                            ObjectMarking::IsBlack(
+                                target, MarkingState::Internal(target));
   IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
   if (target->IsJSFunction()) {
     // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for
@@ -86,34 +89,41 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
   }
 }

-void Scavenger::Process() {
+void Scavenger::Process(Barrier* barrier) {
   // Threshold when to switch processing the promotion list to avoid
   // allocating too much backing store in the worklist.
   const int kProcessPromotionListThreshold = kPromotionListSegmentSize / 2;
   ScavengeVisitor scavenge_visitor(heap(), this);
+  const bool have_barrier = barrier != nullptr;
   bool done;
+  size_t objects = 0;
   do {
     done = true;
-    AddressRange range;
+    ObjectAndSize object_and_size;
     while ((promotion_list_.LocalPushSegmentSize() <
             kProcessPromotionListThreshold) &&
-           copied_list_.Pop(&range)) {
-      for (Address current = range.first; current < range.second;) {
-        HeapObject* object = HeapObject::FromAddress(current);
-        int size = object->Size();
-        scavenge_visitor.Visit(object);
-        current += size;
-      }
+           copied_list_.Pop(&object_and_size)) {
+      scavenge_visitor.Visit(object_and_size.first);
       done = false;
+      if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
+        if (!copied_list_.IsGlobalPoolEmpty()) {
+          barrier->NotifyAll();
+        }
+      }
     }
-    ObjectAndSize object_and_size;
+
     while (promotion_list_.Pop(&object_and_size)) {
       HeapObject* target = object_and_size.first;
       int size = object_and_size.second;
       DCHECK(!target->IsMap());
       IterateAndScavengePromotedObject(target, size);
       done = false;
+      if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
+        if (!promotion_list_.IsGlobalPoolEmpty()) {
+          barrier->NotifyAll();
+        }
+      }
     }
   } while (!done);
 }
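
Note: Process() above drains two work lists and, every kInterruptThreshold
objects, wakes waiting tasks if the shared (global) part of a work list is
non-empty. The copied list now stores individual ObjectAndSize entries in a
Worklist with per-task local segments plus a global pool. A toy sketch of
that local-segment/global-pool idea (this is an illustration of the
concept, not V8's Worklist; in the real code each task accesses its local
segment through its own Worklist::View):

    #include <cstddef>
    #include <mutex>
    #include <utility>
    #include <vector>

    template <typename Entry, int kSegmentSize>
    class ToyWorklist {
     public:
      void Push(const Entry& entry) {
        local_.push_back(entry);
        if (local_.size() >= static_cast<size_t>(kSegmentSize)) {
          PublishLocalSegment();  // only full segments hit the shared pool
        }
      }

      bool Pop(Entry* entry) {
        if (local_.empty() && !StealSegmentFromGlobal()) return false;
        *entry = local_.back();
        local_.pop_back();
        return true;
      }

      size_t LocalPushSegmentSize() const { return local_.size(); }

      bool IsGlobalPoolEmpty() {
        std::lock_guard<std::mutex> guard(mutex_);
        return global_.empty();
      }

     private:
      void PublishLocalSegment() {
        std::lock_guard<std::mutex> guard(mutex_);
        global_.push_back(std::move(local_));
        local_.clear();
      }

      bool StealSegmentFromGlobal() {
        std::lock_guard<std::mutex> guard(mutex_);
        if (global_.empty()) return false;
        local_ = std::move(global_.back());
        global_.pop_back();
        return true;
      }

      std::vector<Entry> local_;                // per-task segment
      std::vector<std::vector<Entry>> global_;  // shared, mutex-protected
      std::mutex mutex_;
    };
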
diff --git a/src/heap/scavenger.h b/src/heap/scavenger.h
index 342c8838ad..7d4ac192ef 100644
--- a/src/heap/scavenger.h
+++ b/src/heap/scavenger.h
@@ -5,6 +5,7 @@
 #ifndef V8_HEAP_SCAVENGER_H_
 #define V8_HEAP_SCAVENGER_H_

+#include "src/base/platform/condition-variable.h"
 #include "src/heap/local-allocator.h"
 #include "src/heap/objects-visiting.h"
 #include "src/heap/slot-set.h"
@@ -13,62 +14,50 @@
 namespace v8 {
 namespace internal {

-static const int kCopiedListSegmentSize = 64;
-static const int kPromotionListSegmentSize = 64;
+static const int kCopiedListSegmentSize = 256;
+static const int kPromotionListSegmentSize = 256;

 using AddressRange = std::pair<Address, Address>;
-using CopiedList = Worklist<AddressRange, kCopiedListSegmentSize>;
 using ObjectAndSize = std::pair<HeapObject*, int>;
+using CopiedList = Worklist<ObjectAndSize, kCopiedListSegmentSize>;
 using PromotionList = Worklist<ObjectAndSize, kPromotionListSegmentSize>;

-// A list of copied ranges. Keeps the last consecutive range local and announces
-// all other ranges to a global work list.
-class CopiedRangesList {
- public:
-  CopiedRangesList(CopiedList* copied_list, int task_id)
-      : current_start_(nullptr),
-        current_end_(nullptr),
-        copied_list_(copied_list, task_id) {}
-
-  ~CopiedRangesList() {
-    CHECK_NULL(current_start_);
-    CHECK_NULL(current_end_);
-  }
-
-  void Insert(HeapObject* object, int size) {
-    const Address object_address = object->address();
-    if (current_end_ != object_address) {
-      if (current_start_ != nullptr) {
-        copied_list_.Push(AddressRange(current_start_, current_end_));
-      }
-      current_start_ = object_address;
-      current_end_ = current_start_ + size;
-      return;
-    }
-    DCHECK_EQ(current_end_, object_address);
-    current_end_ += size;
-    return;
-  }
-
-  bool Pop(AddressRange* entry) {
-    if (copied_list_.Pop(entry)) {
-      return true;
-    } else if (current_start_ != nullptr) {
-      *entry = AddressRange(current_start_, current_end_);
-      current_start_ = current_end_ = nullptr;
-      return true;
-    }
-    return false;
-  }
-
- private:
-  Address current_start_;
-  Address current_end_;
-  CopiedList::View copied_list_;
-};
-
 class Scavenger {
  public:
+  class Barrier {
+   public:
+    explicit Barrier(int tasks) : tasks_(tasks), waiting_(0), done_(false) {}
+
+    void NotifyAll() {
+      base::LockGuard<base::Mutex> guard(&mutex_);
+      if (waiting_ > 0) condition_.NotifyAll();
+    }
+
+    void Wait() {
+      base::LockGuard<base::Mutex> guard(&mutex_);
+      waiting_++;
+      if (waiting_ == tasks_) {
+        done_ = true;
+        condition_.NotifyAll();
+      } else {
+        // Spurious wakeup is ok here.
+        condition_.Wait(&mutex_);
+      }
+      waiting_--;
+    }
+
+    void Reset() { done_ = false; }
+
+    bool Done() { return done_; }
+
+   private:
+    base::ConditionVariable condition_;
+    base::Mutex mutex_;
+    int tasks_;
+    int waiting_;
+    bool done_;
+  };
+
   Scavenger(Heap* heap, bool is_logging, bool is_incremental_marking,
             CopiedList* copied_list, PromotionList* promotion_list, int task_id)
       : heap_(heap),
@@ -92,7 +81,7 @@ class Scavenger {
   // Processes remaining work (=objects) after single objects have been
   // manually scavenged using ScavengeObject or CheckAndScavengeObject.
-  void Process();
+  void Process(Barrier* barrier = nullptr);

   // Finalize the Scavenger. Needs to be called from the main thread.
   void Finalize();
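
Note: the Barrier above is "done" once every task is waiting at the same
time, i.e. no task found new work to publish; any task that publishes work
to a global pool wakes the waiters. A self-contained sketch of how the task
loop in ScavengingTask::RunInParallel() drives such a barrier, using the
standard library instead of V8's base primitives (ToyBarrier/RunTask are
illustrative, not the real classes):

    #include <condition_variable>
    #include <functional>
    #include <mutex>

    class ToyBarrier {
     public:
      explicit ToyBarrier(int tasks) : tasks_(tasks) {}

      void NotifyAll() {
        std::lock_guard<std::mutex> guard(mutex_);
        if (waiting_ > 0) condition_.notify_all();
      }

      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        waiting_++;
        if (waiting_ == tasks_) {
          done_ = true;  // everyone is idle: the scavenge phase is over
          condition_.notify_all();
        } else {
          condition_.wait(lock);  // spurious wakeups are fine; caller re-checks
        }
        waiting_--;
      }

      bool Done() {
        std::lock_guard<std::mutex> guard(mutex_);
        return done_;
      }

     private:
      std::mutex mutex_;
      std::condition_variable condition_;
      const int tasks_;
      int waiting_ = 0;
      bool done_ = false;
    };

    // Mirrors the loop in RunInParallel(): drain work, wait for more, and
    // finish with one last drain once all tasks agree there is nothing left.
    void RunTask(ToyBarrier* barrier, const std::function<void()>& process) {
      while (!barrier->Done()) {
        process();
        barrier->Wait();
      }
      process();
    }
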
@@ -101,12 +90,17 @@ class Scavenger {
   size_t bytes_promoted() const { return promoted_size_; }

  private:
+  // Number of objects to process before interrupting for potentially waking
+  // up other tasks.
+  static const int kInterruptThreshold = 128;
   static const int kInitialLocalPretenuringFeedbackCapacity = 256;

   inline Heap* heap() { return heap_; }

+  inline void PageMemoryFence(Object* object);
+
   // Copies |source| to |target| and sets the forwarding pointer in |source|.
-  V8_INLINE void MigrateObject(Map* map, HeapObject* source, HeapObject* target,
+  V8_INLINE bool MigrateObject(Map* map, HeapObject* source, HeapObject* target,
                                int size);

   V8_INLINE bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
@@ -138,13 +132,15 @@ class Scavenger {
   Heap* const heap_;
   PromotionList::View promotion_list_;
-  CopiedRangesList copied_list_;
+  CopiedList::View copied_list_;
   base::HashMap local_pretenuring_feedback_;
   size_t copied_size_;
   size_t promoted_size_;
   LocalAllocator allocator_;
   bool is_logging_;
   bool is_incremental_marking_;
+
+  friend class IterateAndScavengePromotedObjectsVisitor;
 };

 // Helper class for turning the scavenger into an object visitor that is also
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
index 0fef117b7e..1121e3422b 100644
--- a/src/heap/spaces-inl.h
+++ b/src/heap/spaces-inl.h
@@ -172,6 +172,17 @@ intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
   return added;
 }

+bool PagedSpace::TryFreeLast(HeapObject* object, int object_size) {
+  if (allocation_info_.top() != nullptr) {
+    const Address object_address = object->address();
+    if ((allocation_info_.top() - object_size) == object_address) {
+      allocation_info_.set_top(object_address);
+      return true;
+    }
+  }
+  return false;
+}
+
 MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
   MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
   uintptr_t offset = addr - chunk->address();
@@ -519,6 +530,17 @@ bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
   return false;
 }

+bool LocalAllocationBuffer::TryFreeLast(HeapObject* object, int object_size) {
+  if (IsValid()) {
+    const Address object_address = object->address();
+    if ((allocation_info_.top() - object_size) == object_address) {
+      allocation_info_.set_top(object_address);
+      return true;
+    }
+  }
+  return false;
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index a8394dd486..bfa9362643 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -1967,6 +1967,8 @@ class LocalAllocationBuffer {
   // Returns true if the merge was successful, false otherwise.
   inline bool TryMerge(LocalAllocationBuffer* other);

+  inline bool TryFreeLast(HeapObject* object, int object_size);
+
   // Close a LAB, effectively invalidating it. Returns the unused area.
   AllocationInfo Close();

@@ -2099,6 +2101,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
     return size_in_bytes - wasted;
   }

+  inline bool TryFreeLast(HeapObject* object, int object_size);
+
   void ResetFreeList() { free_list_.Reset(); }

   // Set space allocation info.
diff --git a/src/objects/string-inl.h b/src/objects/string-inl.h
index c83ecffa46..63f07532f1 100644
--- a/src/objects/string-inl.h
+++ b/src/objects/string-inl.h
@@ -514,7 +514,7 @@ String* ConsString::second() {
 }

 Object* ConsString::unchecked_second() {
-  return READ_FIELD(this, kSecondOffset);
+  return RELAXED_READ_FIELD(this, kSecondOffset);
 }

 void ConsString::set_second(String* value, WriteBarrierMode mode) {
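
Note: both TryFreeLast() overloads added above undo an aborted migration
only when the object is still the most recent bump-pointer allocation; in
that case the top pointer is simply moved back, otherwise the caller writes
a filler object. A standalone sketch of that address check (ToyAllocationInfo
and the Address alias are illustrative, not the real spaces API):

    #include <cstdint>

    using Address = uint8_t*;

    struct ToyAllocationInfo {
      Address top = nullptr;  // current bump-pointer allocation top
    };

    bool TryFreeLast(ToyAllocationInfo* info, Address object_address,
                     int object_size) {
      if (info->top != nullptr &&
          info->top - object_size == object_address) {
        info->top = object_address;  // roll the bump pointer back
        return true;
      }
      return false;  // not the last allocation: caller must write a filler
    }
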
diff --git a/test/cctest/heap/test-page-promotion.cc b/test/cctest/heap/test-page-promotion.cc
index c1543bd3bd..a9f825a716 100644
--- a/test/cctest/heap/test-page-promotion.cc
+++ b/test/cctest/heap/test-page-promotion.cc
@@ -26,6 +26,8 @@ v8::Isolate* NewIsolateForPagePromotion(int min_semi_space_size = 8,
   FLAG_parallel_compaction = false;
   FLAG_page_promotion = true;
   FLAG_page_promotion_threshold = 0;
+  // Parallel scavenge introduces too much fragmentation.
+  FLAG_parallel_scavenge = false;
   FLAG_min_semi_space_size = min_semi_space_size;
   // We cannot optimize for size as we require a new space with more than one
   // page.
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index fa6f1df125..bef52fb70b 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -47,6 +47,7 @@
 #include "src/execution.h"
 #include "src/futex-emulation.h"
 #include "src/heap/incremental-marking.h"
+#include "src/heap/local-allocator.h"
 #include "src/lookup.h"
 #include "src/objects-inl.h"
 #include "src/parsing/preparse-data.h"
@@ -7577,6 +7578,8 @@ static void SetFlag(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {

 static void IndependentWeakHandle(bool global_gc, bool interlinked) {
   i::FLAG_stress_incremental_marking = false;
+  // Parallel scavenge introduces too much fragmentation.
+  i::FLAG_parallel_scavenge = false;
   v8::Isolate* iso = CcTest::isolate();
   v8::HandleScope scope(iso);
   v8::Local<Context> context = Context::New(iso);