[heap] Support young large objects in concurrent marker.

The last allocated large object in the young generation is not
guaranteed to be initialized when the concurrent marker visits it.

This patch adds a mechanism for the concurrent marker to put such
objects on the on-hold worklist, similar to how new space objects are
handled.

Bug: chromium:852420
Change-Id: I749e9a7f3dcee6f177f4d95980a4f693c0fd4b04
Reviewed-on: https://chromium-review.googlesource.com/c/1454916
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59377}
Author: Ulan Degenbaev <ulan@chromium.org>
Date: 2019-02-05 17:20:35 +01:00 (committed by Commit Bot)
Parent: 25d7023638
Commit: 85fcaff1b0
6 changed files with 21 additions and 3 deletions
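
The sketch below is not part of the patch; it is a minimal, self-contained
C++ illustration of the mechanism the commit message describes: the space
publishes the address of its last (possibly uninitialized) large object, and
the concurrent marker defers exactly that object to an on-hold list until a
safepoint resets the pending address. All names here (ToyYoungLargeSpace,
PublishPending, Visit) are invented for illustration and are not V8 APIs.

// pending_object_sketch.cc -- illustrative only; not V8 code.
#include <atomic>
#include <cstdint>
#include <iostream>
#include <vector>

using Address = std::uintptr_t;

// Stand-in for a young large-object space: it remembers the address of the
// last allocated object, which may still be uninitialized when a concurrent
// marker encounters it.
class ToyYoungLargeSpace {
 public:
  // Record the freshly allocated (possibly uninitialized) object.
  void PublishPending(Address addr) {
    pending_object_.store(addr, std::memory_order_relaxed);
  }
  Address pending_object() const {
    return pending_object_.load(std::memory_order_relaxed);
  }
  // Called at a safepoint, once every object is known to be initialized.
  void ResetPendingObject() { pending_object_.store(0); }

 private:
  std::atomic<Address> pending_object_{0};
};

// Stand-in for the concurrent marker's visit step: the pending object is
// pushed onto an on-hold list instead of being marked immediately.
void Visit(const ToyYoungLargeSpace& space, Address addr,
           std::vector<Address>& on_hold, std::vector<Address>& marked) {
  if (addr == space.pending_object()) {
    on_hold.push_back(addr);  // revisit after the next safepoint
  } else {
    marked.push_back(addr);  // safe to read the object's fields now
  }
}

int main() {
  ToyYoungLargeSpace space;
  std::vector<Address> on_hold, marked;

  space.PublishPending(0x1000);           // last allocation, maybe uninitialized
  Visit(space, 0x0800, on_hold, marked);  // older object: marked directly
  Visit(space, 0x1000, on_hold, marked);  // pending object: put on hold

  space.ResetPendingObject();             // safepoint: everything initialized
  Visit(space, 0x1000, on_hold, marked);  // now marked directly

  std::cout << "marked: " << marked.size() << ", on hold: " << on_hold.size()
            << "\n";
  return 0;
}

In the patch itself, the check corresponds to the addr == new_large_object
condition added to ConcurrentMarking::Run, and the resets happen at the
safepoints shown below: Heap::Scavenge, IncrementalMarking::Step, and
MarkCompactCollector::EvacuatePrologue.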

--- a/src/heap/concurrent-marking.cc
+++ b/src/heap/concurrent-marking.cc
@@ -794,8 +794,10 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
         // The order of the two loads is important.
         Address new_space_top = heap_->new_space()->original_top_acquire();
         Address new_space_limit = heap_->new_space()->original_limit_relaxed();
+        Address new_large_object = heap_->new_lo_space()->pending_object();
         Address addr = object->address();
-        if (new_space_top <= addr && addr < new_space_limit) {
+        if ((new_space_top <= addr && addr < new_space_limit) ||
+            addr == new_large_object) {
           on_hold_->Push(task_id, object);
         } else {
           Map map = object->synchronized_map();

--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -2014,6 +2014,7 @@ void Heap::Scavenge() {
   // We also flip the young generation large object space. All large objects
   // will be in the from space.
   new_lo_space()->Flip();
+  new_lo_space()->ResetPendingObject();
 
   // Implements Cheney's copying algorithm
   LOG(isolate_, ResourceEvent("scavenge", "begin"));

--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -1061,6 +1061,7 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
   if (state_ == MARKING) {
     if (FLAG_concurrent_marking) {
       heap_->new_space()->ResetOriginalTop();
+      heap_->new_lo_space()->ResetPendingObject();
       // It is safe to merge back all objects that were on hold to the shared
       // work list at Step because we are at a safepoint where all objects
       // are properly initialized.

--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -2594,6 +2594,7 @@ void MarkCompactCollector::EvacuatePrologue() {
   new_space->ResetLinearAllocationArea();
 
   heap()->new_lo_space()->Flip();
+  heap()->new_lo_space()->ResetPendingObject();
 
   // Old space.
   DCHECK(old_space_evacuation_pages_.empty());

--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -3737,16 +3737,18 @@ void Page::Print() {
 #endif  // DEBUG
 
 NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap)
-    : LargeObjectSpace(heap, NEW_LO_SPACE) {}
+    : LargeObjectSpace(heap, NEW_LO_SPACE), pending_object_(0) {}
 
 AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
   // TODO(hpayer): Add heap growing strategy here.
   LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
   if (page == nullptr) return AllocationResult::Retry(identity());
+  HeapObject result = page->GetObject();
   page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
   page->SetFlag(MemoryChunk::TO_PAGE);
+  pending_object_.store(result->address(), std::memory_order_relaxed);
   page->InitializationMemoryFence();
-  return page->GetObject();
+  return result;
 }
 
 size_t NewLargeObjectSpace::Available() {

--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -3090,6 +3090,17 @@ class NewLargeObjectSpace : public LargeObjectSpace {
   void Flip();
 
   void FreeAllObjects();
+
+  // The last allocated object that is not guaranteed to be initialized when
+  // the concurrent marker visits it.
+  Address pending_object() {
+    return pending_object_.load(std::memory_order_relaxed);
+  }
+
+  void ResetPendingObject() { pending_object_.store(0); }
+
+ private:
+  std::atomic<Address> pending_object_;
 };
 
 class CodeLargeObjectSpace : public LargeObjectSpace {