[heap] Scavenger: Use LABs
Bug: chromium:738865
Change-Id: Icc3e292ded7f4097ef266d8db80f273a412a8b92
Reviewed-on: https://chromium-review.googlesource.com/565718
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46708}

parent 07a743df0a
commit 24195a6d06
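
Note for readers (not part of the commit message): a LAB, or local allocation buffer, lets each scavenging thread claim a fixed-size chunk of new space once, under synchronization, and then bump-allocate individual objects inside that private chunk without touching the shared allocation top again. The sketch below is a standalone illustration of that idea with hypothetical names; the actual implementation is the new src/heap/local-allocator.h further down.

// Illustrative sketch only -- hypothetical names, not V8 code.
#include <cstddef>
#include <cstdint>
#include <mutex>

struct SharedSpace {
  std::mutex mutex;
  uintptr_t top = 0;
  uintptr_t limit = 0;
  // Hands out a contiguous chunk under the lock, or 0 if exhausted.
  uintptr_t AllocateSynchronized(size_t bytes) {
    std::lock_guard<std::mutex> guard(mutex);
    if (top + bytes > limit) return 0;
    uintptr_t result = top;
    top += bytes;
    return result;
  }
};

class Lab {
 public:
  static const size_t kLabSize = 32 * 1024;
  explicit Lab(SharedSpace* space) : space_(space) {}
  // Bump allocation inside the private buffer; refills from the shared space
  // (one synchronized call per 32 KB) only when the buffer is exhausted.
  uintptr_t Allocate(size_t bytes) {
    if (top_ + bytes > limit_) {
      uintptr_t chunk = space_->AllocateSynchronized(kLabSize);
      if (chunk == 0) return 0;  // Caller must fall back (e.g. promote).
      top_ = chunk;
      limit_ = chunk + kLabSize;
    }
    uintptr_t result = top_;
    top_ += bytes;
    return result;
  }

 private:
  SharedSpace* space_;
  uintptr_t top_ = 0;
  uintptr_t limit_ = 0;
};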
BUILD.gn
@@ -1581,6 +1581,7 @@ v8_source_set("v8_base") {
     "src/heap/incremental-marking.cc",
     "src/heap/incremental-marking.h",
     "src/heap/item-parallel-job.h",
+    "src/heap/local-allocator.h",
     "src/heap/mark-compact-inl.h",
     "src/heap/mark-compact.cc",
     "src/heap/mark-compact.h",
@@ -1738,6 +1738,10 @@ void Heap::Scavenge() {
   isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
       &JSObject::IsUnmodifiedApiObject);
 
+  std::vector<MemoryChunk*> pages;
+  RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+      this, [&pages](MemoryChunk* chunk) { pages.push_back(chunk); });
+
   {
     // Copy roots.
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
@@ -1747,23 +1751,27 @@ void Heap::Scavenge() {
   {
     // Copy objects reachable from the old generation.
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
-    RememberedSet<OLD_TO_NEW>::Iterate(
-        this, SYNCHRONIZED, [this, &scavenger](Address addr) {
-          return scavenger.CheckAndScavengeObject(this, addr);
-        });
-
-    RememberedSet<OLD_TO_NEW>::IterateTyped(
-        this, SYNCHRONIZED,
-        [this, &scavenger](SlotType type, Address host_addr, Address addr) {
-          return UpdateTypedSlotHelper::UpdateTypedSlot(
-              isolate(), type, addr, [this, &scavenger](Object** addr) {
-                // We expect that objects referenced by code are long living.
-                // If we do not force promotion, then we need to clear
-                // old_to_new slots in dead code objects after mark-compact.
-                return scavenger.CheckAndScavengeObject(
-                    this, reinterpret_cast<Address>(addr));
-              });
-        });
+    for (MemoryChunk* chunk : pages) {
+      base::LockGuard<base::RecursiveMutex> guard(chunk->mutex());
+      RememberedSet<OLD_TO_NEW>::Iterate(
+          chunk, [this, &scavenger](Address addr) {
+            return scavenger.CheckAndScavengeObject(this, addr);
+          });
+      RememberedSet<OLD_TO_NEW>::IterateTyped(
+          chunk,
+          [this, &scavenger](SlotType type, Address host_addr, Address addr) {
+            return UpdateTypedSlotHelper::UpdateTypedSlot(
+                isolate(), type, addr, [this, &scavenger](Object** addr) {
+                  // We expect that objects referenced by code are long
+                  // living. If we do not force promotion, then we need to
+                  // clear old_to_new slots in dead code objects after
+                  // mark-compact.
+                  return scavenger.CheckAndScavengeObject(
+                      this, reinterpret_cast<Address>(addr));
+                });
+          });
+    }
   }
 
   {
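Reader's note on the Heap::Scavenge() hunk above (not from the commit): the old-to-new remembered set is no longer walked in a single SYNCHRONIZED pass over the whole heap; the pages are snapshotted into a vector first and every chunk is then processed while holding that chunk's own mutex, which is the shape needed if individual pages are later handed to parallel scavenger tasks. A minimal standalone sketch of the per-chunk locking pattern, with hypothetical names:

// Illustrative sketch only -- hypothetical names, not V8 APIs.
#include <functional>
#include <mutex>
#include <vector>

struct Chunk {
  std::mutex mutex;                   // Per-chunk lock, like MemoryChunk::mutex().
  std::vector<int> remembered_slots;  // Stand-in for an OLD_TO_NEW slot set.
};

// Snapshot the chunks first, then process each one under its own lock, so
// independent chunks could be distributed across worker tasks.
void ProcessRememberedSets(const std::vector<Chunk*>& heap_chunks,
                           const std::function<void(int)>& scavenge_slot) {
  std::vector<Chunk*> pages;
  for (Chunk* chunk : heap_chunks) pages.push_back(chunk);

  for (Chunk* chunk : pages) {
    std::lock_guard<std::mutex> guard(chunk->mutex);
    for (int slot : chunk->remembered_slots) scavenge_slot(slot);
  }
}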
src/heap/local-allocator.h (new file)
@@ -0,0 +1,99 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {

// Allocator encapsulating thread-local allocation. Assumes that all other
// allocations also go through LocalAllocator.
class LocalAllocator {
 public:
  static const int kLabSize = 32 * KB;
  static const int kMaxLabObjectSize = 8 * KB;

  explicit LocalAllocator(Heap* heap)
      : heap_(heap),
        new_space_(heap->new_space()),
        compaction_spaces_(heap),
        new_space_lab_(LocalAllocationBuffer::InvalidBuffer()) {}

  // Needs to be called from the main thread to finalize this LocalAllocator.
  void Finalize() {
    heap_->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
    // Give back remaining LAB space if this LocalAllocator's new space LAB
    // sits right next to new space allocation top.
    const AllocationInfo info = new_space_lab_.Close();
    const Address top = new_space_->top();
    if (info.limit() != nullptr && info.limit() == top) {
      DCHECK_NOT_NULL(info.top());
      *new_space_->allocation_top_address() = info.top();
    }
  }

  template <AllocationSpace space>
  AllocationResult Allocate(int object_size, AllocationAlignment alignment) {
    switch (space) {
      case NEW_SPACE:
        return AllocateInNewSpace(object_size, alignment);
      case OLD_SPACE:
        return compaction_spaces_.Get(OLD_SPACE)->AllocateRaw(object_size,
                                                              alignment);
      default:
        // Only new and old space supported.
        UNREACHABLE();
        break;
    }
  }

 private:
  AllocationResult AllocateInNewSpace(int object_size,
                                      AllocationAlignment alignment) {
    if (object_size > kMaxLabObjectSize) {
      return new_space_->AllocateRawSynchronized(object_size, alignment);
    }
    return AllocateInLAB(object_size, alignment);
  }

  inline bool NewLocalAllocationBuffer() {
    LocalAllocationBuffer saved_lab_ = new_space_lab_;
    AllocationResult result =
        new_space_->AllocateRawSynchronized(kLabSize, kWordAligned);
    new_space_lab_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
    if (new_space_lab_.IsValid()) {
      new_space_lab_.TryMerge(&saved_lab_);
      return true;
    }
    return false;
  }

  AllocationResult AllocateInLAB(int object_size,
                                 AllocationAlignment alignment) {
    AllocationResult allocation;
    if (!new_space_lab_.IsValid() && !NewLocalAllocationBuffer()) {
      return AllocationResult::Retry(OLD_SPACE);
    }
    allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
    if (allocation.IsRetry()) {
      if (!NewLocalAllocationBuffer()) {
        return AllocationResult::Retry(OLD_SPACE);
      } else {
        allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
        CHECK(!allocation.IsRetry());
      }
    }
    return allocation;
  }

  Heap* const heap_;
  NewSpace* const new_space_;
  CompactionSpaceCollection compaction_spaces_;
  LocalAllocationBuffer new_space_lab_;
};

}  // namespace internal
}  // namespace v8
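How the new allocator is meant to be driven, as far as this diff shows: the Scavenger (hunks below) obtains memory through Allocate<NEW_SPACE>() and Allocate<OLD_SPACE>() instead of going to the spaces directly, and calls Finalize() on the main thread once scavenging is done; a Retry result from the new-space path presumably pushes an object toward promotion, matching the AllocationResult::Retry(OLD_SPACE) returned by AllocateInLAB above. The following standalone sketch mirrors only that calling pattern with mock types and hypothetical names; it is not V8 code.

// Standalone analogue of the calling pattern -- mock types, hypothetical names.
#include <cstdint>

enum Space { NEW_SPACE, OLD_SPACE };

struct Result {
  uintptr_t address;  // 0 stands for "retry", mirroring AllocationResult::IsRetry().
  bool IsRetry() const { return address == 0; }
};

struct MockLocalAllocator {
  template <Space space>
  Result Allocate(int /* object_size */) {
    // A real implementation bump-allocates in a new-space LAB or in a private
    // old-space compaction space; here we simply pretend the request succeeds.
    return Result{16};
  }
  // Merges private spaces back and returns the unused LAB tail, as above.
  void Finalize() {}
};

// Shape of object evacuation after this commit: try the young generation
// first; on retry, promote into the old generation instead.
uintptr_t EvacuateObject(MockLocalAllocator& allocator, int object_size) {
  Result young = allocator.Allocate<NEW_SPACE>(object_size);
  if (!young.IsRetry()) return young.address;
  Result old = allocator.Allocate<OLD_SPACE>(object_size);
  return old.IsRetry() ? 0 : old.address;
}

// At the end of the scavenge the allocator is finalized on the main thread.
void FinishScavenge(MockLocalAllocator& allocator) { allocator.Finalize(); }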
@@ -36,13 +36,6 @@ bool ContainsOnlyData(VisitorId visitor_id) {
 
 void Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
                               int size) {
-  // If we migrate into to-space, then the to-space top pointer should be
-  // right after the target object. Incorporate double alignment
-  // over-allocation.
-  DCHECK(!heap()->InToSpace(target) ||
-         target->address() + size == heap()->new_space()->top() ||
-         target->address() + size + kPointerSize == heap()->new_space()->top());
-
   // Copy the content of source to target.
   heap()->CopyBlock(target->address(), source->address(), size);
 
@@ -67,7 +60,7 @@ bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
   DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
   AllocationAlignment alignment = object->RequiredAlignment();
   AllocationResult allocation =
-      heap()->new_space()->AllocateRaw(object_size, alignment);
+      allocator_.Allocate<NEW_SPACE>(object_size, alignment);
 
   HeapObject* target = nullptr;
   if (allocation.To(&target)) {
@@ -87,7 +80,7 @@ bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
                               int object_size) {
   AllocationAlignment alignment = object->RequiredAlignment();
   AllocationResult allocation =
-      heap()->old_space()->AllocateRaw(object_size, alignment);
+      allocator_.Allocate<OLD_SPACE>(object_size, alignment);
 
   HeapObject* target = nullptr;
   if (allocation.To(&target)) {
@@ -134,6 +134,7 @@ void Scavenger::Finalize() {
   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
   heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
   heap()->IncrementPromotedObjectsSize(promoted_size_);
+  allocator_.Finalize();
 }
 
 void RootScavengeVisitor::VisitRootPointer(Root root, Object** p) {
@@ -5,6 +5,7 @@
 #ifndef V8_HEAP_SCAVENGER_H_
 #define V8_HEAP_SCAVENGER_H_
 
+#include "src/heap/local-allocator.h"
 #include "src/heap/objects-visiting.h"
 #include "src/heap/slot-set.h"
 #include "src/heap/worklist.h"
@@ -76,6 +77,7 @@ class Scavenger {
         local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
         copied_size_(0),
         promoted_size_(0),
+        allocator_(heap),
         is_logging_(is_logging),
         is_incremental_marking_(is_incremental_marking) {}
 
@@ -137,6 +139,7 @@ class Scavenger {
   base::HashMap local_pretenuring_feedback_;
   size_t copied_size_;
   size_t promoted_size_;
+  LocalAllocator allocator_;
   bool is_logging_;
   bool is_incremental_marking_;
 };
@@ -1412,6 +1412,8 @@ void PagedSpace::RefillFreeList() {
 }
 
 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
+  base::LockGuard<base::Mutex> guard(mutex());
+
   DCHECK(identity() == other->identity());
   // Unmerged fields:
   //   area_size_
@@ -1024,6 +1024,7 @@
         'heap/incremental-marking.cc',
         'heap/incremental-marking.h',
         'heap/item-parallel-job.h',
+        'heap/local-allocator.h',
         'heap/mark-compact-inl.h',
         'heap/mark-compact.cc',
         'heap/mark-compact.h',