[heap] Optimize Heap::IsPendingAllocation

IsPendingAllocation now loads the space from the object's page header
first and then checks the object only against the current LAB of that
particular space. Previously the object was looked up in the LABs of
all spaces.

This new design also makes it feasible to give each space a dedicated
mutex for original_top/original_limit (or pending_object for the large
object spaces) instead of one heap-wide mutex, which reduces lock
contention.

Change-Id: I8e7636410259fd03b7970084bfbbaeadb2d8ba61
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2936606
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75054}

Author: Dominik Inführ, 2021-06-09 17:32:02 +02:00 (committed by V8 LUCI CQ)
Parent: 8732b2ee52
Commit: 9140d00172
8 changed files with 72 additions and 40 deletions
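
The change is easiest to see in condensed form. Below is a minimal, self-contained C++ sketch of the new lookup described in the message above; Space, PageHeader, OwnerOf, and the page size are simplified stand-ins rather than the real V8 types (BasicMemoryChunk, BaseSpace, and friends), and the synchronization this CL adds (one SharedMutex per space) is elided here and shown in the diff below.

// Hedged sketch only: hypothetical types standing in for the V8 classes.
// Synchronization is intentionally omitted.
#include <cstdint>

using Address = std::uintptr_t;

enum SpaceIdentity {
  kNewSpace,
  kOldSpace,
  kCodeSpace,
  kMapSpace,
  kLargeObjectSpace
};

struct Space {
  SpaceIdentity identity;
  Address original_top = 0;    // LAB top at the last publish point
  Address original_limit = 0;  // LAB limit at the last publish point
  Address pending_object = 0;  // used by the large object spaces
};

// Hypothetical page header: the first word of every page points at the
// owning space (stand-in for BasicMemoryChunk::owner()).
struct PageHeader {
  Space* owner;
};

constexpr Address kPageSize = Address{1} << 18;  // assumed 256 KiB pages

// Stand-in for BasicMemoryChunk::FromHeapObject(object)->owner(): mask the
// object address down to its page start and read the owner from the header.
inline Space* OwnerOf(Address addr) {
  return reinterpret_cast<PageHeader*>(addr & ~(kPageSize - 1))->owner;
}

// New shape of the check: one load to find the owning space, then only that
// space's pending allocation area is inspected. The old code scanned the
// LABs of every space for every query.
inline bool IsPendingAllocation(Address addr) {
  Space* space = OwnerOf(addr);
  switch (space->identity) {
    case kNewSpace:
    case kOldSpace:
    case kCodeSpace:
    case kMapSpace:
      return space->original_top && space->original_top <= addr &&
             addr < space->original_limit;
    case kLargeObjectSpace:
      // Large object spaces track a single pending object instead of a LAB.
      return addr == space->pending_object;
  }
  return false;
}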


@@ -136,10 +136,7 @@ void Heap::SetPendingOptimizeForTestBytecode(Object hash_table) {
 }
 
 PagedSpace* Heap::paged_space(int idx) {
-  DCHECK_NE(idx, LO_SPACE);
-  DCHECK_NE(idx, NEW_SPACE);
-  DCHECK_NE(idx, CODE_LO_SPACE);
-  DCHECK_NE(idx, NEW_LO_SPACE);
+  DCHECK(idx == OLD_SPACE || idx == CODE_SPACE || idx == MAP_SPACE);
   return static_cast<PagedSpace*>(space_[idx]);
 }
 
@@ -586,34 +583,51 @@ void Heap::UpdateAllocationSite(Map map, HeapObject object,
 }
 
 bool Heap::IsPendingAllocation(HeapObject object) {
-  if (ReadOnlyHeap::Contains(object)) return false;
+  DCHECK(deserialization_complete());
 
-  // Prevents concurrent modification by main thread
-  base::SharedMutexGuard<base::kShared> guard(&pending_allocation_mutex_);
+  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
+  if (chunk->InReadOnlySpace()) return false;
 
-  // TODO(ulan): Optimize this function to perform 3 loads at most.
+  BaseSpace* base_space = chunk->owner();
   Address addr = object.address();
-  Address top, limit;
 
-  if (new_space_) {
-    top = new_space_->original_top_acquire();
-    limit = new_space_->original_limit_relaxed();
-    DCHECK_LE(top, limit);
-    if (top && top <= addr && addr < limit) return true;
+  switch (base_space->identity()) {
+    case NEW_SPACE: {
+      base::SharedMutexGuard<base::kShared> guard(
+          new_space_->pending_allocation_mutex());
+      Address top = new_space_->original_top_acquire();
+      Address limit = new_space_->original_limit_relaxed();
+      DCHECK_LE(top, limit);
+      return top && top <= addr && addr < limit;
+    }
+
+    case OLD_SPACE:
+    case CODE_SPACE:
+    case MAP_SPACE: {
+      PagedSpace* paged_space = static_cast<PagedSpace*>(base_space);
+      base::SharedMutexGuard<base::kShared> guard(
+          paged_space->pending_allocation_mutex());
+      Address top = paged_space->original_top();
+      Address limit = paged_space->original_limit();
+      DCHECK_LE(top, limit);
+      return top && top <= addr && addr < limit;
+    }
+
+    case LO_SPACE:
+    case CODE_LO_SPACE:
+    case NEW_LO_SPACE: {
+      LargeObjectSpace* large_space =
+          static_cast<LargeObjectSpace*>(base_space);
+      base::SharedMutexGuard<base::kShared> guard(
+          large_space->pending_allocation_mutex());
+      return addr == large_space->pending_object();
+    }
+
+    case RO_SPACE:
+      UNREACHABLE();
   }
 
-  PagedSpaceIterator spaces(this);
-  for (PagedSpace* space = spaces.Next(); space != nullptr;
-       space = spaces.Next()) {
-    top = space->original_top();
-    limit = space->original_limit();
-    DCHECK_LE(top, limit);
-    if (top && top <= addr && addr < limit) return true;
-  }
-  if (addr == lo_space_->pending_object()) return true;
-  if (new_lo_space_ && addr == new_lo_space_->pending_object()) return true;
-  if (addr == code_lo_space_->pending_object()) return true;
-  return false;
+  UNREACHABLE();
 }
 
 void Heap::ExternalStringTable::AddString(String string) {


@@ -2429,9 +2429,6 @@ class Heap {
   HeapObject pending_layout_change_object_;
 
-  // This mutex protects original_top/limit and pending_object for all spaces.
-  base::SharedMutex pending_allocation_mutex_;
-
   base::Mutex unprotected_memory_chunks_mutex_;
   std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
   bool unprotected_memory_chunks_registry_enabled_ = false;
 


@@ -438,8 +438,7 @@ void LargeObjectSpace::Print() {
 #endif  // DEBUG
 
 void LargeObjectSpace::UpdatePendingObject(HeapObject object) {
-  base::SharedMutexGuard<base::kExclusive> guard(
-      &heap_->pending_allocation_mutex_);
+  base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
   pending_object_.store(object.address(), std::memory_order_release);
 }
 


@@ -123,6 +123,10 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
     pending_object_.store(0, std::memory_order_release);
   }
 
+  base::SharedMutex* pending_allocation_mutex() {
+    return &pending_allocation_mutex_;
+  }
+
  protected:
   LargeObjectSpace(Heap* heap, AllocationSpace id);
 
@@ -136,8 +140,14 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
   int page_count_;       // number of chunks
   std::atomic<size_t> objects_size_;  // size of objects
   base::Mutex allocation_mutex_;
+
+  // Current potentially uninitialized object. Protected by
+  // pending_allocation_mutex_.
   std::atomic<Address> pending_object_;
+
+  // Used to protect pending_object_.
+  base::SharedMutex pending_allocation_mutex_;
 
  private:
   friend class LargeObjectSpaceObjectIterator;
 };


@@ -474,8 +474,7 @@ void NewSpace::UpdateLinearAllocationArea(Address known_top) {
   // The order of the following two stores is important.
   // See the corresponding loads in ConcurrentMarking::Run.
   {
-    base::SharedMutexGuard<base::kExclusive> guard(
-        &heap_->pending_allocation_mutex_);
+    base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
     original_limit_.store(limit(), std::memory_order_relaxed);
     original_top_.store(top(), std::memory_order_release);
   }
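
The two stores above are deliberately ordered: the limit is published first with a relaxed store and the top last with a release store, while the readers (the loads in ConcurrentMarking::Run referenced by the comment, and original_top_acquire()/original_limit_relaxed() in Heap::IsPendingAllocation) load the top with acquire semantics first. A standalone illustration of that pairing, using plain std::atomic rather than the V8 types:

#include <atomic>
#include <cstdint>

using Address = std::uintptr_t;

std::atomic<Address> original_top{0};
std::atomic<Address> original_limit{0};

// Writer side (main thread, analogous to UpdateLinearAllocationArea above):
// publish the limit first with a relaxed store, then the top with release.
void Publish(Address top, Address limit) {
  original_limit.store(limit, std::memory_order_relaxed);
  original_top.store(top, std::memory_order_release);
}

// Reader side (background thread): acquire-load the top first. If the reader
// observes the top written by Publish(), the release/acquire pairing makes
// the earlier limit store visible as well, so it can never see a new top
// combined with a stale limit.
bool IsInPendingArea(Address addr) {
  Address top = original_top.load(std::memory_order_acquire);
  Address limit = original_limit.load(std::memory_order_relaxed);
  return top && top <= addr && addr < limit;
}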


@@ -457,8 +457,7 @@ class V8_EXPORT_PRIVATE NewSpace
   SemiSpace& to_space() { return to_space_; }
 
   void MoveOriginalTopForward() {
-    base::SharedMutexGuard<base::kExclusive> guard(
-        &heap_->pending_allocation_mutex_);
+    base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
     DCHECK_GE(top(), original_top_);
     DCHECK_LE(top(), original_limit_);
     original_top_.store(top(), std::memory_order_release);
@@ -466,6 +465,10 @@ class V8_EXPORT_PRIVATE NewSpace
 
   void MaybeFreeUnusedLab(LinearAllocationArea info);
 
+  base::SharedMutex* pending_allocation_mutex() {
+    return &pending_allocation_mutex_;
+  }
+
  private:
   static const int kAllocationBufferParkingThreshold = 4 * KB;
 
@@ -475,10 +478,14 @@ class V8_EXPORT_PRIVATE NewSpace
   base::Mutex mutex_;
 
   // The top and the limit at the time of setting the linear allocation area.
-  // These values can be accessed by background tasks.
+  // These values can be accessed by background tasks. Protected by
+  // pending_allocation_mutex_.
   std::atomic<Address> original_top_;
   std::atomic<Address> original_limit_;
 
+  // Protects original_top_ and original_limit_.
+  base::SharedMutex pending_allocation_mutex_;
+
   // The semispaces.
   SemiSpace to_space_;
   SemiSpace from_space_;
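
Taken together, each space now pairs its original_top_/original_limit_ (or pending_object_) with its own pending_allocation_mutex_: the allocation path takes that mutex exclusively when it moves or resets the LAB, while Heap::IsPendingAllocation takes only the owning space's mutex, and only in shared mode. A rough standalone analogue using std::shared_mutex (SpaceLab is a made-up stand-in type, not V8 code):

#include <atomic>
#include <cstdint>
#include <mutex>
#include <shared_mutex>

using Address = std::uintptr_t;

struct SpaceLab {
  std::shared_mutex pending_allocation_mutex;  // per space, not heap-global
  std::atomic<Address> original_top{0};
  std::atomic<Address> original_limit{0};

  // Writer path (analogous to MoveOriginalTopForward): mutating the
  // published top requires the exclusive lock on this space's mutex only.
  void MoveOriginalTopForward(Address new_top) {
    std::unique_lock<std::shared_mutex> guard(pending_allocation_mutex);
    original_top.store(new_top, std::memory_order_release);
  }

  // Reader path (analogous to Heap::IsPendingAllocation): readers share the
  // lock, and readers of different spaces never contend with each other.
  bool IsPending(Address addr) {
    std::shared_lock<std::shared_mutex> guard(pending_allocation_mutex);
    Address top = original_top.load(std::memory_order_acquire);
    Address limit = original_limit.load(std::memory_order_relaxed);
    return top && top <= addr && addr < limit;
  }
};

Before this change a single Heap::pending_allocation_mutex_ guarded these fields for every space, so a query had to synchronize with allocation in every space rather than just the owning one.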


@@ -278,7 +278,7 @@ void PagedSpace::SetTopAndLimit(Address top, Address limit) {
 
   base::Optional<base::SharedMutexGuard<base::kExclusive>> optional_guard;
   if (!is_compaction_space())
-    optional_guard.emplace(&heap_->pending_allocation_mutex_);
+    optional_guard.emplace(&pending_allocation_mutex_);
   original_limit_ = limit;
   original_top_ = top;
 }


@@ -302,13 +302,16 @@
   Address original_limit() { return original_limit_; }
 
   void MoveOriginalTopForward() {
-    base::SharedMutexGuard<base::kExclusive> guard(
-        &heap_->pending_allocation_mutex_);
+    base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
     DCHECK_GE(top(), original_top_);
     DCHECK_LE(top(), original_limit_);
     original_top_ = top();
   }
 
+  base::SharedMutex* pending_allocation_mutex() {
+    return &pending_allocation_mutex_;
+  }
+
  private:
   class ConcurrentAllocationMutex {
    public:
@@ -415,10 +418,13 @@
   base::Mutex space_mutex_;
 
   // The top and the limit at the time of setting the linear allocation area.
-  // These values are protected by Heap::pending_allocation_mutex_.
+  // These values are protected by pending_allocation_mutex_.
   Address original_top_;
   Address original_limit_;
 
+  // Protects original_top_ and original_limit_.
+  base::SharedMutex pending_allocation_mutex_;
+
   friend class IncrementalMarking;
   friend class MarkCompactCollector;
 