Revert "[heap] Refactor allocation observer in AllocationCounter"

This reverts commit b354e344fd.

Reason for revert: Clusterfuzz found issues with this CL.

Original change's description:
> [heap] Refactor allocation observer in AllocationCounter
> 
> Moves accounting of allocation observers into the AllocationCounter
> class. This CL removes top_on_previous_step_ for counters that are
> increased regularly in the slow path of the allocation functions.
> 
> AdvanceAllocationObservers() informs the AllocationCounter about
> allocated bytes, InvokeAllocationObservers() needs to be invoked when
> an allocation step is reached. NextBytes() returns the number of bytes
> until the next AllocationObserver::Step needs to run.
> 
> Bug: v8:10315
> Change-Id: I8b6eb8719ab032d44ee0614d2a0f2645bfce9df6
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2320650
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#69170}

TBR=ulan@chromium.org,dinfuehr@chromium.org

Change-Id: Icd713207bfb2085421fd82009be24a0211ae86da
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:10315
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2332667
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69187}
Dominik Inführ authored on 2020-08-01 07:33:28 +00:00; committed by Commit Bot
parent f220997e26
commit ef603a9e0e
17 changed files with 282 additions and 385 deletions
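
The API being reverted is easiest to see in the code deleted below. As a quick orientation, here is a condensed sketch of the intended call pattern, modelled on the removed LargeObjectSpace::AdvanceAndInvokeAllocationObservers and the deleted allocation-observer-unittest.cc; ObserveAllocation is a hypothetical helper used only for illustration and is not part of the CL.

#include "src/heap/allocation-observer.h"

namespace v8 {
namespace internal {

// Sketch only: how a space was expected to drive the refactored
// AllocationCounter that this commit reverts.
void ObserveAllocation(AllocationCounter* counter, Address soon_object,
                       size_t object_size) {
  if (!counter->IsActive()) return;
  // Run AllocationObserver::Step for observers whose step was reached.
  if (object_size >= counter->NextBytes()) {
    counter->InvokeAllocationObservers(soon_object, object_size, object_size);
  }
  // Account the allocated bytes so NextBytes() stays accurate.
  counter->AdvanceAllocationObservers(object_size);
}

}  // namespace internal
}  // namespace v8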


@ -11,97 +11,70 @@ namespace v8 {
namespace internal {
void AllocationCounter::AddAllocationObserver(AllocationObserver* observer) {
intptr_t step_size = observer->GetNextStepSize();
size_t observer_next_counter = current_counter_ + step_size;
#if DEBUG
auto it =
std::find_if(observers_.begin(), observers_.end(),
[observer](const ObserverAccounting& observer_accounting) {
return observer_accounting.observer_ == observer;
});
DCHECK_EQ(observers_.end(), it);
#endif
observers_.push_back(
ObserverAccounting(observer, current_counter_, observer_next_counter));
if (observers_.size() == 1) {
DCHECK_EQ(current_counter_, next_counter_);
next_counter_ = observer_next_counter;
} else {
size_t missing_bytes = next_counter_ - current_counter_;
next_counter_ =
current_counter_ + Min(static_cast<intptr_t>(missing_bytes), step_size);
}
allocation_observers_.push_back(observer);
}
void AllocationCounter::RemoveAllocationObserver(AllocationObserver* observer) {
auto it =
std::find_if(observers_.begin(), observers_.end(),
[observer](const ObserverAccounting& observer_accounting) {
return observer_accounting.observer_ == observer;
});
DCHECK_NE(observers_.end(), it);
observers_.erase(it);
if (observers_.size() == 0) {
current_counter_ = next_counter_ = 0;
} else {
size_t step_size = 0;
for (ObserverAccounting& observer : observers_) {
size_t left_in_step = observer.next_counter_ - current_counter_;
DCHECK_GT(left_in_step, 0);
step_size = step_size ? Min(step_size, left_in_step) : left_in_step;
}
next_counter_ = current_counter_ + step_size;
}
auto it = std::find(allocation_observers_.begin(),
allocation_observers_.end(), observer);
DCHECK(allocation_observers_.end() != it);
allocation_observers_.erase(it);
}
void AllocationCounter::AdvanceAllocationObservers(size_t allocated) {
intptr_t AllocationCounter::GetNextInlineAllocationStepSize() {
intptr_t next_step = 0;
for (AllocationObserver* observer : allocation_observers_) {
next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
: observer->bytes_to_next_step();
}
DCHECK(!HasAllocationObservers() || next_step > 0);
return next_step;
}
void AllocationCounter::NotifyBytes(size_t allocated) {
if (!IsActive()) {
return;
}
DCHECK_LT(allocated, next_counter_ - current_counter_);
DCHECK_LE(allocated, next_counter_ - current_counter_);
current_counter_ += allocated;
}
void AllocationCounter::InvokeAllocationObservers(Address soon_object,
size_t object_size,
size_t aligned_object_size) {
void AllocationCounter::NotifyObject(Address soon_object, size_t object_size) {
if (!IsActive()) {
return;
}
DCHECK_GE(aligned_object_size, next_counter_ - current_counter_);
DCHECK_GT(object_size, next_counter_ - current_counter_);
size_t bytes_since_last_step = current_counter_ - prev_counter_;
DCHECK(!heap_->allocation_step_in_progress());
heap_->set_allocation_step_in_progress(true);
DCHECK(soon_object);
size_t step_size = 0;
bool step_run = false;
for (ObserverAccounting& observer_accounting : observers_) {
if (observer_accounting.next_counter_ - current_counter_ <=
aligned_object_size) {
observer_accounting.observer_->Step(
static_cast<int>(current_counter_ -
observer_accounting.prev_counter_),
soon_object, object_size);
size_t observer_step_size =
observer_accounting.observer_->GetNextStepSize();
observer_accounting.prev_counter_ = current_counter_;
observer_accounting.next_counter_ =
current_counter_ + aligned_object_size + observer_step_size;
step_run = true;
}
size_t left_in_step = observer_accounting.next_counter_ - current_counter_;
step_size = step_size ? Min(step_size, left_in_step) : left_in_step;
heap_->CreateFillerObjectAt(soon_object, static_cast<int>(object_size),
ClearRecordedSlots::kNo);
intptr_t next_step = 0;
for (AllocationObserver* observer : allocation_observers_) {
observer->AllocationStep(static_cast<int>(bytes_since_last_step),
soon_object, object_size);
next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
: observer->bytes_to_next_step();
}
heap_->set_allocation_step_in_progress(false);
CHECK(step_run);
next_counter_ = current_counter_ + step_size;
prev_counter_ = current_counter_;
next_counter_ = current_counter_ + object_size + next_step;
}
void AllocationObserver::AllocationStep(int bytes_allocated,
Address soon_object, size_t size) {
DCHECK_GE(bytes_allocated, 0);
bytes_to_next_step_ -= bytes_allocated;
if (bytes_to_next_step_ <= 0) {
Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object, size);
step_size_ = GetNextStepSize();
bytes_to_next_step_ = step_size_;
}
DCHECK_GE(bytes_to_next_step_, 0);
}
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)


@ -5,7 +5,6 @@
#ifndef V8_HEAP_ALLOCATION_OBSERVER_H_
#define V8_HEAP_ALLOCATION_OBSERVER_H_
#include <cstdint>
#include <vector>
#include "src/common/globals.h"
@ -14,14 +13,27 @@ namespace v8 {
namespace internal {
class AllocationObserver;
class Heap;
class AllocationCounter {
public:
AllocationCounter() : paused_(false), current_counter_(0), next_counter_(0) {}
V8_EXPORT_PRIVATE void AddAllocationObserver(AllocationObserver* observer);
V8_EXPORT_PRIVATE void RemoveAllocationObserver(AllocationObserver* observer);
explicit AllocationCounter(Heap* heap)
: heap_(heap),
paused_(false),
prev_counter_(0),
current_counter_(0),
next_counter_(0) {}
bool IsActive() { return !IsPaused() && observers_.size() > 0; }
auto begin() { return allocation_observers_.begin(); }
auto end() { return allocation_observers_.end(); }
void AddAllocationObserver(AllocationObserver* observer);
void RemoveAllocationObserver(AllocationObserver* observer);
bool HasAllocationObservers() { return !allocation_observers_.empty(); }
size_t NumberAllocationObservers() { return allocation_observers_.size(); }
bool IsActive() { return !IsPaused() && HasAllocationObservers(); }
void Pause() {
DCHECK(!paused_);
@ -33,10 +45,10 @@ class AllocationCounter {
paused_ = false;
}
V8_EXPORT_PRIVATE void AdvanceAllocationObservers(size_t allocated);
V8_EXPORT_PRIVATE void InvokeAllocationObservers(Address soon_object,
size_t object_size,
size_t aligned_object_size);
intptr_t GetNextInlineAllocationStepSize();
void NotifyBytes(size_t allocated);
void NotifyObject(Address soon_object, size_t object_size);
size_t NextBytes() {
DCHECK(IsActive());
@ -46,22 +58,11 @@ class AllocationCounter {
private:
bool IsPaused() { return paused_; }
struct ObserverAccounting {
ObserverAccounting(AllocationObserver* observer, size_t prev_counter,
size_t next_counter)
: observer_(observer),
prev_counter_(prev_counter),
next_counter_(next_counter) {}
AllocationObserver* observer_;
size_t prev_counter_;
size_t next_counter_;
};
std::vector<ObserverAccounting> observers_;
std::vector<AllocationObserver*> allocation_observers_;
Heap* heap_;
bool paused_;
size_t prev_counter_;
size_t current_counter_;
size_t next_counter_;
};
@ -70,12 +71,21 @@ class AllocationCounter {
// Allows observation of allocations.
class AllocationObserver {
public:
explicit AllocationObserver(intptr_t step_size) : step_size_(step_size) {
explicit AllocationObserver(intptr_t step_size)
: step_size_(step_size), bytes_to_next_step_(step_size) {
DCHECK_LE(kTaggedSize, step_size);
}
virtual ~AllocationObserver() = default;
// Called each time the observed space does an allocation step. This may be
// more frequently than the step_size we are monitoring (e.g. when there are
// multiple observers, or when page or space boundary is encountered.)
void AllocationStep(int bytes_allocated, Address soon_object, size_t size);
protected:
intptr_t step_size() const { return step_size_; }
intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
// Pure virtual method provided by the subclasses that gets called when at
// least step_size bytes have been allocated. soon_object is the address just
// allocated (but not yet initialized.) size is the size of the object as
@ -93,9 +103,10 @@ class AllocationObserver {
// Subclasses can override this method to make step size dynamic.
virtual intptr_t GetNextStepSize() { return step_size_; }
private:
intptr_t step_size_;
intptr_t bytes_to_next_step_;
private:
friend class AllocationCounter;
DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
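
For contrast, this revert restores the pre-refactor scheme in which the owning space calls AllocationObserver::AllocationStep(); the base class then decrements bytes_to_next_step_ and invokes Step() once at least step_size bytes have accumulated. A minimal sketch of a concrete observer against that restored interface (SampleObserver is hypothetical and only for illustration):

#include "src/heap/allocation-observer.h"

namespace v8 {
namespace internal {

// Sketch only: a hypothetical observer written against the restored
// interface declared above.
class SampleObserver : public AllocationObserver {
 public:
  // Step() runs roughly once every step_size allocated bytes.
  explicit SampleObserver(intptr_t step_size)
      : AllocationObserver(step_size) {}

  void Step(int bytes_allocated, Address soon_object, size_t size) override {
    // bytes_allocated: bytes allocated since the previous step.
    // soon_object: address of the object about to be initialized.
    // size: requested size of that object.
  }

 protected:
  // Optional: subclasses may vary the step size from step to step.
  intptr_t GetNextStepSize() override { return step_size(); }
};

}  // namespace internal
}  // namespace v8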


@ -66,11 +66,6 @@ HeapObject AllocationResult::ToObject() {
return HeapObject::cast(object_);
}
Address AllocationResult::ToAddress() {
DCHECK(!IsRetry());
return HeapObject::cast(object_).address();
}
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(
reinterpret_cast<intptr_t>(this) -


@ -900,7 +900,6 @@ void Heap::MergeAllocationSitePretenuringFeedback(
void Heap::AddAllocationObserversToAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
SafepointScope scope(this);
for (SpaceIterator it(this); it.HasNext();) {
Space* space = it.Next();
@ -915,7 +914,6 @@ void Heap::AddAllocationObserversToAllSpaces(
void Heap::RemoveAllocationObserversFromAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
SafepointScope scope(this);
for (SpaceIterator it(this); it.HasNext();) {
Space* space = it.Next();
@ -5129,7 +5127,6 @@ void Heap::EnableInlineAllocation() {
inline_allocation_disabled_ = false;
// Update inline allocation limit for new space.
new_space()->AdvanceAllocationObservers();
new_space()->UpdateInlineAllocationLimit(0);
}


@ -197,7 +197,6 @@ class AllocationResult {
inline bool IsRetry() { return object_.IsSmi(); }
inline HeapObject ToObjectChecked();
inline HeapObject ToObject();
inline Address ToAddress();
inline AllocationSpace RetrySpace();
template <typename T>
@ -1396,6 +1395,11 @@ class Heap {
void RemoveAllocationObserversFromAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer);
bool allocation_step_in_progress() { return allocation_step_in_progress_; }
void set_allocation_step_in_progress(bool val) {
allocation_step_in_progress_ = val;
}
// ===========================================================================
// Heap object allocation tracking. ==========================================
// ===========================================================================
@ -2075,6 +2079,8 @@ class Heap {
// Observer that can cause early scavenge start.
StressScavengeObserver* stress_scavenge_observer_ = nullptr;
bool allocation_step_in_progress_ = false;
// The maximum percent of the marking limit reached without causing marking.
// This is tracked when specifying --fuzzer-gc-analysis.
double max_marking_limit_reached_ = 0.0;


@ -108,19 +108,6 @@ void LargeObjectSpace::TearDown() {
}
}
void LargeObjectSpace::AdvanceAndInvokeAllocationObservers(Address soon_object,
size_t object_size) {
if (!allocation_counter_.IsActive()) return;
if (object_size >= allocation_counter_.NextBytes()) {
allocation_counter_.InvokeAllocationObservers(soon_object, object_size,
object_size);
}
// Large objects can be accounted immediately since no LAB is involved.
allocation_counter_.AdvanceAllocationObservers(object_size);
}
AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size) {
return AllocateRaw(object_size, NOT_EXECUTABLE);
}
@ -149,8 +136,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
heap()->incremental_marking()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
heap()->NotifyOldGenerationExpansion(identity(), page);
AdvanceAndInvokeAllocationObservers(object.address(),
static_cast<size_t>(object_size));
AllocationStep(object_size, object.address(), object_size);
return object;
}
@ -472,8 +458,7 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
page->InitializationMemoryFence();
DCHECK(page->IsLargePage());
DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
AdvanceAndInvokeAllocationObservers(result.address(),
static_cast<size_t>(object_size));
AllocationStep(object_size, result.address(), object_size);
return result;
}


@ -113,8 +113,6 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
protected:
LargeObjectSpace(Heap* heap, AllocationSpace id);
void AdvanceAndInvokeAllocationObservers(Address soon_object, size_t size);
LargePage* AllocateLargePage(int object_size, Executability executable);
size_t size_; // allocated bytes


@ -90,7 +90,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationResult result;
if (alignment != kWordAligned) {
result = AllocateFastAligned(size_in_bytes, nullptr, alignment, origin);
result = AllocateFastAligned(size_in_bytes, alignment, origin);
} else {
result = AllocateFastUnaligned(size_in_bytes, origin);
}
@ -122,9 +122,9 @@ AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
return obj;
}
AllocationResult NewSpace::AllocateFastAligned(
int size_in_bytes, int* result_aligned_size_in_bytes,
AllocationAlignment alignment, AllocationOrigin origin) {
AllocationResult NewSpace::AllocateFastAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
Address top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
@ -136,8 +136,6 @@ AllocationResult NewSpace::AllocateFastAligned(
HeapObject obj = HeapObject::FromAddress(top);
allocation_info_.set_top(top + aligned_size_in_bytes);
if (result_aligned_size_in_bytes)
*result_aligned_size_in_bytes = aligned_size_in_bytes;
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
if (filler_size > 0) {


@ -465,7 +465,8 @@ bool NewSpace::Rebalance() {
}
void NewSpace::UpdateLinearAllocationArea() {
AdvanceAllocationObservers();
// Make sure there are no unaccounted allocations.
DCHECK(!allocation_counter_.IsActive() || top_on_previous_step_ == top());
Address new_top = to_space_.page_low();
BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@ -474,10 +475,13 @@ void NewSpace::UpdateLinearAllocationArea() {
// See the corresponding loads in ConcurrentMarking::Run.
original_limit_.store(limit(), std::memory_order_relaxed);
original_top_.store(top(), std::memory_order_release);
StartNextInlineAllocationStep();
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::ResetLinearAllocationArea() {
// Do a step to account for memory allocated so far before resetting.
InlineAllocationStep(top(), top(), kNullAddress, 0);
to_space_.Reset();
UpdateLinearAllocationArea();
// Clear all mark-bits in the to-space.
@ -502,6 +506,9 @@ bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
DCHECK(!OldSpace::IsAtPageStart(top));
// Do a step to account for memory allocated on previous page.
InlineAllocationStep(top, top, kNullAddress, 0);
if (!to_space_.AdvancePage()) {
// No more pages left to advance.
return false;
@ -523,30 +530,34 @@ bool NewSpace::AddFreshPageSynchronized() {
bool NewSpace::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment) {
AdvanceAllocationObservers();
Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
int filler_size = Heap::GetFillToAlign(old_top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
if (old_top + aligned_size_in_bytes <= high) {
UpdateInlineAllocationLimit(aligned_size_in_bytes);
return true;
}
if (old_top + aligned_size_in_bytes > high) {
// Not enough room in the page, try to allocate a new one.
if (!AddFreshPage()) {
return false;
}
// Not enough room in the page, try to allocate a new one.
if (!AddFreshPage()) {
return false;
old_top = allocation_info_.top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
}
old_top = allocation_info_.top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
aligned_size_in_bytes = size_in_bytes + filler_size;
DCHECK(old_top + aligned_size_in_bytes <= high);
UpdateInlineAllocationLimit(aligned_size_in_bytes);
if (allocation_info_.limit() < high) {
// Either the limit has been lowered because linear allocation was disabled
// or because incremental marking wants to get a chance to do a step,
// or because idle scavenge job wants to get a chance to post a task.
// Set the new limit accordingly.
Address new_top = old_top + aligned_size_in_bytes;
Address soon_object = old_top + filler_size;
InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
UpdateInlineAllocationLimit(aligned_size_in_bytes);
}
return true;
}
@ -557,6 +568,12 @@ std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
if (top() < top_on_previous_step_) {
// Generated code decreased the top() pointer to do folded allocations
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
Page::FromAllocationAreaAddress(top_on_previous_step_));
top_on_previous_step_ = top();
}
#ifdef V8_HOST_ARCH_32_BIT
return alignment != kWordAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
@ -578,14 +595,8 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
return AllocationResult::Retry();
}
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
DCHECK(!result.IsRetry());
InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
size_in_bytes);
return result;
}
@ -596,17 +607,9 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
return AllocationResult::Retry();
}
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
int aligned_size_in_bytes;
AllocationResult result = AllocateFastAligned(
size_in_bytes, &aligned_size_in_bytes, alignment, origin);
AllocationResult result =
AllocateFastAligned(size_in_bytes, alignment, origin);
DCHECK(!result.IsRetry());
InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
aligned_size_in_bytes, aligned_size_in_bytes);
return result;
}


@ -477,8 +477,8 @@ class V8_EXPORT_PRIVATE NewSpace
// Internal allocation methods.
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateFastAligned(int size_in_bytes, int* aligned_size_in_bytes,
AllocationAlignment alignment, AllocationOrigin origin);
AllocateFastAligned(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin);
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateFastUnaligned(int size_in_bytes, AllocationOrigin origin);
@ -495,7 +495,7 @@ class V8_EXPORT_PRIVATE NewSpace
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
bool SupportsAllocationObserver() override { return true; }
bool SupportsInlineAllocation() override { return true; }
friend class SemiSpaceObjectIterator;
};


@ -6,7 +6,6 @@
#define V8_HEAP_PAGED_SPACES_INL_H_
#include "src/common/globals.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/paged-spaces.h"
#include "src/objects/code-inl.h"
@ -97,20 +96,18 @@ bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {
return RefillLabMain(size_in_bytes, origin);
}
AllocationResult PagedSpace::AllocateFastUnaligned(int size_in_bytes) {
AllocationResult PagedSpace::AllocateLinearly(int size_in_bytes) {
Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
if (new_top > allocation_info_.limit())
return AllocationResult::Retry(identity());
DCHECK_LE(new_top, allocation_info_.limit());
allocation_info_.set_top(new_top);
return AllocationResult(HeapObject::FromAddress(current_top));
}
AllocationResult PagedSpace::AllocateFastAligned(
int size_in_bytes, int* aligned_size_in_bytes,
AllocationAlignment alignment) {
AllocationResult PagedSpace::TryAllocateLinearlyAligned(
int size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
@ -119,8 +116,6 @@ AllocationResult PagedSpace::AllocateFastAligned(
return AllocationResult::Retry(identity());
allocation_info_.set_top(new_top);
if (aligned_size_in_bytes)
*aligned_size_in_bytes = filler_size + size_in_bytes;
if (filler_size > 0) {
Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
HeapObject::FromAddress(current_top), filler_size);
@ -134,8 +129,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
if (!EnsureLabMain(size_in_bytes, origin)) {
return AllocationResult::Retry(identity());
}
AllocationResult result = AllocateFastUnaligned(size_in_bytes);
AllocationResult result = AllocateLinearly(size_in_bytes);
DCHECK(!result.IsRetry());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes);
@ -144,9 +138,6 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
UpdateAllocationOrigins(origin);
}
InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
size_in_bytes);
return result;
}
@ -162,9 +153,8 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
if (!EnsureLabMain(allocation_size, origin)) {
return AllocationResult::Retry(identity());
}
int aligned_size_in_bytes;
AllocationResult result =
AllocateFastAligned(size_in_bytes, &aligned_size_in_bytes, alignment);
TryAllocateLinearlyAligned(size_in_bytes, alignment);
DCHECK(!result.IsRetry());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes);
@ -173,9 +163,6 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
UpdateAllocationOrigins(origin);
}
InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
aligned_size_in_bytes, allocation_size);
return result;
}
@ -185,9 +172,9 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationResult result;
if (alignment != kWordAligned) {
result = AllocateFastAligned(size_in_bytes, nullptr, alignment);
result = TryAllocateLinearlyAligned(size_in_bytes, alignment);
} else {
result = AllocateFastUnaligned(size_in_bytes);
result = AllocateLinearly(size_in_bytes);
}
if (!result.IsRetry()) {


@ -383,13 +383,6 @@ void PagedSpace::DecreaseLimit(Address new_limit) {
DCHECK_LE(top(), new_limit);
DCHECK_GE(old_limit, new_limit);
if (new_limit != old_limit) {
base::Optional<CodePageMemoryModificationScope> optional_scope;
if (identity() == CODE_SPACE) {
MemoryChunk* chunk = MemoryChunk::FromAddress(new_limit);
optional_scope.emplace(chunk);
}
SetTopAndLimit(top(), new_limit);
Free(new_limit, old_limit - new_limit,
SpaceAccountingMode::kSpaceAccounted);
@ -446,7 +439,12 @@ void PagedSpace::FreeLinearAllocationArea() {
return;
}
AdvanceAllocationObservers();
if (!is_local_space()) {
// This can start incremental marking and mark the current
// linear allocation area as black. Thus destroying of the black
// area needs to happen afterwards.
InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
}
if (current_top != current_limit && !is_off_thread_space() &&
heap()->incremental_marking()->black_allocation()) {
@ -481,6 +479,7 @@ void PagedSpace::ReleasePage(Page* page) {
free_list_->EvictFreeListItems(page);
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
DCHECK(!top_on_previous_step_);
SetTopAndLimit(kNullAddress, kNullAddress);
}
@ -553,7 +552,6 @@ bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
Page* page = Page::FromHeapObject(new_node);
IncreaseAllocatedBytes(new_node_size, page);
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
Address start = new_node.address();
Address end = new_node.address() + new_node_size;
Address limit = ComputeLimit(start, end, size_in_bytes);
@ -850,9 +848,6 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
#endif
void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
// Ensure there are no unaccounted allocations.
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
Address new_limit = ComputeLimit(top(), limit(), min_size);
DCHECK_LE(top(), new_limit);
DCHECK_LE(new_limit, limit());
@ -1004,6 +999,20 @@ bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
if (top_on_previous_step_ && top() < top_on_previous_step_ &&
SupportsInlineAllocation()) {
// Generated code decreased the top() pointer to do folded allocations.
// The top_on_previous_step_ can be one byte beyond the current page.
DCHECK_NE(top(), kNullAddress);
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
top_on_previous_step_ = top();
}
size_t bytes_since_last =
top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
if (!is_local_space()) {
// Start incremental marking before the actual allocation, this allows the
// allocation function to mark the object black when incremental marking is
@ -1021,6 +1030,15 @@ AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
#else
AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
heap_obj.address(), size_in_bytes);
StartNextInlineAllocationStep();
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
}
return result;
}


@ -317,7 +317,9 @@ class V8_EXPORT_PRIVATE PagedSpace
void SetTopAndLimit(Address top, Address limit);
void DecreaseLimit(Address new_limit);
void UpdateInlineAllocationLimit(size_t min_size) override;
bool SupportsAllocationObserver() override { return !is_local_space(); }
bool SupportsInlineAllocation() override {
return identity() == OLD_SPACE && !is_local_space();
}
// Slow path of allocation function
V8_WARN_UNUSED_RESULT AllocationResult
@ -348,14 +350,13 @@ class V8_EXPORT_PRIVATE PagedSpace
inline bool EnsureLabMain(int size_in_bytes, AllocationOrigin origin);
// Allocates an object from the linear allocation area. Assumes that the
// linear allocation area is large enough to fit the object.
inline AllocationResult AllocateFastUnaligned(int size_in_bytes);
inline AllocationResult AllocateLinearly(int size_in_bytes);
// Tries to allocate an aligned object from the linear allocation area.
// Returns nullptr if the linear allocation area does not fit the object.
// Otherwise, returns the object pointer and writes the allocation size
// (object size + alignment filler size) to the size_in_bytes.
inline AllocationResult AllocateFastAligned(int size_in_bytes,
int* aligned_size_in_bytes,
AllocationAlignment alignment);
inline AllocationResult TryAllocateLinearlyAligned(
int size_in_bytes, AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT bool TryAllocationFromFreeListMain(
size_t size_in_bytes, AllocationOrigin origin);


@ -249,16 +249,46 @@ void Page::DestroyBlackAreaBackground(Address start, Address end) {
void Space::AddAllocationObserver(AllocationObserver* observer) {
allocation_counter_.AddAllocationObserver(observer);
StartNextInlineAllocationStep();
}
void Space::RemoveAllocationObserver(AllocationObserver* observer) {
allocation_counter_.RemoveAllocationObserver(observer);
StartNextInlineAllocationStep();
}
void Space::PauseAllocationObservers() { allocation_counter_.Pause(); }
void Space::ResumeAllocationObservers() { allocation_counter_.Resume(); }
void Space::AllocationStep(int bytes_since_last, Address soon_object,
int size) {
if (!allocation_counter_.IsActive()) {
return;
}
DCHECK(!heap()->allocation_step_in_progress());
heap()->set_allocation_step_in_progress(true);
heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
for (AllocationObserver* observer : allocation_counter_) {
observer->AllocationStep(bytes_since_last, soon_object, size);
}
heap()->set_allocation_step_in_progress(false);
}
void Space::AllocationStepAfterMerge(Address first_object_in_chunk, int size) {
if (!allocation_counter_.IsActive()) {
return;
}
DCHECK(!heap()->allocation_step_in_progress());
heap()->set_allocation_step_in_progress(true);
for (AllocationObserver* observer : allocation_counter_) {
observer->AllocationStep(size, first_object_in_chunk, size);
}
heap()->set_allocation_step_in_progress(false);
}
Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
size_t min_size) {
DCHECK_GE(end - start, min_size);
@ -266,19 +296,14 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
if (heap()->inline_allocation_disabled()) {
// Fit the requested area exactly.
return start + min_size;
} else if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
// Ensure there are no unaccounted allocations.
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
} else if (SupportsInlineAllocation() && allocation_counter_.IsActive()) {
// Generated code may allocate inline from the linear allocation area.
// To make sure we can observe these allocations, we use a lower limit.
size_t step = allocation_counter_.NextBytes();
DCHECK_NE(step, 0);
// To make sure we can observe these allocations, we use a lower limit.
size_t step = allocation_counter_.GetNextInlineAllocationStepSize();
size_t rounded_step =
RoundSizeDownToObjectAlignment(static_cast<int>(step - 1));
// Use uint64_t to avoid overflow on 32-bit
uint64_t step_end =
static_cast<uint64_t>(start) + Max(min_size, rounded_step);
uint64_t step_end = static_cast<uint64_t>(start) + min_size + rounded_step;
uint64_t new_end = Min(step_end, static_cast<uint64_t>(end));
return static_cast<Address>(new_end);
} else {
@ -345,74 +370,73 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
other.allocation_info_.Reset(kNullAddress, kNullAddress);
return *this;
}
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
if (heap()->allocation_step_in_progress()) {
// If we are mid-way through an existing step, don't start a new one.
return;
}
if (allocation_counter_.IsActive()) {
top_on_previous_step_ = top();
UpdateInlineAllocationLimit(0);
} else {
DCHECK_EQ(kNullAddress, top_on_previous_step_);
}
}
void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
AdvanceAllocationObservers();
InlineAllocationStep(top(), top(), kNullAddress, 0);
Space::AddAllocationObserver(observer);
UpdateInlineAllocationLimit(0);
DCHECK_IMPLIES(top_on_previous_step_, allocation_counter_.IsActive());
}
void SpaceWithLinearArea::RemoveAllocationObserver(
AllocationObserver* observer) {
AdvanceAllocationObservers();
Address top_for_next_step =
allocation_counter_.NumberAllocationObservers() == 1 ? kNullAddress
: top();
InlineAllocationStep(top(), top_for_next_step, kNullAddress, 0);
Space::RemoveAllocationObserver(observer);
UpdateInlineAllocationLimit(0);
DCHECK_IMPLIES(top_on_previous_step_, allocation_counter_.IsActive());
}
void SpaceWithLinearArea::PauseAllocationObservers() {
AdvanceAllocationObservers();
// Do a step to account for memory allocated so far.
InlineAllocationStep(top(), kNullAddress, kNullAddress, 0);
Space::PauseAllocationObservers();
}
void SpaceWithLinearArea::ResumeAllocationObservers() {
Space::ResumeAllocationObservers();
allocation_info_.MoveStartToTop();
DCHECK_EQ(kNullAddress, top_on_previous_step_);
UpdateInlineAllocationLimit(0);
}
void SpaceWithLinearArea::AdvanceAllocationObservers() {
if (allocation_info_.top()) {
allocation_counter_.AdvanceAllocationObservers(allocation_info_.top() -
allocation_info_.start());
allocation_info_.MoveStartToTop();
void SpaceWithLinearArea::ResumeAllocationObservers() {
DCHECK_EQ(kNullAddress, top_on_previous_step_);
Space::ResumeAllocationObservers();
StartNextInlineAllocationStep();
}
void SpaceWithLinearArea::InlineAllocationStep(Address top,
Address top_for_next_step,
Address soon_object,
size_t size) {
if (heap()->allocation_step_in_progress()) {
// Avoid starting a new step if we are mid-way through an existing one.
return;
}
if (top_on_previous_step_) {
if (top < top_on_previous_step_) {
// Generated code decreased the top pointer to do folded allocations.
DCHECK_NE(top, kNullAddress);
DCHECK_EQ(Page::FromAllocationAreaAddress(top),
Page::FromAllocationAreaAddress(top_on_previous_step_));
top_on_previous_step_ = top;
}
int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
AllocationStep(bytes_allocated, soon_object, static_cast<int>(size));
top_on_previous_step_ = top_for_next_step;
}
}
// Perform an allocation step when the step is reached. size_in_bytes is the
// actual size needed for the object (required for InvokeAllocationObservers).
// aligned_size_in_bytes is the size of the object including the filler right
// before it to reach the right alignment (required to DCHECK the start of the
// object). allocation_size is the size of the actual allocation which needs to
// be used for the accounting. It can be different from aligned_size_in_bytes in
// PagedSpace::AllocateRawAligned, where we have to overallocate in order to be
// able to align the allocation afterwards.
void SpaceWithLinearArea::InvokeAllocationObservers(
Address soon_object, size_t size_in_bytes, size_t aligned_size_in_bytes,
size_t allocation_size) {
DCHECK_LE(size_in_bytes, aligned_size_in_bytes);
DCHECK_LE(aligned_size_in_bytes, allocation_size);
DCHECK(size_in_bytes == aligned_size_in_bytes ||
aligned_size_in_bytes == allocation_size);
if (!SupportsAllocationObserver() || !allocation_counter_.IsActive()) return;
if (allocation_size >= allocation_counter_.NextBytes()) {
// Only the first object in a LAB should reach the next step.
DCHECK_EQ(soon_object,
allocation_info_.start() + aligned_size_in_bytes - size_in_bytes);
// Ensure that there is a valid object
heap_->CreateFillerObjectAt(soon_object, static_cast<int>(size_in_bytes),
ClearRecordedSlots::kNo);
// Run AllocationObserver::Step through the AllocationCounter.
allocation_counter_.InvokeAllocationObservers(soon_object, size_in_bytes,
allocation_size);
}
DCHECK_LT(allocation_info_.limit() - allocation_info_.start(),
allocation_counter_.NextBytes());
}
int MemoryChunk::FreeListsLength() {
int length = 0;


@ -112,6 +112,7 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
public:
Space(Heap* heap, AllocationSpace id, FreeList* free_list)
: BaseSpace(heap, id),
allocation_counter_(heap),
free_list_(std::unique_ptr<FreeList>(free_list)) {
external_backing_store_bytes_ =
new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
@ -138,6 +139,13 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
virtual void StartNextInlineAllocationStep() {}
void AllocationStep(int bytes_since_last, Address soon_object, int size);
// An AllocationStep equivalent to be called after merging a contiguous
// chunk of an off-thread space into this space. The chunk is treated as a
// single allocation-folding group.
void AllocationStepAfterMerge(Address first_object_in_chunk, int size);
// Returns size of objects. Can differ from the allocated size
// (e.g. see OldLargeObjectSpace).
virtual size_t SizeOfObjects() { return Size(); }
@ -373,8 +381,6 @@ class LinearAllocationArea {
set_limit(limit);
}
void MoveStartToTop() { start_ = top_; }
V8_INLINE Address start() const { return start_; }
V8_INLINE void set_top(Address top) {
@ -484,11 +490,11 @@ class LocalAllocationBuffer {
class SpaceWithLinearArea : public Space {
public:
SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list)
: Space(heap, id, free_list) {
: Space(heap, id, free_list), top_on_previous_step_(0) {
allocation_info_.Reset(kNullAddress, kNullAddress);
}
virtual bool SupportsAllocationObserver() = 0;
virtual bool SupportsInlineAllocation() = 0;
// Returns the allocation pointer in this space.
Address top() { return allocation_info_.top(); }
@ -502,7 +508,6 @@ class SpaceWithLinearArea : public Space {
return allocation_info_.limit_address();
}
// Methods needed for allocation observers.
V8_EXPORT_PRIVATE void AddAllocationObserver(
AllocationObserver* observer) override;
V8_EXPORT_PRIVATE void RemoveAllocationObserver(
@ -510,12 +515,6 @@ class SpaceWithLinearArea : public Space {
V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
V8_EXPORT_PRIVATE void PauseAllocationObservers() override;
V8_EXPORT_PRIVATE void AdvanceAllocationObservers();
V8_EXPORT_PRIVATE void InvokeAllocationObservers(Address soon_object,
size_t size_in_bytes,
size_t aligned_size_in_bytes,
size_t allocation_size);
// When allocation observers are active we may use a lower limit to allow the
// observers to 'interrupt' earlier than the natural limit. Given a linear
// area bounded by [start, end), this function computes the limit to use to
@ -530,8 +529,22 @@ class SpaceWithLinearArea : public Space {
void PrintAllocationsOrigins();
protected:
// If we are doing inline allocation in steps, this method performs the 'step'
// operation. top is the memory address of the bump pointer at the last
// inline allocation (i.e. it determines the number of bytes actually
// allocated since the last step.) top_for_next_step is the address of the
// bump pointer where the next byte is going to be allocated from. top and
// top_for_next_step may be different when we cross a page boundary or reset
// the space.
// TODO(ofrobots): clarify the precise difference between this and
// Space::AllocationStep.
void InlineAllocationStep(Address top, Address top_for_next_step,
Address soon_object, size_t size);
V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;
// TODO(ofrobots): make these private after refactoring is complete.
LinearAllocationArea allocation_info_;
Address top_on_previous_step_;
size_t allocations_origins_[static_cast<int>(
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};


@ -232,7 +232,6 @@ v8_source_set("unittests_sources") {
"diagnostics/eh-frame-iterator-unittest.cc",
"diagnostics/eh-frame-writer-unittest.cc",
"execution/microtask-queue-unittest.cc",
"heap/allocation-observer-unittest.cc",
"heap/barrier-unittest.cc",
"heap/bitmap-test-utils.h",
"heap/bitmap-unittest.cc",


@ -1,111 +0,0 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/allocation-observer.h"
#include "src/base/logging.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
namespace {
class UnusedObserver : public AllocationObserver {
public:
explicit UnusedObserver(size_t step_size) : AllocationObserver(step_size) {}
void Step(int bytes_allocated, Address soon_object, size_t size) override {
CHECK(false);
}
};
} // namespace
TEST(AllocationObserverTest, AddAndRemoveUnusedObservers) {
AllocationCounter counter;
CHECK(!counter.IsActive());
UnusedObserver observer100(100);
UnusedObserver observer200(200);
counter.AddAllocationObserver(&observer200);
CHECK_EQ(counter.NextBytes(), 200);
counter.AddAllocationObserver(&observer100);
CHECK_EQ(counter.NextBytes(), 100);
counter.AdvanceAllocationObservers(90);
CHECK_EQ(counter.NextBytes(), 10);
counter.RemoveAllocationObserver(&observer100);
CHECK_EQ(counter.NextBytes(), 110);
counter.RemoveAllocationObserver(&observer200);
CHECK(!counter.IsActive());
}
namespace {
class VerifyStepObserver : public AllocationObserver {
public:
explicit VerifyStepObserver(size_t step_size)
: AllocationObserver(step_size) {}
void Step(int bytes_allocated, Address soon_object, size_t size) override {
CHECK(!do_not_invoke_);
invocations_++;
CHECK_EQ(expected_bytes_allocated_, bytes_allocated);
CHECK_EQ(expected_size_, size);
}
void ExpectNoInvocation() { do_not_invoke_ = true; }
void Expect(int expected_bytes_allocated, size_t expected_size) {
do_not_invoke_ = false;
expected_bytes_allocated_ = expected_bytes_allocated;
expected_size_ = expected_size;
}
int Invocations() { return invocations_; }
private:
bool do_not_invoke_ = false;
int invocations_ = 0;
int expected_bytes_allocated_ = 0;
size_t expected_size_ = 0;
};
} // namespace
TEST(AllocationObserverTest, Step) {
AllocationCounter counter;
CHECK(!counter.IsActive());
const Address kSomeObjectAddress = 8;
VerifyStepObserver observer100(100);
VerifyStepObserver observer200(200);
counter.AddAllocationObserver(&observer100);
counter.AddAllocationObserver(&observer200);
observer100.Expect(90, 8);
observer200.ExpectNoInvocation();
counter.AdvanceAllocationObservers(90);
counter.InvokeAllocationObservers(kSomeObjectAddress, 8, 10);
CHECK_EQ(observer100.Invocations(), 1);
CHECK_EQ(observer200.Invocations(), 0);
CHECK_EQ(counter.NextBytes(),
10 /* aligned_object_size */ + 100 /* smallest step size*/);
observer100.Expect(90, 16);
observer200.Expect(180, 16);
counter.AdvanceAllocationObservers(90);
counter.InvokeAllocationObservers(kSomeObjectAddress, 16, 20);
CHECK_EQ(observer100.Invocations(), 2);
CHECK_EQ(observer200.Invocations(), 1);
CHECK_EQ(counter.NextBytes(),
20 /* aligned_object_size */ + 100 /* smallest step size*/);
}
} // namespace internal
} // namespace v8