Reland "[heap] Refactor allocation observer in AllocationCounter"

This is a reland of b354e344fd

This CL adds 3 fixes:

* Unprotect code object before creating filler
* Allow AllocationObserver::Step to add more AllocationObservers (sketched below)
* Update limit in NewSpace::UpdateLinearAllocationArea
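
The second fix concerns re-entrant observer registration. As a minimal, hypothetical sketch of the scenario it has to support (ChainingObserver and its fields are illustrative and not part of this CL; only the AllocationObserver/AllocationCounter API is taken from the change), an observer whose Step() registers a further observer is now deferred into the counter's pending_ list until the current round of steps has finished:

#include "src/heap/allocation-observer.h"

namespace v8 {
namespace internal {

// Hypothetical observer (not from this CL): its Step() adds another observer
// to the same AllocationCounter. Before the fix this could invalidate the
// observer list that InvokeAllocationObservers() was iterating; now the new
// observer is parked in pending_ while step_in_progress_ is set.
class ChainingObserver : public AllocationObserver {
 public:
  ChainingObserver(AllocationCounter* counter, AllocationObserver* next)
      : AllocationObserver(512), counter_(counter), next_(next) {}

  void Step(int bytes_allocated, Address soon_object, size_t size) override {
    if (!added_) {
      added_ = true;
      counter_->AddAllocationObserver(next_);  // re-entrant registration
    }
  }

 private:
  AllocationCounter* const counter_;
  AllocationObserver* const next_;
  bool added_ = false;
};

}  // namespace internal
}  // namespace v8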

Original change's description:
> [heap] Refactor allocation observer in AllocationCounter
>
> Moves accounting of allocation observers into the AllocationCounter
> class. This CL removes top_on_previous_step_ for counters that are
> increased regularly in the slow path of the allocation functions.
>
> AdvanceAllocationObservers() informs the AllocationCounter about
> allocated bytes, InvokeAllocationObservers() needs to be invoked when
> an allocation step is reached. NextBytes() returns the number of bytes
> until the next AllocationObserver::Step needs to run.
>
> Bug: v8:10315
> Change-Id: I8b6eb8719ab032d44ee0614d2a0f2645bfce9df6
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2320650
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#69170}

Bug: v8:10315
Change-Id: I89ab4d5069a234a293471f613dab16b47d8fff89
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2332805
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69216}
Dominik Inführ 2020-08-04 00:22:36 +02:00 committed by Commit Bot
parent dfd3c628d6
commit 9fff9a73bb
17 changed files with 449 additions and 280 deletions
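
Before the per-file diffs, here is a minimal sketch of the accounting protocol described above, assuming only the AllocationCounter interface added in this CL; the driver function, its name, and the bytes_since_last_step bookkeeping are illustrative, not code from the change:

#include "src/heap/allocation-observer.h"

namespace v8 {
namespace internal {

// Illustrative driver (not from this CL): how a space is expected to feed the
// counter around an allocation of object_size bytes that will start at
// soon_object. bytes_since_last_step must stay below NextBytes(), matching
// the DCHECK in AdvanceAllocationObservers().
void SketchAllocationPath(AllocationCounter* counter, Address soon_object,
                          size_t bytes_since_last_step, size_t object_size,
                          size_t aligned_object_size) {
  if (!counter->IsActive()) return;

  // Account for bytes already handed out from the linear allocation area;
  // this only moves the counter forward and never runs Step().
  counter->AdvanceAllocationObservers(bytes_since_last_step);

  // NextBytes() is the remaining distance to the closest observer's step.
  // Observers are only invoked when the upcoming object crosses it.
  if (aligned_object_size >= counter->NextBytes()) {
    counter->InvokeAllocationObservers(soon_object, object_size,
                                       aligned_object_size);
  }
}

}  // namespace internal
}  // namespace v8

This mirrors the sequence exercised by the new heap/allocation-observer-unittest.cc at the end of this commit (AdvanceAllocationObservers(90) followed by InvokeAllocationObservers(...)).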


@ -11,70 +11,124 @@ namespace v8 {
namespace internal {
void AllocationCounter::AddAllocationObserver(AllocationObserver* observer) {
allocation_observers_.push_back(observer);
#if DEBUG
auto it = std::find_if(observers_.begin(), observers_.end(),
[observer](const AllocationObserverCounter& aoc) {
return aoc.observer_ == observer;
});
DCHECK_EQ(observers_.end(), it);
#endif
if (step_in_progress_) {
pending_.push_back(AllocationObserverCounter(observer, 0, 0));
return;
}
intptr_t step_size = observer->GetNextStepSize();
size_t observer_next_counter = current_counter_ + step_size;
observers_.push_back(AllocationObserverCounter(observer, current_counter_,
observer_next_counter));
if (observers_.size() == 1) {
DCHECK_EQ(current_counter_, next_counter_);
next_counter_ = observer_next_counter;
} else {
size_t missing_bytes = next_counter_ - current_counter_;
next_counter_ =
current_counter_ + Min(static_cast<intptr_t>(missing_bytes), step_size);
}
}
void AllocationCounter::RemoveAllocationObserver(AllocationObserver* observer) {
auto it = std::find(allocation_observers_.begin(),
allocation_observers_.end(), observer);
DCHECK(allocation_observers_.end() != it);
allocation_observers_.erase(it);
}
DCHECK(!step_in_progress_);
auto it = std::find_if(observers_.begin(), observers_.end(),
[observer](const AllocationObserverCounter& aoc) {
return aoc.observer_ == observer;
});
DCHECK_NE(observers_.end(), it);
observers_.erase(it);
intptr_t AllocationCounter::GetNextInlineAllocationStepSize() {
intptr_t next_step = 0;
for (AllocationObserver* observer : allocation_observers_) {
next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
: observer->bytes_to_next_step();
if (observers_.size() == 0) {
current_counter_ = next_counter_ = 0;
} else {
size_t step_size = 0;
for (AllocationObserverCounter& observer : observers_) {
size_t left_in_step = observer.next_counter_ - current_counter_;
DCHECK_GT(left_in_step, 0);
step_size = step_size ? Min(step_size, left_in_step) : left_in_step;
}
next_counter_ = current_counter_ + step_size;
}
DCHECK(!HasAllocationObservers() || next_step > 0);
return next_step;
}
void AllocationCounter::NotifyBytes(size_t allocated) {
void AllocationCounter::AdvanceAllocationObservers(size_t allocated) {
if (!IsActive()) {
return;
}
DCHECK_LE(allocated, next_counter_ - current_counter_);
DCHECK(!step_in_progress_);
DCHECK_LT(allocated, next_counter_ - current_counter_);
current_counter_ += allocated;
}
void AllocationCounter::NotifyObject(Address soon_object, size_t object_size) {
void AllocationCounter::InvokeAllocationObservers(Address soon_object,
size_t object_size,
size_t aligned_object_size) {
if (!IsActive()) {
return;
}
DCHECK_GT(object_size, next_counter_ - current_counter_);
size_t bytes_since_last_step = current_counter_ - prev_counter_;
DCHECK(!heap_->allocation_step_in_progress());
heap_->set_allocation_step_in_progress(true);
DCHECK(!step_in_progress_);
DCHECK_GE(aligned_object_size, next_counter_ - current_counter_);
DCHECK(soon_object);
heap_->CreateFillerObjectAt(soon_object, static_cast<int>(object_size),
ClearRecordedSlots::kNo);
intptr_t next_step = 0;
for (AllocationObserver* observer : allocation_observers_) {
observer->AllocationStep(static_cast<int>(bytes_since_last_step),
soon_object, object_size);
next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
: observer->bytes_to_next_step();
}
heap_->set_allocation_step_in_progress(false);
bool step_run = false;
step_in_progress_ = true;
size_t step_size = 0;
prev_counter_ = current_counter_;
next_counter_ = current_counter_ + object_size + next_step;
}
DCHECK(pending_.empty());
void AllocationObserver::AllocationStep(int bytes_allocated,
Address soon_object, size_t size) {
DCHECK_GE(bytes_allocated, 0);
bytes_to_next_step_ -= bytes_allocated;
if (bytes_to_next_step_ <= 0) {
Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object, size);
step_size_ = GetNextStepSize();
bytes_to_next_step_ = step_size_;
for (AllocationObserverCounter& aoc : observers_) {
if (aoc.next_counter_ - current_counter_ <= aligned_object_size) {
{
DisallowHeapAllocation disallow_heap_allocation;
aoc.observer_->Step(
static_cast<int>(current_counter_ - aoc.prev_counter_), soon_object,
object_size);
}
size_t observer_step_size = aoc.observer_->GetNextStepSize();
aoc.prev_counter_ = current_counter_;
aoc.next_counter_ =
current_counter_ + aligned_object_size + observer_step_size;
step_run = true;
}
size_t left_in_step = aoc.next_counter_ - current_counter_;
step_size = step_size ? Min(step_size, left_in_step) : left_in_step;
}
DCHECK_GE(bytes_to_next_step_, 0);
CHECK(step_run);
// Now process newly added allocation observers.
for (AllocationObserverCounter& aoc : pending_) {
size_t observer_step_size = aoc.observer_->GetNextStepSize();
aoc.prev_counter_ = current_counter_;
aoc.next_counter_ =
current_counter_ + aligned_object_size + observer_step_size;
DCHECK_NE(step_size, 0);
step_size = Min(step_size, aligned_object_size + observer_step_size);
observers_.push_back(aoc);
}
pending_.clear();
next_counter_ = current_counter_ + step_size;
step_in_progress_ = false;
}
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)


@ -5,6 +5,7 @@
#ifndef V8_HEAP_ALLOCATION_OBSERVER_H_
#define V8_HEAP_ALLOCATION_OBSERVER_H_
#include <cstdint>
#include <vector>
#include "src/common/globals.h"
@ -13,79 +14,79 @@ namespace v8 {
namespace internal {
class AllocationObserver;
class Heap;
class AllocationCounter {
public:
explicit AllocationCounter(Heap* heap)
: heap_(heap),
paused_(false),
prev_counter_(0),
AllocationCounter()
: paused_(false),
current_counter_(0),
next_counter_(0) {}
next_counter_(0),
step_in_progress_(false) {}
V8_EXPORT_PRIVATE void AddAllocationObserver(AllocationObserver* observer);
V8_EXPORT_PRIVATE void RemoveAllocationObserver(AllocationObserver* observer);
auto begin() { return allocation_observers_.begin(); }
auto end() { return allocation_observers_.end(); }
void AddAllocationObserver(AllocationObserver* observer);
void RemoveAllocationObserver(AllocationObserver* observer);
bool HasAllocationObservers() { return !allocation_observers_.empty(); }
size_t NumberAllocationObservers() { return allocation_observers_.size(); }
bool IsActive() { return !IsPaused() && HasAllocationObservers(); }
bool IsActive() { return !IsPaused() && observers_.size() > 0; }
void Pause() {
DCHECK(!paused_);
DCHECK(!step_in_progress_);
paused_ = true;
}
void Resume() {
DCHECK(paused_);
DCHECK(!step_in_progress_);
paused_ = false;
}
intptr_t GetNextInlineAllocationStepSize();
void NotifyBytes(size_t allocated);
void NotifyObject(Address soon_object, size_t object_size);
V8_EXPORT_PRIVATE void AdvanceAllocationObservers(size_t allocated);
V8_EXPORT_PRIVATE void InvokeAllocationObservers(Address soon_object,
size_t object_size,
size_t aligned_object_size);
size_t NextBytes() {
DCHECK(IsActive());
return next_counter_ - current_counter_;
}
bool IsStepInProgress() { return step_in_progress_; }
private:
bool IsPaused() { return paused_; }
std::vector<AllocationObserver*> allocation_observers_;
Heap* heap_;
struct AllocationObserverCounter {
AllocationObserverCounter(AllocationObserver* observer, size_t prev_counter,
size_t next_counter)
: observer_(observer),
prev_counter_(prev_counter),
next_counter_(next_counter) {}
AllocationObserver* observer_;
size_t prev_counter_;
size_t next_counter_;
};
std::vector<AllocationObserverCounter> observers_;
std::vector<AllocationObserverCounter> pending_;
bool paused_;
size_t prev_counter_;
size_t current_counter_;
size_t next_counter_;
bool step_in_progress_;
};
// -----------------------------------------------------------------------------
// Allows observation of allocations.
class AllocationObserver {
public:
explicit AllocationObserver(intptr_t step_size)
: step_size_(step_size), bytes_to_next_step_(step_size) {
explicit AllocationObserver(intptr_t step_size) : step_size_(step_size) {
DCHECK_LE(kTaggedSize, step_size);
}
virtual ~AllocationObserver() = default;
// Called each time the observed space does an allocation step. This may be
// more frequently than the step_size we are monitoring (e.g. when there are
// multiple observers, or when page or space boundary is encountered.)
void AllocationStep(int bytes_allocated, Address soon_object, size_t size);
protected:
intptr_t step_size() const { return step_size_; }
intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
// Pure virtual method provided by the subclasses that gets called when at
// least step_size bytes have been allocated. soon_object is the address just
// allocated (but not yet initialized.) size is the size of the object as
@ -103,10 +104,9 @@ class AllocationObserver {
// Subclasses can override this method to make step size dynamic.
virtual intptr_t GetNextStepSize() { return step_size_; }
intptr_t step_size_;
intptr_t bytes_to_next_step_;
private:
intptr_t step_size_;
friend class AllocationCounter;
DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};


@ -66,6 +66,11 @@ HeapObject AllocationResult::ToObject() {
return HeapObject::cast(object_);
}
Address AllocationResult::ToAddress() {
DCHECK(!IsRetry());
return HeapObject::cast(object_).address();
}
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(
reinterpret_cast<intptr_t>(this) -


@ -900,6 +900,7 @@ void Heap::MergeAllocationSitePretenuringFeedback(
void Heap::AddAllocationObserversToAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
SafepointScope scope(this);
for (SpaceIterator it(this); it.HasNext();) {
Space* space = it.Next();
@ -914,6 +915,7 @@ void Heap::AddAllocationObserversToAllSpaces(
void Heap::RemoveAllocationObserversFromAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
SafepointScope scope(this);
for (SpaceIterator it(this); it.HasNext();) {
Space* space = it.Next();
@ -5127,6 +5129,7 @@ void Heap::EnableInlineAllocation() {
inline_allocation_disabled_ = false;
// Update inline allocation limit for new space.
new_space()->AdvanceAllocationObservers();
new_space()->UpdateInlineAllocationLimit(0);
}


@ -197,6 +197,7 @@ class AllocationResult {
inline bool IsRetry() { return object_.IsSmi(); }
inline HeapObject ToObjectChecked();
inline HeapObject ToObject();
inline Address ToAddress();
inline AllocationSpace RetrySpace();
template <typename T>
@ -1395,11 +1396,6 @@ class Heap {
void RemoveAllocationObserversFromAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer);
bool allocation_step_in_progress() { return allocation_step_in_progress_; }
void set_allocation_step_in_progress(bool val) {
allocation_step_in_progress_ = val;
}
// ===========================================================================
// Heap object allocation tracking. ==========================================
// ===========================================================================
@ -2079,8 +2075,6 @@ class Heap {
// Observer that can cause early scavenge start.
StressScavengeObserver* stress_scavenge_observer_ = nullptr;
bool allocation_step_in_progress_ = false;
// The maximum percent of the marking limit reached without causing marking.
// This is tracked when specifying --fuzzer-gc-analysis.
double max_marking_limit_reached_ = 0.0;


@ -110,6 +110,19 @@ void LargeObjectSpace::TearDown() {
}
}
void LargeObjectSpace::AdvanceAndInvokeAllocationObservers(Address soon_object,
size_t object_size) {
if (!allocation_counter_.IsActive()) return;
if (object_size >= allocation_counter_.NextBytes()) {
allocation_counter_.InvokeAllocationObservers(soon_object, object_size,
object_size);
}
// Large objects can be accounted immediately since no LAB is involved.
allocation_counter_.AdvanceAllocationObservers(object_size);
}
AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size) {
return AllocateRaw(object_size, NOT_EXECUTABLE);
}
@ -138,7 +151,8 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
heap()->incremental_marking()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
heap()->NotifyOldGenerationExpansion(identity(), page);
AllocationStep(object_size, object.address(), object_size);
AdvanceAndInvokeAllocationObservers(object.address(),
static_cast<size_t>(object_size));
return object;
}
@ -487,7 +501,8 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
page->InitializationMemoryFence();
DCHECK(page->IsLargePage());
DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
AllocationStep(object_size, result.address(), object_size);
AdvanceAndInvokeAllocationObservers(result.address(),
static_cast<size_t>(object_size));
return result;
}


@ -115,6 +115,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
protected:
LargeObjectSpace(Heap* heap, AllocationSpace id);
void AdvanceAndInvokeAllocationObservers(Address soon_object, size_t size);
LargePage* AllocateLargePage(int object_size, Executability executable);
std::atomic<size_t> size_; // allocated bytes


@ -90,7 +90,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationResult result;
if (alignment != kWordAligned) {
result = AllocateFastAligned(size_in_bytes, alignment, origin);
result = AllocateFastAligned(size_in_bytes, nullptr, alignment, origin);
} else {
result = AllocateFastUnaligned(size_in_bytes, origin);
}
@ -122,9 +122,9 @@ AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
return obj;
}
AllocationResult NewSpace::AllocateFastAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
AllocationResult NewSpace::AllocateFastAligned(
int size_in_bytes, int* result_aligned_size_in_bytes,
AllocationAlignment alignment, AllocationOrigin origin) {
Address top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
@ -136,6 +136,8 @@ AllocationResult NewSpace::AllocateFastAligned(int size_in_bytes,
HeapObject obj = HeapObject::FromAddress(top);
allocation_info_.set_top(top + aligned_size_in_bytes);
if (result_aligned_size_in_bytes)
*result_aligned_size_in_bytes = aligned_size_in_bytes;
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
if (filler_size > 0) {


@ -465,8 +465,7 @@ bool NewSpace::Rebalance() {
}
void NewSpace::UpdateLinearAllocationArea() {
// Make sure there is no unaccounted allocations.
DCHECK(!allocation_counter_.IsActive() || top_on_previous_step_ == top());
AdvanceAllocationObservers();
Address new_top = to_space_.page_low();
BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@ -475,13 +474,12 @@ void NewSpace::UpdateLinearAllocationArea() {
// See the corresponding loads in ConcurrentMarking::Run.
original_limit_.store(limit(), std::memory_order_relaxed);
original_top_.store(top(), std::memory_order_release);
StartNextInlineAllocationStep();
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
UpdateInlineAllocationLimit(0);
}
void NewSpace::ResetLinearAllocationArea() {
// Do a step to account for memory allocated so far before resetting.
InlineAllocationStep(top(), top(), kNullAddress, 0);
to_space_.Reset();
UpdateLinearAllocationArea();
// Clear all mark-bits in the to-space.
@ -506,9 +504,6 @@ bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
DCHECK(!OldSpace::IsAtPageStart(top));
// Do a step to account for memory allocated on previous page.
InlineAllocationStep(top, top, kNullAddress, 0);
if (!to_space_.AdvancePage()) {
// No more pages left to advance.
return false;
@ -530,34 +525,30 @@ bool NewSpace::AddFreshPageSynchronized() {
bool NewSpace::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment) {
AdvanceAllocationObservers();
Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
int filler_size = Heap::GetFillToAlign(old_top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
if (old_top + aligned_size_in_bytes > high) {
// Not enough room in the page, try to allocate a new one.
if (!AddFreshPage()) {
return false;
}
old_top = allocation_info_.top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
if (old_top + aligned_size_in_bytes <= high) {
UpdateInlineAllocationLimit(aligned_size_in_bytes);
return true;
}
// Not enough room in the page, try to allocate a new one.
if (!AddFreshPage()) {
return false;
}
old_top = allocation_info_.top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
aligned_size_in_bytes = size_in_bytes + filler_size;
DCHECK(old_top + aligned_size_in_bytes <= high);
if (allocation_info_.limit() < high) {
// Either the limit has been lowered because linear allocation was disabled
// or because incremental marking wants to get a chance to do a step,
// or because idle scavenge job wants to get a chance to post a task.
// Set the new limit accordingly.
Address new_top = old_top + aligned_size_in_bytes;
Address soon_object = old_top + filler_size;
InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
UpdateInlineAllocationLimit(aligned_size_in_bytes);
}
UpdateInlineAllocationLimit(aligned_size_in_bytes);
return true;
}
@ -568,12 +559,6 @@ std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
if (top() < top_on_previous_step_) {
// Generated code decreased the top() pointer to do folded allocations
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
Page::FromAllocationAreaAddress(top_on_previous_step_));
top_on_previous_step_ = top();
}
#ifdef V8_HOST_ARCH_32_BIT
return alignment != kWordAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
@ -595,8 +580,14 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
return AllocationResult::Retry();
}
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
DCHECK(!result.IsRetry());
InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
size_in_bytes);
return result;
}
@ -607,9 +598,17 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
return AllocationResult::Retry();
}
AllocationResult result =
AllocateFastAligned(size_in_bytes, alignment, origin);
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
int aligned_size_in_bytes;
AllocationResult result = AllocateFastAligned(
size_in_bytes, &aligned_size_in_bytes, alignment, origin);
DCHECK(!result.IsRetry());
InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
aligned_size_in_bytes, aligned_size_in_bytes);
return result;
}


@ -477,8 +477,8 @@ class V8_EXPORT_PRIVATE NewSpace
// Internal allocation methods.
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateFastAligned(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin);
AllocateFastAligned(int size_in_bytes, int* aligned_size_in_bytes,
AllocationAlignment alignment, AllocationOrigin origin);
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateFastUnaligned(int size_in_bytes, AllocationOrigin origin);
@ -495,7 +495,7 @@ class V8_EXPORT_PRIVATE NewSpace
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
bool SupportsInlineAllocation() override { return true; }
bool SupportsAllocationObserver() override { return true; }
friend class SemiSpaceObjectIterator;
};


@ -6,6 +6,7 @@
#define V8_HEAP_PAGED_SPACES_INL_H_
#include "src/common/globals.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/paged-spaces.h"
#include "src/objects/code-inl.h"
@ -96,18 +97,20 @@ bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {
return RefillLabMain(size_in_bytes, origin);
}
AllocationResult PagedSpace::AllocateLinearly(int size_in_bytes) {
AllocationResult PagedSpace::AllocateFastUnaligned(int size_in_bytes) {
Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
if (new_top > allocation_info_.limit())
return AllocationResult::Retry(identity());
DCHECK_LE(new_top, allocation_info_.limit());
allocation_info_.set_top(new_top);
return AllocationResult(HeapObject::FromAddress(current_top));
}
AllocationResult PagedSpace::TryAllocateLinearlyAligned(
int size_in_bytes, AllocationAlignment alignment) {
AllocationResult PagedSpace::AllocateFastAligned(
int size_in_bytes, int* aligned_size_in_bytes,
AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
@ -116,6 +119,8 @@ AllocationResult PagedSpace::TryAllocateLinearlyAligned(
return AllocationResult::Retry(identity());
allocation_info_.set_top(new_top);
if (aligned_size_in_bytes)
*aligned_size_in_bytes = filler_size + size_in_bytes;
if (filler_size > 0) {
Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
HeapObject::FromAddress(current_top), filler_size);
@ -129,7 +134,8 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
if (!EnsureLabMain(size_in_bytes, origin)) {
return AllocationResult::Retry(identity());
}
AllocationResult result = AllocateLinearly(size_in_bytes);
AllocationResult result = AllocateFastUnaligned(size_in_bytes);
DCHECK(!result.IsRetry());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes);
@ -138,6 +144,9 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
UpdateAllocationOrigins(origin);
}
InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
size_in_bytes);
return result;
}
@ -153,8 +162,9 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
if (!EnsureLabMain(allocation_size, origin)) {
return AllocationResult::Retry(identity());
}
int aligned_size_in_bytes;
AllocationResult result =
TryAllocateLinearlyAligned(size_in_bytes, alignment);
AllocateFastAligned(size_in_bytes, &aligned_size_in_bytes, alignment);
DCHECK(!result.IsRetry());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes);
@ -163,6 +173,9 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
UpdateAllocationOrigins(origin);
}
InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
aligned_size_in_bytes, allocation_size);
return result;
}
@ -172,9 +185,9 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationResult result;
if (alignment != kWordAligned) {
result = TryAllocateLinearlyAligned(size_in_bytes, alignment);
result = AllocateFastAligned(size_in_bytes, nullptr, alignment);
} else {
result = AllocateLinearly(size_in_bytes);
result = AllocateFastUnaligned(size_in_bytes);
}
if (!result.IsRetry()) {


@ -383,6 +383,13 @@ void PagedSpace::DecreaseLimit(Address new_limit) {
DCHECK_LE(top(), new_limit);
DCHECK_GE(old_limit, new_limit);
if (new_limit != old_limit) {
base::Optional<CodePageMemoryModificationScope> optional_scope;
if (identity() == CODE_SPACE) {
MemoryChunk* chunk = MemoryChunk::FromAddress(new_limit);
optional_scope.emplace(chunk);
}
SetTopAndLimit(top(), new_limit);
Free(new_limit, old_limit - new_limit,
SpaceAccountingMode::kSpaceAccounted);
@ -439,12 +446,7 @@ void PagedSpace::FreeLinearAllocationArea() {
return;
}
if (!is_local_space()) {
// This can start incremental marking and mark the current
// linear allocation area as black. Thus destroying of the black
// area needs to happen afterwards.
InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
}
AdvanceAllocationObservers();
if (current_top != current_limit && !is_off_thread_space() &&
heap()->incremental_marking()->black_allocation()) {
@ -479,7 +481,6 @@ void PagedSpace::ReleasePage(Page* page) {
free_list_->EvictFreeListItems(page);
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
DCHECK(!top_on_previous_step_);
SetTopAndLimit(kNullAddress, kNullAddress);
}
@ -552,6 +553,7 @@ bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
Page* page = Page::FromHeapObject(new_node);
IncreaseAllocatedBytes(new_node_size, page);
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
Address start = new_node.address();
Address end = new_node.address() + new_node_size;
Address limit = ComputeLimit(start, end, size_in_bytes);
@ -848,6 +850,9 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
#endif
void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
// Ensure there are no unaccounted allocations.
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
Address new_limit = ComputeLimit(top(), limit(), min_size);
DCHECK_LE(top(), new_limit);
DCHECK_LE(new_limit, limit());
@ -999,20 +1004,6 @@ bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
if (top_on_previous_step_ && top() < top_on_previous_step_ &&
SupportsInlineAllocation()) {
// Generated code decreased the top() pointer to do folded allocations.
// The top_on_previous_step_ can be one byte beyond the current page.
DCHECK_NE(top(), kNullAddress);
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
top_on_previous_step_ = top();
}
size_t bytes_since_last =
top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
if (!is_local_space()) {
// Start incremental marking before the actual allocation, this allows the
// allocation function to mark the object black when incremental marking is
@ -1030,15 +1021,6 @@ AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
#else
AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
heap_obj.address(), size_in_bytes);
StartNextInlineAllocationStep();
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
}
return result;
}


@ -317,9 +317,7 @@ class V8_EXPORT_PRIVATE PagedSpace
void SetTopAndLimit(Address top, Address limit);
void DecreaseLimit(Address new_limit);
void UpdateInlineAllocationLimit(size_t min_size) override;
bool SupportsInlineAllocation() override {
return identity() == OLD_SPACE && !is_local_space();
}
bool SupportsAllocationObserver() override { return !is_local_space(); }
// Slow path of allocation function
V8_WARN_UNUSED_RESULT AllocationResult
@ -350,13 +348,14 @@ class V8_EXPORT_PRIVATE PagedSpace
inline bool EnsureLabMain(int size_in_bytes, AllocationOrigin origin);
// Allocates an object from the linear allocation area. Assumes that the
// linear allocation area is large enough to fit the object.
inline AllocationResult AllocateLinearly(int size_in_bytes);
inline AllocationResult AllocateFastUnaligned(int size_in_bytes);
// Tries to allocate an aligned object from the linear allocation area.
// Returns nullptr if the linear allocation area does not fit the object.
// Otherwise, returns the object pointer and writes the allocation size
// (object size + alignment filler size) to the size_in_bytes.
inline AllocationResult TryAllocateLinearlyAligned(
int size_in_bytes, AllocationAlignment alignment);
inline AllocationResult AllocateFastAligned(int size_in_bytes,
int* aligned_size_in_bytes,
AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT bool TryAllocationFromFreeListMain(
size_t size_in_bytes, AllocationOrigin origin);


@ -22,6 +22,7 @@
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
@ -249,46 +250,16 @@ void Page::DestroyBlackAreaBackground(Address start, Address end) {
void Space::AddAllocationObserver(AllocationObserver* observer) {
allocation_counter_.AddAllocationObserver(observer);
StartNextInlineAllocationStep();
}
void Space::RemoveAllocationObserver(AllocationObserver* observer) {
allocation_counter_.RemoveAllocationObserver(observer);
StartNextInlineAllocationStep();
}
void Space::PauseAllocationObservers() { allocation_counter_.Pause(); }
void Space::ResumeAllocationObservers() { allocation_counter_.Resume(); }
void Space::AllocationStep(int bytes_since_last, Address soon_object,
int size) {
if (!allocation_counter_.IsActive()) {
return;
}
DCHECK(!heap()->allocation_step_in_progress());
heap()->set_allocation_step_in_progress(true);
heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
for (AllocationObserver* observer : allocation_counter_) {
observer->AllocationStep(bytes_since_last, soon_object, size);
}
heap()->set_allocation_step_in_progress(false);
}
void Space::AllocationStepAfterMerge(Address first_object_in_chunk, int size) {
if (!allocation_counter_.IsActive()) {
return;
}
DCHECK(!heap()->allocation_step_in_progress());
heap()->set_allocation_step_in_progress(true);
for (AllocationObserver* observer : allocation_counter_) {
observer->AllocationStep(size, first_object_in_chunk, size);
}
heap()->set_allocation_step_in_progress(false);
}
Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
size_t min_size) {
DCHECK_GE(end - start, min_size);
@ -296,14 +267,19 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
if (heap()->inline_allocation_disabled()) {
// Fit the requested area exactly.
return start + min_size;
} else if (SupportsInlineAllocation() && allocation_counter_.IsActive()) {
} else if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
// Ensure there are no unaccounted allocations.
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
// Generated code may allocate inline from the linear allocation area for.
// To make sure we can observe these allocations, we use a lower limit.
size_t step = allocation_counter_.GetNextInlineAllocationStepSize();
// To make sure we can observe these allocations, we use a lower limit.
size_t step = allocation_counter_.NextBytes();
DCHECK_NE(step, 0);
size_t rounded_step =
RoundSizeDownToObjectAlignment(static_cast<int>(step - 1));
// Use uint64_t to avoid overflow on 32-bit
uint64_t step_end = static_cast<uint64_t>(start) + min_size + rounded_step;
uint64_t step_end =
static_cast<uint64_t>(start) + Max(min_size, rounded_step);
uint64_t new_end = Min(step_end, static_cast<uint64_t>(end));
return static_cast<Address>(new_end);
} else {
@ -370,73 +346,99 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
other.allocation_info_.Reset(kNullAddress, kNullAddress);
return *this;
}
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
if (heap()->allocation_step_in_progress()) {
// If we are mid-way through an existing step, don't start a new one.
return;
}
if (allocation_counter_.IsActive()) {
top_on_previous_step_ = top();
UpdateInlineAllocationLimit(0);
} else {
DCHECK_EQ(kNullAddress, top_on_previous_step_);
}
}
void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
InlineAllocationStep(top(), top(), kNullAddress, 0);
Space::AddAllocationObserver(observer);
DCHECK_IMPLIES(top_on_previous_step_, allocation_counter_.IsActive());
if (!allocation_counter_.IsStepInProgress()) {
AdvanceAllocationObservers();
Space::AddAllocationObserver(observer);
UpdateInlineAllocationLimit(0);
} else {
Space::AddAllocationObserver(observer);
}
}
void SpaceWithLinearArea::RemoveAllocationObserver(
AllocationObserver* observer) {
Address top_for_next_step =
allocation_counter_.NumberAllocationObservers() == 1 ? kNullAddress
: top();
InlineAllocationStep(top(), top_for_next_step, kNullAddress, 0);
DCHECK(!allocation_counter_.IsStepInProgress());
AdvanceAllocationObservers();
Space::RemoveAllocationObserver(observer);
DCHECK_IMPLIES(top_on_previous_step_, allocation_counter_.IsActive());
}
void SpaceWithLinearArea::PauseAllocationObservers() {
// Do a step to account for memory allocated so far.
InlineAllocationStep(top(), kNullAddress, kNullAddress, 0);
Space::PauseAllocationObservers();
DCHECK_EQ(kNullAddress, top_on_previous_step_);
UpdateInlineAllocationLimit(0);
}
void SpaceWithLinearArea::PauseAllocationObservers() {
AdvanceAllocationObservers();
Space::PauseAllocationObservers();
}
void SpaceWithLinearArea::ResumeAllocationObservers() {
DCHECK_EQ(kNullAddress, top_on_previous_step_);
Space::ResumeAllocationObservers();
StartNextInlineAllocationStep();
allocation_info_.MoveStartToTop();
UpdateInlineAllocationLimit(0);
}
void SpaceWithLinearArea::InlineAllocationStep(Address top,
Address top_for_next_step,
Address soon_object,
size_t size) {
if (heap()->allocation_step_in_progress()) {
// Avoid starting a new step if we are mid-way through an existing one.
return;
void SpaceWithLinearArea::AdvanceAllocationObservers() {
if (allocation_info_.top() &&
allocation_info_.start() != allocation_info_.top()) {
allocation_counter_.AdvanceAllocationObservers(allocation_info_.top() -
allocation_info_.start());
allocation_info_.MoveStartToTop();
}
}
if (top_on_previous_step_) {
if (top < top_on_previous_step_) {
// Generated code decreased the top pointer to do folded allocations.
DCHECK_NE(top, kNullAddress);
DCHECK_EQ(Page::FromAllocationAreaAddress(top),
Page::FromAllocationAreaAddress(top_on_previous_step_));
top_on_previous_step_ = top;
// Perform an allocation step when the step is reached. size_in_bytes is the
// actual size needed for the object (required for InvokeAllocationObservers).
// aligned_size_in_bytes is the size of the object including the filler right
// before it to reach the right alignment (required to DCHECK the start of the
// object). allocation_size is the size of the actual allocation which needs to
// be used for the accounting. It can be different from aligned_size_in_bytes in
// PagedSpace::AllocateRawAligned, where we have to overallocate in order to be
// able to align the allocation afterwards.
void SpaceWithLinearArea::InvokeAllocationObservers(
Address soon_object, size_t size_in_bytes, size_t aligned_size_in_bytes,
size_t allocation_size) {
DCHECK_LE(size_in_bytes, aligned_size_in_bytes);
DCHECK_LE(aligned_size_in_bytes, allocation_size);
DCHECK(size_in_bytes == aligned_size_in_bytes ||
aligned_size_in_bytes == allocation_size);
if (!SupportsAllocationObserver() || !allocation_counter_.IsActive()) return;
if (allocation_size >= allocation_counter_.NextBytes()) {
// Only the first object in a LAB should reach the next step.
DCHECK_EQ(soon_object,
allocation_info_.start() + aligned_size_in_bytes - size_in_bytes);
// Right now the LAB only contains that one object.
DCHECK_EQ(allocation_info_.top() + allocation_size - aligned_size_in_bytes,
allocation_info_.limit());
// Ensure that there is a valid object
if (identity() == CODE_SPACE) {
MemoryChunk* chunk = MemoryChunk::FromAddress(soon_object);
heap()->UnprotectAndRegisterMemoryChunk(chunk);
}
int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
AllocationStep(bytes_allocated, soon_object, static_cast<int>(size));
top_on_previous_step_ = top_for_next_step;
}
}
heap_->CreateFillerObjectAt(soon_object, static_cast<int>(size_in_bytes),
ClearRecordedSlots::kNo);
#if DEBUG
// Ensure that allocation_info_ isn't modified during one of the
// AllocationObserver::Step methods.
LinearAllocationArea saved_allocation_info = allocation_info_;
#endif
// Run AllocationObserver::Step through the AllocationCounter.
allocation_counter_.InvokeAllocationObservers(soon_object, size_in_bytes,
allocation_size);
// Ensure that start/top/limit didn't change.
DCHECK_EQ(saved_allocation_info.start(), allocation_info_.start());
DCHECK_EQ(saved_allocation_info.top(), allocation_info_.top());
DCHECK_EQ(saved_allocation_info.limit(), allocation_info_.limit());
}
DCHECK_LT(allocation_info_.limit() - allocation_info_.start(),
allocation_counter_.NextBytes());
}
int MemoryChunk::FreeListsLength() {
int length = 0;


@ -112,7 +112,6 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
public:
Space(Heap* heap, AllocationSpace id, FreeList* free_list)
: BaseSpace(heap, id),
allocation_counter_(heap),
free_list_(std::unique_ptr<FreeList>(free_list)) {
external_backing_store_bytes_ =
new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
@ -139,13 +138,6 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
virtual void StartNextInlineAllocationStep() {}
void AllocationStep(int bytes_since_last, Address soon_object, int size);
// An AllocationStep equivalent to be called after merging a contiguous
// chunk of an off-thread space into this space. The chunk is treated as a
// single allocation-folding group.
void AllocationStepAfterMerge(Address first_object_in_chunk, int size);
// Returns size of objects. Can differ from the allocated size
// (e.g. see OldLargeObjectSpace).
virtual size_t SizeOfObjects() { return Size(); }
@ -381,6 +373,8 @@ class LinearAllocationArea {
set_limit(limit);
}
void MoveStartToTop() { start_ = top_; }
V8_INLINE Address start() const { return start_; }
V8_INLINE void set_top(Address top) {
@ -490,11 +484,11 @@ class LocalAllocationBuffer {
class SpaceWithLinearArea : public Space {
public:
SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list)
: Space(heap, id, free_list), top_on_previous_step_(0) {
: Space(heap, id, free_list) {
allocation_info_.Reset(kNullAddress, kNullAddress);
}
virtual bool SupportsInlineAllocation() = 0;
virtual bool SupportsAllocationObserver() = 0;
// Returns the allocation pointer in this space.
Address top() { return allocation_info_.top(); }
@ -508,6 +502,7 @@ class SpaceWithLinearArea : public Space {
return allocation_info_.limit_address();
}
// Methods needed for allocation observers.
V8_EXPORT_PRIVATE void AddAllocationObserver(
AllocationObserver* observer) override;
V8_EXPORT_PRIVATE void RemoveAllocationObserver(
@ -515,6 +510,12 @@ class SpaceWithLinearArea : public Space {
V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
V8_EXPORT_PRIVATE void PauseAllocationObservers() override;
V8_EXPORT_PRIVATE void AdvanceAllocationObservers();
V8_EXPORT_PRIVATE void InvokeAllocationObservers(Address soon_object,
size_t size_in_bytes,
size_t aligned_size_in_bytes,
size_t allocation_size);
// When allocation observers are active we may use a lower limit to allow the
// observers to 'interrupt' earlier than the natural limit. Given a linear
// area bounded by [start, end), this function computes the limit to use to
@ -529,22 +530,8 @@ class SpaceWithLinearArea : public Space {
void PrintAllocationsOrigins();
protected:
// If we are doing inline allocation in steps, this method performs the 'step'
// operation. top is the memory address of the bump pointer at the last
// inline allocation (i.e. it determines the numbers of bytes actually
// allocated since the last step.) top_for_next_step is the address of the
// bump pointer where the next byte is going to be allocated from. top and
// top_for_next_step may be different when we cross a page boundary or reset
// the space.
// TODO(ofrobots): clarify the precise difference between this and
// Space::AllocationStep.
void InlineAllocationStep(Address top, Address top_for_next_step,
Address soon_object, size_t size);
V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;
// TODO(ofrobots): make these private after refactoring is complete.
LinearAllocationArea allocation_info_;
Address top_on_previous_step_;
size_t allocations_origins_[static_cast<int>(
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};


@ -232,6 +232,7 @@ v8_source_set("unittests_sources") {
"diagnostics/eh-frame-iterator-unittest.cc",
"diagnostics/eh-frame-writer-unittest.cc",
"execution/microtask-queue-unittest.cc",
"heap/allocation-observer-unittest.cc",
"heap/barrier-unittest.cc",
"heap/bitmap-test-utils.h",
"heap/bitmap-unittest.cc",


@ -0,0 +1,111 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/allocation-observer.h"
#include "src/base/logging.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
namespace {
class UnusedObserver : public AllocationObserver {
public:
explicit UnusedObserver(size_t step_size) : AllocationObserver(step_size) {}
void Step(int bytes_allocated, Address soon_object, size_t size) override {
CHECK(false);
}
};
} // namespace
TEST(AllocationObserverTest, AddAndRemoveUnusedObservers) {
AllocationCounter counter;
CHECK(!counter.IsActive());
UnusedObserver observer100(100);
UnusedObserver observer200(200);
counter.AddAllocationObserver(&observer200);
CHECK_EQ(counter.NextBytes(), 200);
counter.AddAllocationObserver(&observer100);
CHECK_EQ(counter.NextBytes(), 100);
counter.AdvanceAllocationObservers(90);
CHECK_EQ(counter.NextBytes(), 10);
counter.RemoveAllocationObserver(&observer100);
CHECK_EQ(counter.NextBytes(), 110);
counter.RemoveAllocationObserver(&observer200);
CHECK(!counter.IsActive());
}
namespace {
class VerifyStepObserver : public AllocationObserver {
public:
explicit VerifyStepObserver(size_t step_size)
: AllocationObserver(step_size) {}
void Step(int bytes_allocated, Address soon_object, size_t size) override {
CHECK(!do_not_invoke_);
invocations_++;
CHECK_EQ(expected_bytes_allocated_, bytes_allocated);
CHECK_EQ(expected_size_, size);
}
void ExpectNoInvocation() { do_not_invoke_ = true; }
void Expect(int expected_bytes_allocated, size_t expected_size) {
do_not_invoke_ = false;
expected_bytes_allocated_ = expected_bytes_allocated;
expected_size_ = expected_size;
}
int Invocations() { return invocations_; }
private:
bool do_not_invoke_ = false;
int invocations_ = 0;
int expected_bytes_allocated_ = 0;
size_t expected_size_ = 0;
};
} // namespace
TEST(AllocationObserverTest, Step) {
AllocationCounter counter;
CHECK(!counter.IsActive());
const Address kSomeObjectAddress = 8;
VerifyStepObserver observer100(100);
VerifyStepObserver observer200(200);
counter.AddAllocationObserver(&observer100);
counter.AddAllocationObserver(&observer200);
observer100.Expect(90, 8);
observer200.ExpectNoInvocation();
counter.AdvanceAllocationObservers(90);
counter.InvokeAllocationObservers(kSomeObjectAddress, 8, 10);
CHECK_EQ(observer100.Invocations(), 1);
CHECK_EQ(observer200.Invocations(), 0);
CHECK_EQ(counter.NextBytes(),
10 /* aligned_object_size */ + 100 /* smallest step size*/);
observer100.Expect(90, 16);
observer200.Expect(180, 16);
counter.AdvanceAllocationObservers(90);
counter.InvokeAllocationObservers(kSomeObjectAddress, 16, 20);
CHECK_EQ(observer100.Invocations(), 2);
CHECK_EQ(observer200.Invocations(), 1);
CHECK_EQ(counter.NextBytes(),
20 /* aligned_object_size */ + 100 /* smallest step size*/);
}
} // namespace internal
} // namespace v8