Reland "[heap] Invoke allocation observers before limiting the LAB"

This is a reland of commit 39d387bb72

Original change's description:
> [heap] Invoke allocation observers before limiting the LAB
>
> Currently, whenever we reach a step, we get a small LAB the same size
> as the allocated object. This is because the remaining step size is
> smaller than the current allocation.
> Invoking observers before limiting the LAB, and thus updating step
> sizes, should eliminate the small LAB we get whenever we reach a step.
>
> Drive-by: remove redundant method arguments.
>
> Bug: v8:12612
> Change-Id: Ied92a947308368d3b289e246fdb4f40ac5e5981f
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4013683
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Commit-Queue: Omer Katz <omerkatz@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#84157}

Bug: v8:12612, v8:13465
Change-Id: I40fb930a755cb5decccd932c4d25ed7d5d224da4
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4020177
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84328}
Commit dbbccae19a (parent e5230b85db), authored by Omer Katz on 2022-11-12 00:17:29 +01:00 and committed by V8 LUCI CQ.
23 changed files with 107 additions and 140 deletions
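
Illustration (not part of the commit): the diff below drops the bytes_allocated parameter from AllocationObserver::Step, so the interface becomes Step(Address soon_object, size_t size). A minimal observer against the new signature could look like the following sketch; it assumes V8-internal headers where AllocationObserver, Address, and USE are visible, and the class name CountingObserver and its fields are hypothetical.

// Illustrative sketch only (not part of this commit): an observer written
// against the new two-argument Step() signature.
class CountingObserver final : public AllocationObserver {
 public:
  explicit CountingObserver(intptr_t step_size)
      : AllocationObserver(step_size) {}

  // The step delta (bytes_allocated) is no longer passed in; only the address
  // of the object about to be allocated and its requested size are.
  void Step(Address soon_object, size_t size) override {
    USE(soon_object);
    ++invocations_;
    last_requested_size_ = size;
  }

  int invocations() const { return invocations_; }

 private:
  int invocations_ = 0;
  size_t last_requested_size_ = 0;
};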


@ -101,13 +101,10 @@ void AllocationCounter::InvokeAllocationObservers(Address soon_object,
if (aoc.next_counter_ - current_counter_ <= aligned_object_size) {
{
DisallowGarbageCollection no_gc;
aoc.observer_->Step(
static_cast<int>(current_counter_ - aoc.prev_counter_), soon_object,
object_size);
aoc.observer_->Step(soon_object, object_size);
}
size_t observer_step_size = aoc.observer_->GetNextStepSize();
aoc.prev_counter_ = current_counter_;
aoc.next_counter_ =
current_counter_ + aligned_object_size + observer_step_size;
step_run = true;
@ -122,7 +119,6 @@ void AllocationCounter::InvokeAllocationObservers(Address soon_object,
// Now process newly added allocation observers.
for (AllocationObserverCounter& aoc : pending_added_) {
size_t observer_step_size = aoc.observer_->GetNextStepSize();
aoc.prev_counter_ = current_counter_;
aoc.next_counter_ =
current_counter_ + aligned_object_size + observer_step_size;
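
Illustrative arithmetic for the counter update above, using the numbers from the allocation-observer unit test at the end of this diff: after AdvanceAllocationObservers(90), current_counter_ is 90; invoking observers for an aligned object size of 10 with an observer step size of 100 sets next_counter_ = 90 + 10 + 100 = 200, so NextBytes() reports 10 + 100 bytes until the next step.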


@ -37,7 +37,7 @@ class AllocationObserver {
// rather than the first object.
// 3. `size` is the requested size at the time of allocation. Right-trimming
// may change the object size dynamically.
virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
virtual void Step(Address soon_object, size_t size) = 0;
// Subclasses can override this method to make step size dynamic.
virtual intptr_t GetNextStepSize() { return step_size_; }
@ -94,12 +94,9 @@ class AllocationCounter final {
struct AllocationObserverCounter final {
AllocationObserverCounter(AllocationObserver* observer, size_t prev_counter,
size_t next_counter)
: observer_(observer),
prev_counter_(prev_counter),
next_counter_(next_counter) {}
: observer_(observer), next_counter_(next_counter) {}
AllocationObserver* observer_;
size_t prev_counter_;
size_t next_counter_;
};


@ -195,9 +195,7 @@ class ScavengeTaskObserver final : public AllocationObserver {
ScavengeTaskObserver(Heap* heap, intptr_t step_size)
: AllocationObserver(step_size), heap_(heap) {}
void Step(int bytes_allocated, Address, size_t) override {
heap_->ScheduleScavengeTaskIfNeeded();
}
void Step(Address, size_t) override { heap_->ScheduleScavengeTaskIfNeeded(); }
private:
Heap* heap_;
@ -210,7 +208,7 @@ class MinorMCTaskObserver final : public AllocationObserver {
MinorMCTaskObserver(Heap* heap, intptr_t step_size)
: AllocationObserver(step_size), heap_(heap) {}
void Step(int bytes_allocated, Address, size_t) override {
void Step(Address, size_t) override {
if (v8_flags.concurrent_minor_mc_marking) {
if (heap_->incremental_marking()->IsMinorMarking()) {
heap_->concurrent_marking()->RescheduleJobIfNeeded(
@ -5519,7 +5517,7 @@ class StressConcurrentAllocationObserver : public AllocationObserver {
explicit StressConcurrentAllocationObserver(Heap* heap)
: AllocationObserver(1024), heap_(heap) {}
void Step(int bytes_allocated, Address, size_t) override {
void Step(Address, size_t) override {
DCHECK(heap_->deserialization_complete());
if (v8_flags.stress_concurrent_allocation) {
// Only schedule task if --stress-concurrent-allocation is enabled. This
@ -5708,9 +5706,7 @@ void Heap::PrintMaxNewSpaceSizeReached() {
}
int Heap::NextStressMarkingLimit() {
// Reuse Heap-global mutex as this getter is called from different threads on
// allocation slow paths.
base::MutexGuard guard(relocation_mutex());
base::MutexGuard guard(&stress_marking_mutex_);
return isolate()->fuzzer_rng()->NextInt(v8_flags.stress_marking + 1);
}


@ -2213,6 +2213,9 @@ class Heap {
// marking limit.
AllocationObserver* stress_marking_observer_ = nullptr;
// Mutex used for synchronizing getting next stress marking limit.
base::Mutex stress_marking_mutex_;
// Observer that can cause early scavenge start.
StressScavengeObserver* stress_scavenge_observer_ = nullptr;


@ -41,8 +41,7 @@
namespace v8 {
namespace internal {
void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
size_t size) {
void IncrementalMarking::Observer::Step(Address addr, size_t size) {
Heap* heap = incremental_marking_->heap();
VMState<GC> state(heap->isolate());
RCS_SCOPE(heap->isolate(),


@ -174,7 +174,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
: AllocationObserver(step_size),
incremental_marking_(incremental_marking) {}
void Step(int bytes_allocated, Address, size_t) override;
void Step(Address, size_t) override;
private:
IncrementalMarking* incremental_marking_;


@ -59,8 +59,7 @@ V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
// SemiSpaceNewSpace
V8_INLINE bool SemiSpaceNewSpace::EnsureAllocation(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin,
int* out_max_aligned_size) {
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
#if DEBUG
@ -85,15 +84,12 @@ V8_INLINE bool SemiSpaceNewSpace::EnsureAllocation(
old_top = allocation_info_.top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
aligned_size_in_bytes = size_in_bytes + filler_size;
}
if (out_max_aligned_size) {
*out_max_aligned_size = aligned_size_in_bytes;
}
DCHECK(old_top + aligned_size_in_bytes <= high);
InvokeAllocationObservers(
top() + Heap::GetFillToAlign(top(), alignment), size_in_bytes,
size_in_bytes + Heap::GetFillToAlign(top(), alignment));
UpdateInlineAllocationLimit(aligned_size_in_bytes);
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
@ -104,10 +100,8 @@ V8_INLINE bool SemiSpaceNewSpace::EnsureAllocation(
// PagedSpaceForNewSpace
V8_INLINE bool PagedSpaceForNewSpace::EnsureAllocation(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin,
int* out_max_aligned_size) {
if (!PagedSpaceBase::EnsureAllocation(size_in_bytes, alignment, origin,
out_max_aligned_size)) {
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
if (!PagedSpaceBase::EnsureAllocation(size_in_bytes, alignment, origin)) {
return false;
}
allocated_linear_areas_ += limit() - top();


@ -540,8 +540,7 @@ class V8_EXPORT_PRIVATE SemiSpaceNewSpace final : public NewSpace {
ParkedAllocationBuffersVector parked_allocation_buffers_;
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) final;
AllocationOrigin origin) final;
friend class SemiSpaceObjectIterator;
};
@ -604,8 +603,7 @@ class V8_EXPORT_PRIVATE PagedSpaceForNewSpace final : public PagedSpaceBase {
bool AddFreshPage() { return false; }
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) final;
AllocationOrigin origin) final;
bool EnsureCurrentCapacity();
@ -807,10 +805,8 @@ class V8_EXPORT_PRIVATE PagedNewSpace final : public NewSpace {
private:
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) final {
return paged_space_.EnsureAllocation(size_in_bytes, alignment, origin,
out_max_aligned_size);
AllocationOrigin origin) final {
return paged_space_.EnsureAllocation(size_in_bytes, alignment, origin);
}
void PromotePageInNewSpace(Page* page) final { UNREACHABLE(); }


@ -114,8 +114,7 @@ bool PagedSpaceBase::TryFreeLast(Address object_address, int object_size) {
V8_INLINE bool PagedSpaceBase::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) {
AllocationOrigin origin) {
if ((identity() != NEW_SPACE) && !is_compaction_space()) {
// Start incremental marking before the actual allocation, this allows the
// allocation function to mark the object black when incremental marking is
@ -128,13 +127,10 @@ V8_INLINE bool PagedSpaceBase::EnsureAllocation(int size_in_bytes,
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
size_in_bytes += Heap::GetMaximumFillToAlign(alignment);
if (out_max_aligned_size) {
*out_max_aligned_size = size_in_bytes;
}
if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
return true;
}
return RefillLabMain(size_in_bytes, origin);
return RefillLabMain(size_in_bytes, origin, alignment);
}
} // namespace internal


@ -605,8 +605,9 @@ std::unique_ptr<ObjectIterator> PagedSpaceBase::GetObjectIterator(Heap* heap) {
new PagedSpaceObjectIterator(heap, this));
}
bool PagedSpaceBase::TryAllocationFromFreeListMain(size_t size_in_bytes,
AllocationOrigin origin) {
bool PagedSpaceBase::TryAllocationFromFreeListMain(
size_t size_in_bytes, AllocationOrigin origin,
AllocationAlignment alignment) {
ConcurrentAllocationMutex guard(this);
DCHECK(IsAligned(size_in_bytes, kTaggedSize));
DCHECK_LE(top(), limit());
@ -642,6 +643,11 @@ bool PagedSpaceBase::TryAllocationFromFreeListMain(size_t size_in_bytes,
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
Address start = new_node.address();
Address end = new_node.address() + new_node_size;
bool was_black_allocating = heap()->incremental_marking()->black_allocation();
SetTopAndLimit(start, end);
InvokeAllocationObservers(
start + Heap::GetFillToAlign(start, alignment), size_in_bytes,
size_in_bytes + Heap::GetFillToAlign(start, alignment));
Address limit = ComputeLimit(start, end, size_in_bytes);
DCHECK_LE(limit, end);
DCHECK_LE(size_in_bytes, limit - start);
@ -650,6 +656,12 @@ bool PagedSpaceBase::TryAllocationFromFreeListMain(size_t size_in_bytes,
heap()->UnprotectAndRegisterMemoryChunk(
page, GetUnprotectMemoryOrigin(is_compaction_space()));
}
if (!was_black_allocating &&
heap()->incremental_marking()->black_allocation() &&
identity() != NEW_SPACE) {
// Allocation observers triggered black allocation.
page->DestroyBlackArea(limit, end);
}
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
SetLinearAllocationArea(start, limit);
@ -872,11 +884,11 @@ void PagedSpaceBase::UpdateInlineAllocationLimit(size_t min_size) {
// -----------------------------------------------------------------------------
// OldSpace implementation
bool PagedSpaceBase::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
VMState<GC> state(heap()->isolate());
bool PagedSpaceBase::RefillLabMain(int size_in_bytes, AllocationOrigin origin,
AllocationAlignment alignment) {
RCS_SCOPE(heap()->isolate(),
RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
return RawRefillLabMain(size_in_bytes, origin);
return RawRefillLabMain(size_in_bytes, origin, alignment);
}
Page* CompactionSpace::TryExpandImpl() {
@ -886,12 +898,13 @@ Page* CompactionSpace::TryExpandImpl() {
return page;
}
bool CompactionSpace::RefillLabMain(int size_in_bytes,
AllocationOrigin origin) {
return RawRefillLabMain(size_in_bytes, origin);
bool CompactionSpace::RefillLabMain(int size_in_bytes, AllocationOrigin origin,
AllocationAlignment alignment) {
return RawRefillLabMain(size_in_bytes, origin, alignment);
}
bool PagedSpaceBase::TryExpand(int size_in_bytes, AllocationOrigin origin) {
bool PagedSpaceBase::TryExpand(int size_in_bytes, AllocationOrigin origin,
AllocationAlignment alignment) {
DCHECK_NE(NEW_SPACE, identity());
Page* page = TryExpandImpl();
if (!page) return false;
@ -899,16 +912,18 @@ bool PagedSpaceBase::TryExpand(int size_in_bytes, AllocationOrigin origin) {
heap()->NotifyOldGenerationExpansion(identity(), page);
}
return TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
origin);
origin, alignment);
}
bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
AllocationOrigin origin) {
AllocationOrigin origin,
AllocationAlignment alignment) {
// Allocation in this space has failed.
DCHECK_GE(size_in_bytes, 0);
const int kMaxPagesToSweep = 1;
if (TryAllocationFromFreeListMain(size_in_bytes, origin)) return true;
if (TryAllocationFromFreeListMain(size_in_bytes, origin, alignment))
return true;
const bool is_main_thread =
heap()->IsMainThread() || heap()->IsSharedMainThread();
@ -927,13 +942,13 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
// Retry the free list allocation.
if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
origin))
origin, alignment))
return true;
{
TRACE_GC_EPOCH(heap()->tracer(), sweeping_scope_id, sweeping_scope_kind);
if (ContributeToSweepingMain(size_in_bytes, kMaxPagesToSweep,
size_in_bytes, origin))
size_in_bytes, origin, alignment))
return true;
}
}
@ -947,7 +962,7 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
if (page != nullptr) {
AddPage(page);
if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
origin))
origin, alignment))
return true;
}
}
@ -956,7 +971,7 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
heap()->ShouldExpandOldGenerationOnSlowAllocation(
heap()->main_thread_local_heap()) &&
heap()->CanExpandOldGeneration(AreaSize())) {
if (TryExpand(size_in_bytes, origin)) {
if (TryExpand(size_in_bytes, origin, alignment)) {
return true;
}
}
@ -964,21 +979,23 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
// Try sweeping all pages.
{
TRACE_GC_EPOCH(heap()->tracer(), sweeping_scope_id, sweeping_scope_kind);
if (ContributeToSweepingMain(0, 0, size_in_bytes, origin)) return true;
if (ContributeToSweepingMain(0, 0, size_in_bytes, origin, alignment))
return true;
}
if (identity() != NEW_SPACE && heap()->gc_state() != Heap::NOT_IN_GC &&
!heap()->force_oom()) {
// Avoid OOM crash in the GC in order to invoke NearHeapLimitCallback after
// GC and give it a chance to increase the heap limit.
return TryExpand(size_in_bytes, origin);
return TryExpand(size_in_bytes, origin, alignment);
}
return false;
}
bool PagedSpaceBase::ContributeToSweepingMain(int required_freed_bytes,
int max_pages, int size_in_bytes,
AllocationOrigin origin) {
AllocationOrigin origin,
AllocationAlignment alignment) {
// Cleanup invalidated old-to-new refs for compaction space in the
// final atomic pause.
Sweeper::SweepingMode sweeping_mode =
@ -989,7 +1006,7 @@ bool PagedSpaceBase::ContributeToSweepingMain(int required_freed_bytes,
heap()->sweeper()->ParallelSweepSpace(identity(), sweeping_mode,
required_freed_bytes, max_pages);
RefillFreeList();
return TryAllocationFromFreeListMain(size_in_bytes, origin);
return TryAllocationFromFreeListMain(size_in_bytes, origin, alignment);
}
return false;
}


@ -322,30 +322,32 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
virtual Page* TryExpandImpl();
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) override;
AllocationOrigin origin) override;
V8_WARN_UNUSED_RESULT bool TryAllocationFromFreeListMain(
size_t size_in_bytes, AllocationOrigin origin);
size_t size_in_bytes, AllocationOrigin origin,
AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT bool ContributeToSweepingMain(int required_freed_bytes,
int max_pages,
int size_in_bytes,
AllocationOrigin origin);
V8_WARN_UNUSED_RESULT bool ContributeToSweepingMain(
int required_freed_bytes, int max_pages, int size_in_bytes,
AllocationOrigin origin, AllocationAlignment alignment);
// Refills LAB for EnsureLabMain. This function is space-dependent. Returns
// false if there is not enough space and the caller has to retry after
// collecting garbage.
V8_WARN_UNUSED_RESULT virtual bool RefillLabMain(int size_in_bytes,
AllocationOrigin origin);
V8_WARN_UNUSED_RESULT virtual bool RefillLabMain(
int size_in_bytes, AllocationOrigin origin,
AllocationAlignment alignment);
// Actual implementation of refilling LAB. Returns false if there is not
// enough space and the caller has to retry after collecting garbage.
V8_WARN_UNUSED_RESULT bool RawRefillLabMain(int size_in_bytes,
AllocationOrigin origin);
AllocationOrigin origin,
AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
AllocationOrigin origin);
AllocationOrigin origin,
AllocationAlignment alignment);
size_t committed_physical_memory() const {
return committed_physical_memory_.load(std::memory_order_relaxed);
@ -406,8 +408,9 @@ class V8_EXPORT_PRIVATE CompactionSpace final : public PagedSpace {
LinearAllocationArea allocation_info_;
protected:
V8_WARN_UNUSED_RESULT bool RefillLabMain(int size_in_bytes,
AllocationOrigin origin) override;
V8_WARN_UNUSED_RESULT bool RefillLabMain(
int size_in_bytes, AllocationOrigin origin,
AllocationAlignment alignment) override;
Page* TryExpandImpl() final;
// The space is temporary and not included in any snapshots.


@ -264,13 +264,10 @@ AllocationResult SpaceWithLinearArea::AllocateRaw(int size_in_bytes,
AllocationResult SpaceWithLinearArea::AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin) {
DCHECK(!v8_flags.enable_third_party_heap);
int max_aligned_size;
if (!EnsureAllocation(size_in_bytes, kTaggedAligned, origin,
&max_aligned_size)) {
if (!EnsureAllocation(size_in_bytes, kTaggedAligned, origin)) {
return AllocationResult::Failure();
}
DCHECK_EQ(max_aligned_size, size_in_bytes);
DCHECK_LE(allocation_info_.start(), allocation_info_.top());
AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
@ -280,8 +277,9 @@ AllocationResult SpaceWithLinearArea::AllocateRawUnaligned(
UpdateAllocationOrigins(origin);
}
InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
size_in_bytes);
DCHECK_IMPLIES(allocation_counter_.IsActive(),
(allocation_info_.limit() - allocation_info_.start()) <=
allocation_counter_.NextBytes());
return result;
}
@ -289,27 +287,25 @@ AllocationResult SpaceWithLinearArea::AllocateRawUnaligned(
AllocationResult SpaceWithLinearArea::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
DCHECK(!v8_flags.enable_third_party_heap);
int max_aligned_size;
if (!EnsureAllocation(size_in_bytes, alignment, origin, &max_aligned_size)) {
if (!EnsureAllocation(size_in_bytes, alignment, origin)) {
return AllocationResult::Failure();
}
DCHECK_GE(max_aligned_size, size_in_bytes);
DCHECK_LE(allocation_info_.start(), allocation_info_.top());
int aligned_size_in_bytes;
AllocationResult result = AllocateFastAligned(
size_in_bytes, &aligned_size_in_bytes, alignment, origin);
DCHECK_GE(max_aligned_size, aligned_size_in_bytes);
DCHECK(!result.IsFailure());
if (v8_flags.trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
aligned_size_in_bytes, max_aligned_size);
DCHECK_IMPLIES(allocation_counter_.IsActive(),
(allocation_info_.limit() - allocation_info_.start()) <=
allocation_counter_.NextBytes());
return result;
}


@ -387,29 +387,18 @@ void SpaceWithLinearArea::MarkLabStartInitialized() {
// actual size needed for the object (required for InvokeAllocationObservers).
// aligned_size_in_bytes is the size of the object including the filler right
// before it to reach the right alignment (required to DCHECK the start of the
// object). allocation_size is the size of the actual allocation which needs to
// be used for the accounting. It can be different from aligned_size_in_bytes in
// PagedSpace::AllocateRawAligned, where we have to overallocate in order to be
// able to align the allocation afterwards.
// object).
void SpaceWithLinearArea::InvokeAllocationObservers(
Address soon_object, size_t size_in_bytes, size_t aligned_size_in_bytes,
size_t allocation_size) {
Address soon_object, size_t size_in_bytes, size_t aligned_size_in_bytes) {
DCHECK_LE(size_in_bytes, aligned_size_in_bytes);
DCHECK_LE(aligned_size_in_bytes, allocation_size);
DCHECK(size_in_bytes == aligned_size_in_bytes ||
aligned_size_in_bytes == allocation_size);
if (!SupportsAllocationObserver() || !allocation_counter_.IsActive()) return;
if (allocation_size >= allocation_counter_.NextBytes()) {
if (aligned_size_in_bytes >= allocation_counter_.NextBytes()) {
// Only the first object in a LAB should reach the next step.
DCHECK_EQ(soon_object,
allocation_info_.start() + aligned_size_in_bytes - size_in_bytes);
// Right now the LAB only contains that one object.
DCHECK_EQ(allocation_info_.top() + allocation_size - aligned_size_in_bytes,
allocation_info_.limit());
// Ensure that there is a valid object
if (identity() == CODE_SPACE) {
MemoryChunk* chunk = MemoryChunk::FromAddress(soon_object);
@ -426,17 +415,13 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
// Run AllocationObserver::Step through the AllocationCounter.
allocation_counter_.InvokeAllocationObservers(soon_object, size_in_bytes,
allocation_size);
aligned_size_in_bytes);
// Ensure that start/top/limit didn't change.
DCHECK_EQ(saved_allocation_info.start(), allocation_info_.start());
DCHECK_EQ(saved_allocation_info.top(), allocation_info_.top());
DCHECK_EQ(saved_allocation_info.limit(), allocation_info_.limit());
}
DCHECK_IMPLIES(allocation_counter_.IsActive(),
(allocation_info_.limit() - allocation_info_.start()) <
allocation_counter_.NextBytes());
}
#if DEBUG


@ -553,10 +553,8 @@ class SpaceWithLinearArea : public Space {
V8_EXPORT_PRIVATE void PauseAllocationObservers() override;
V8_EXPORT_PRIVATE void AdvanceAllocationObservers();
V8_EXPORT_PRIVATE void InvokeAllocationObservers(Address soon_object,
size_t size_in_bytes,
size_t aligned_size_in_bytes,
size_t allocation_size);
V8_EXPORT_PRIVATE void InvokeAllocationObservers(
Address soon_object, size_t size_in_bytes, size_t aligned_size_in_bytes);
void MarkLabStartInitialized();
virtual void FreeLinearAllocationArea() = 0;
@ -636,8 +634,7 @@ class SpaceWithLinearArea : public Space {
// that there is enough space.
virtual bool EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) = 0;
AllocationOrigin origin) = 0;
#if DEBUG
V8_EXPORT_PRIVATE virtual void VerifyTop() const;


@ -12,8 +12,7 @@ namespace internal {
StressMarkingObserver::StressMarkingObserver(Heap* heap)
: AllocationObserver(64), heap_(heap) {}
void StressMarkingObserver::Step(int bytes_allocated, Address soon_object,
size_t size) {
void StressMarkingObserver::Step(Address soon_object, size_t size) {
heap_->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
kNoGCCallbackFlags);
}


@ -14,7 +14,7 @@ class StressMarkingObserver : public AllocationObserver {
public:
explicit StressMarkingObserver(Heap* heap);
void Step(int bytes_allocated, Address soon_object, size_t size) override;
void Step(Address soon_object, size_t size) override;
private:
Heap* heap_;


@ -26,8 +26,7 @@ StressScavengeObserver::StressScavengeObserver(Heap* heap)
}
}
void StressScavengeObserver::Step(int bytes_allocated, Address soon_object,
size_t size) {
void StressScavengeObserver::Step(Address soon_object, size_t size) {
if (has_requested_gc_ || heap_->new_space()->Capacity() == 0) {
return;
}


@ -14,7 +14,7 @@ class StressScavengeObserver : public AllocationObserver {
public:
explicit StressScavengeObserver(Heap* heap);
void Step(int bytes_allocated, Address soon_object, size_t size) override;
void Step(Address soon_object, size_t size) override;
bool HasRequestedGC() const;
void RequestedGCDone();


@ -138,7 +138,7 @@ class SamplingHeapProfiler {
rate_(rate) {}
protected:
void Step(int bytes_allocated, Address soon_object, size_t size) override {
void Step(Address soon_object, size_t size) override {
USE(heap_);
DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
if (soon_object) {


@ -234,7 +234,6 @@ void FillPageInPagedSpace(Page* page,
void FillCurrentPage(v8::internal::NewSpace* space,
std::vector<Handle<FixedArray>>* out_handles) {
if (v8_flags.minor_mc) {
PauseAllocationObserversScope pause_observers(space->heap());
if (space->top() == kNullAddress) return;
Page* page = Page::FromAllocationAreaAddress(space->top());
space->heap()->EnsureSweepingCompleted(


@ -5632,7 +5632,7 @@ AllocationResult HeapTester::AllocateByteArrayForTest(
bool HeapTester::CodeEnsureLinearAllocationArea(Heap* heap, int size_in_bytes) {
bool result = heap->code_space()->EnsureAllocation(
size_in_bytes, AllocationAlignment::kTaggedAligned,
AllocationOrigin::kRuntime, nullptr);
AllocationOrigin::kRuntime);
heap->code_space()->UpdateInlineAllocationLimit(0);
return result;
}


@ -497,7 +497,7 @@ class Observer : public AllocationObserver {
explicit Observer(intptr_t step_size)
: AllocationObserver(step_size), count_(0) {}
void Step(int bytes_allocated, Address addr, size_t) override { count_++; }
void Step(Address addr, size_t) override { count_++; }
int count() const { return count_; }


@ -15,9 +15,7 @@ namespace {
class UnusedObserver : public AllocationObserver {
public:
explicit UnusedObserver(size_t step_size) : AllocationObserver(step_size) {}
void Step(int bytes_allocated, Address soon_object, size_t size) override {
CHECK(false);
}
void Step(Address soon_object, size_t size) override { CHECK(false); }
};
} // namespace
@ -50,18 +48,16 @@ class VerifyStepObserver : public AllocationObserver {
explicit VerifyStepObserver(size_t step_size)
: AllocationObserver(step_size) {}
void Step(int bytes_allocated, Address soon_object, size_t size) override {
void Step(Address soon_object, size_t size) override {
CHECK(!do_not_invoke_);
invocations_++;
CHECK_EQ(expected_bytes_allocated_, bytes_allocated);
CHECK_EQ(expected_size_, size);
}
void ExpectNoInvocation() { do_not_invoke_ = true; }
void Expect(int expected_bytes_allocated, size_t expected_size) {
void Expect(size_t expected_size) {
do_not_invoke_ = false;
expected_bytes_allocated_ = expected_bytes_allocated;
expected_size_ = expected_size;
}
@ -70,7 +66,6 @@ class VerifyStepObserver : public AllocationObserver {
private:
bool do_not_invoke_ = false;
int invocations_ = 0;
int expected_bytes_allocated_ = 0;
size_t expected_size_ = 0;
};
} // namespace
@ -86,7 +81,7 @@ TEST(AllocationObserverTest, Step) {
counter.AddAllocationObserver(&observer100);
counter.AddAllocationObserver(&observer200);
observer100.Expect(90, 8);
observer100.Expect(8);
observer200.ExpectNoInvocation();
counter.AdvanceAllocationObservers(90);
@ -96,8 +91,8 @@ TEST(AllocationObserverTest, Step) {
CHECK_EQ(counter.NextBytes(),
10 /* aligned_object_size */ + 100 /* smallest step size*/);
observer100.Expect(90, 16);
observer200.Expect(180, 16);
observer100.Expect(16);
observer200.Expect(16);
counter.AdvanceAllocationObservers(90);
counter.InvokeAllocationObservers(kSomeObjectAddress, 16, 20);
@ -114,7 +109,7 @@ class RecursiveAddObserver : public AllocationObserver {
AllocationObserver* observer)
: AllocationObserver(step_size), counter_(counter), observer_(observer) {}
void Step(int bytes_allocated, Address soon_object, size_t size) override {
void Step(Address soon_object, size_t size) override {
counter_->AddAllocationObserver(observer_);
}
@ -148,7 +143,7 @@ class RecursiveRemoveObserver : public AllocationObserver {
AllocationObserver* observer)
: AllocationObserver(step_size), counter_(counter), observer_(observer) {}
void Step(int bytes_allocated, Address soon_object, size_t size) override {
void Step(Address soon_object, size_t size) override {
counter_->RemoveAllocationObserver(observer_);
}