Revert "Reland "[heap] Invoke allocation observers before limiting the LAB""

This reverts commit dbbccae19a.

Reason for revert: Deadlock in TSAN with stress:
https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux64%20TSAN/44651/overview

Original change's description:
> Reland "[heap] Invoke allocation observers before limiting the LAB"
>
> This is a reland of commit 39d387bb72
>
> Original change's description:
> > [heap] Invoke allocation observers before limiting the LAB
> >
> > Currently whenever we reach a step we get a small LAB the same size as
> > the allocated object. This is because the remaining step size is smaller
> > than the current allocation.
> > Invoking observers before limiting the LAB, and thus updating step
> > sizes, should eliminate the small LAB we get whenever we reach a step.
> >
> > Drive-by: remove redundant method arguments.
> >
> > Bug: v8:12612
> > Change-Id: Ied92a947308368d3b289e246fdb4f40ac5e5981f
> > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4013683
> > Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> > Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> > Commit-Queue: Omer Katz <omerkatz@chromium.org>
> > Cr-Commit-Position: refs/heads/main@{#84157}
>
> Bug: v8:12612, v8:13465
> Change-Id: I40fb930a755cb5decccd932c4d25ed7d5d224da4
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4020177
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Commit-Queue: Omer Katz <omerkatz@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#84328}

Bug: v8:12612, v8:13465
Change-Id: I70df00448c7413999b91412343915c503baf0dd3
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4035252
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Owners-Override: Shu-yu Guo <syg@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84345}
Author: Shu-yu Guo, 2022-11-17 22:53:39 +00:00 (committed by V8 LUCI CQ)
Parent: ceeb9ba58a
Commit: 76372353c1
23 changed files with 140 additions and 107 deletions
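
To make the rationale in the reverted CL's description concrete, here is a standalone sketch of how the ordering of "invoke allocation observers" versus "limit the LAB" determines the LAB size once an allocation reaches a step. It is not V8 code: Counter, RemainingStep, LabSize, and all constants are invented for illustration.

#include <algorithm>
#include <cstddef>
#include <iostream>

// Simplified model of an allocation counter: observers fire once the number
// of allocated bytes reaches next_step.
struct Counter {
  size_t current;    // bytes allocated so far
  size_t next_step;  // counter value at which the next observer step fires
  size_t step_size;  // step size reported by the observer
};

// Bytes the LAB may still cover before the next observer step.
size_t RemainingStep(const Counter& c) { return c.next_step - c.current; }

// The LAB must hold the current object and may not run past the next step or
// the end of the page.
size_t LabSize(const Counter& c, size_t object_size, size_t page_remaining) {
  return std::min(page_remaining, std::max(object_size, RemainingStep(c)));
}

int main() {
  const size_t kObject = 64, kPageRemaining = 4096, kStep = 512;

  // An allocation that reaches a step: only 12 bytes remain until the step.
  Counter c{500, 512, kStep};

  // Status quo described in the CL: the LAB limit is computed against the
  // nearly exhausted step, so the LAB only covers the object itself.
  size_t lab_limit_first = LabSize(c, kObject, kPageRemaining);  // 64

  // Ordering the CL switched to: the observer steps first, refreshing the
  // step budget, and only then is the LAB limited.
  c.next_step = c.current + kObject + c.step_size;
  size_t lab_observers_first = LabSize(c, kObject, kPageRemaining);  // 576

  std::cout << "LAB when limited before stepping: " << lab_limit_first << "\n"
            << "LAB when observers step first:    " << lab_observers_first
            << "\n";
  return 0;
}

With the limit computed first, the LAB shrinks to exactly the 64-byte object; letting the observer step first leaves room for roughly one more step. That is the small-LAB behavior the reverted CL aimed to eliminate, and it is this reordering that the revert below undoes.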

@ -101,10 +101,13 @@ void AllocationCounter::InvokeAllocationObservers(Address soon_object,
if (aoc.next_counter_ - current_counter_ <= aligned_object_size) {
{
DisallowGarbageCollection no_gc;
aoc.observer_->Step(soon_object, object_size);
aoc.observer_->Step(
static_cast<int>(current_counter_ - aoc.prev_counter_), soon_object,
object_size);
}
size_t observer_step_size = aoc.observer_->GetNextStepSize();
aoc.prev_counter_ = current_counter_;
aoc.next_counter_ =
current_counter_ + aligned_object_size + observer_step_size;
step_run = true;
@ -119,6 +122,7 @@ void AllocationCounter::InvokeAllocationObservers(Address soon_object,
// Now process newly added allocation observers.
for (AllocationObserverCounter& aoc : pending_added_) {
size_t observer_step_size = aoc.observer_->GetNextStepSize();
aoc.prev_counter_ = current_counter_;
aoc.next_counter_ =
current_counter_ + aligned_object_size + observer_step_size;


@ -37,7 +37,7 @@ class AllocationObserver {
// rather than the first object.
// 3. `size` is the requested size at the time of allocation. Right-trimming
// may change the object size dynamically.
virtual void Step(Address soon_object, size_t size) = 0;
virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
// Subclasses can override this method to make step size dynamic.
virtual intptr_t GetNextStepSize() { return step_size_; }
@ -94,9 +94,12 @@ class AllocationCounter final {
struct AllocationObserverCounter final {
AllocationObserverCounter(AllocationObserver* observer, size_t prev_counter,
size_t next_counter)
: observer_(observer), next_counter_(next_counter) {}
: observer_(observer),
prev_counter_(prev_counter),
next_counter_(next_counter) {}
AllocationObserver* observer_;
size_t prev_counter_;
size_t next_counter_;
};
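
As a reading aid for the signature change above, here is a minimal observer written against the restored Step(int bytes_allocated, Address soon_object, size_t size) interface. CountingObserver and total_bytes_ are invented for illustration, and the block presumes V8's heap headers rather than being standalone.

// Hypothetical example, not part of this CL.
class CountingObserver final : public AllocationObserver {
 public:
  explicit CountingObserver(intptr_t step_size)
      : AllocationObserver(step_size) {}

  // bytes_allocated is what this CL restores: the bytes accounted for this
  // observer since its previous step (current_counter_ - prev_counter_ in
  // AllocationCounter). The unnamed Address/size_t parameters are the
  // soon-to-be-allocated object and its requested size, unused here.
  void Step(int bytes_allocated, Address, size_t) override {
    total_bytes_ += bytes_allocated;
  }

 private:
  // GetNextStepSize() is left at its default, i.e. the fixed step_size_.
  size_t total_bytes_ = 0;
};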


@ -195,7 +195,9 @@ class ScavengeTaskObserver final : public AllocationObserver {
ScavengeTaskObserver(Heap* heap, intptr_t step_size)
: AllocationObserver(step_size), heap_(heap) {}
void Step(Address, size_t) override { heap_->ScheduleScavengeTaskIfNeeded(); }
void Step(int bytes_allocated, Address, size_t) override {
heap_->ScheduleScavengeTaskIfNeeded();
}
private:
Heap* heap_;
@ -208,7 +210,7 @@ class MinorMCTaskObserver final : public AllocationObserver {
MinorMCTaskObserver(Heap* heap, intptr_t step_size)
: AllocationObserver(step_size), heap_(heap) {}
void Step(Address, size_t) override {
void Step(int bytes_allocated, Address, size_t) override {
if (v8_flags.concurrent_minor_mc_marking) {
if (heap_->incremental_marking()->IsMinorMarking()) {
heap_->concurrent_marking()->RescheduleJobIfNeeded(
@ -5517,7 +5519,7 @@ class StressConcurrentAllocationObserver : public AllocationObserver {
explicit StressConcurrentAllocationObserver(Heap* heap)
: AllocationObserver(1024), heap_(heap) {}
void Step(Address, size_t) override {
void Step(int bytes_allocated, Address, size_t) override {
DCHECK(heap_->deserialization_complete());
if (v8_flags.stress_concurrent_allocation) {
// Only schedule task if --stress-concurrent-allocation is enabled. This
@ -5706,7 +5708,9 @@ void Heap::PrintMaxNewSpaceSizeReached() {
}
int Heap::NextStressMarkingLimit() {
base::MutexGuard guard(&stress_marking_mutex_);
// Reuse Heap-global mutex as this getter is called from different threads on
// allocation slow paths.
base::MutexGuard guard(relocation_mutex());
return isolate()->fuzzer_rng()->NextInt(v8_flags.stress_marking + 1);
}


@ -2213,9 +2213,6 @@ class Heap {
// marking limit.
AllocationObserver* stress_marking_observer_ = nullptr;
// Mutex used for synchronizing getting next stress marking limit.
base::Mutex stress_marking_mutex_;
// Observer that can cause early scavenge start.
StressScavengeObserver* stress_scavenge_observer_ = nullptr;


@ -41,7 +41,8 @@
namespace v8 {
namespace internal {
void IncrementalMarking::Observer::Step(Address addr, size_t size) {
void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
size_t size) {
Heap* heap = incremental_marking_->heap();
VMState<GC> state(heap->isolate());
RCS_SCOPE(heap->isolate(),


@ -174,7 +174,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
: AllocationObserver(step_size),
incremental_marking_(incremental_marking) {}
void Step(Address, size_t) override;
void Step(int bytes_allocated, Address, size_t) override;
private:
IncrementalMarking* incremental_marking_;


@ -59,7 +59,8 @@ V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
// SemiSpaceNewSpace
V8_INLINE bool SemiSpaceNewSpace::EnsureAllocation(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin,
int* out_max_aligned_size) {
size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
#if DEBUG
@ -84,12 +85,15 @@ V8_INLINE bool SemiSpaceNewSpace::EnsureAllocation(
old_top = allocation_info_.top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
aligned_size_in_bytes = size_in_bytes + filler_size;
}
if (out_max_aligned_size) {
*out_max_aligned_size = aligned_size_in_bytes;
}
DCHECK(old_top + aligned_size_in_bytes <= high);
InvokeAllocationObservers(
top() + Heap::GetFillToAlign(top(), alignment), size_in_bytes,
size_in_bytes + Heap::GetFillToAlign(top(), alignment));
UpdateInlineAllocationLimit(aligned_size_in_bytes);
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
@ -100,8 +104,10 @@ V8_INLINE bool SemiSpaceNewSpace::EnsureAllocation(
// PagedSpaceForNewSpace
V8_INLINE bool PagedSpaceForNewSpace::EnsureAllocation(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
if (!PagedSpaceBase::EnsureAllocation(size_in_bytes, alignment, origin)) {
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin,
int* out_max_aligned_size) {
if (!PagedSpaceBase::EnsureAllocation(size_in_bytes, alignment, origin,
out_max_aligned_size)) {
return false;
}
allocated_linear_areas_ += limit() - top();


@ -540,7 +540,8 @@ class V8_EXPORT_PRIVATE SemiSpaceNewSpace final : public NewSpace {
ParkedAllocationBuffersVector parked_allocation_buffers_;
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin) final;
AllocationOrigin origin,
int* out_max_aligned_size) final;
friend class SemiSpaceObjectIterator;
};
@ -603,7 +604,8 @@ class V8_EXPORT_PRIVATE PagedSpaceForNewSpace final : public PagedSpaceBase {
bool AddFreshPage() { return false; }
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin) final;
AllocationOrigin origin,
int* out_max_aligned_size) final;
bool EnsureCurrentCapacity();
@ -807,8 +809,10 @@ class V8_EXPORT_PRIVATE PagedNewSpace final : public NewSpace {
private:
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin) final {
return paged_space_.EnsureAllocation(size_in_bytes, alignment, origin);
AllocationOrigin origin,
int* out_max_aligned_size) final {
return paged_space_.EnsureAllocation(size_in_bytes, alignment, origin,
out_max_aligned_size);
}
void PromotePageInNewSpace(Page* page) final { UNREACHABLE(); }


@ -114,7 +114,8 @@ bool PagedSpaceBase::TryFreeLast(Address object_address, int object_size) {
V8_INLINE bool PagedSpaceBase::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
AllocationOrigin origin,
int* out_max_aligned_size) {
if ((identity() != NEW_SPACE) && !is_compaction_space()) {
// Start incremental marking before the actual allocation, this allows the
// allocation function to mark the object black when incremental marking is
@ -127,10 +128,13 @@ V8_INLINE bool PagedSpaceBase::EnsureAllocation(int size_in_bytes,
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
size_in_bytes += Heap::GetMaximumFillToAlign(alignment);
if (out_max_aligned_size) {
*out_max_aligned_size = size_in_bytes;
}
if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
return true;
}
return RefillLabMain(size_in_bytes, origin, alignment);
return RefillLabMain(size_in_bytes, origin);
}
} // namespace internal


@ -605,9 +605,8 @@ std::unique_ptr<ObjectIterator> PagedSpaceBase::GetObjectIterator(Heap* heap) {
new PagedSpaceObjectIterator(heap, this));
}
bool PagedSpaceBase::TryAllocationFromFreeListMain(
size_t size_in_bytes, AllocationOrigin origin,
AllocationAlignment alignment) {
bool PagedSpaceBase::TryAllocationFromFreeListMain(size_t size_in_bytes,
AllocationOrigin origin) {
ConcurrentAllocationMutex guard(this);
DCHECK(IsAligned(size_in_bytes, kTaggedSize));
DCHECK_LE(top(), limit());
@ -643,11 +642,6 @@ bool PagedSpaceBase::TryAllocationFromFreeListMain(
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
Address start = new_node.address();
Address end = new_node.address() + new_node_size;
bool was_black_allocating = heap()->incremental_marking()->black_allocation();
SetTopAndLimit(start, end);
InvokeAllocationObservers(
start + Heap::GetFillToAlign(start, alignment), size_in_bytes,
size_in_bytes + Heap::GetFillToAlign(start, alignment));
Address limit = ComputeLimit(start, end, size_in_bytes);
DCHECK_LE(limit, end);
DCHECK_LE(size_in_bytes, limit - start);
@ -656,12 +650,6 @@ bool PagedSpaceBase::TryAllocationFromFreeListMain(
heap()->UnprotectAndRegisterMemoryChunk(
page, GetUnprotectMemoryOrigin(is_compaction_space()));
}
if (!was_black_allocating &&
heap()->incremental_marking()->black_allocation() &&
identity() != NEW_SPACE) {
// Allocation observers triggered black allocation.
page->DestroyBlackArea(limit, end);
}
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
SetLinearAllocationArea(start, limit);
@ -866,11 +854,11 @@ void PagedSpaceBase::UpdateInlineAllocationLimit(size_t min_size) {
// -----------------------------------------------------------------------------
// OldSpace implementation
bool PagedSpaceBase::RefillLabMain(int size_in_bytes, AllocationOrigin origin,
AllocationAlignment alignment) {
bool PagedSpaceBase::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
VMState<GC> state(heap()->isolate());
RCS_SCOPE(heap()->isolate(),
RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
return RawRefillLabMain(size_in_bytes, origin, alignment);
return RawRefillLabMain(size_in_bytes, origin);
}
Page* CompactionSpace::TryExpandImpl() {
@ -880,13 +868,12 @@ Page* CompactionSpace::TryExpandImpl() {
return page;
}
bool CompactionSpace::RefillLabMain(int size_in_bytes, AllocationOrigin origin,
AllocationAlignment alignment) {
return RawRefillLabMain(size_in_bytes, origin, alignment);
bool CompactionSpace::RefillLabMain(int size_in_bytes,
AllocationOrigin origin) {
return RawRefillLabMain(size_in_bytes, origin);
}
bool PagedSpaceBase::TryExpand(int size_in_bytes, AllocationOrigin origin,
AllocationAlignment alignment) {
bool PagedSpaceBase::TryExpand(int size_in_bytes, AllocationOrigin origin) {
DCHECK_NE(NEW_SPACE, identity());
Page* page = TryExpandImpl();
if (!page) return false;
@ -894,18 +881,16 @@ bool PagedSpaceBase::TryExpand(int size_in_bytes, AllocationOrigin origin,
heap()->NotifyOldGenerationExpansion(identity(), page);
}
return TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
origin, alignment);
origin);
}
bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
AllocationOrigin origin,
AllocationAlignment alignment) {
AllocationOrigin origin) {
// Allocation in this space has failed.
DCHECK_GE(size_in_bytes, 0);
const int kMaxPagesToSweep = 1;
if (TryAllocationFromFreeListMain(size_in_bytes, origin, alignment))
return true;
if (TryAllocationFromFreeListMain(size_in_bytes, origin)) return true;
const bool is_main_thread =
heap()->IsMainThread() || heap()->IsSharedMainThread();
@ -924,13 +909,13 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
// Retry the free list allocation.
if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
origin, alignment))
origin))
return true;
{
TRACE_GC_EPOCH(heap()->tracer(), sweeping_scope_id, sweeping_scope_kind);
if (ContributeToSweepingMain(size_in_bytes, kMaxPagesToSweep,
size_in_bytes, origin, alignment))
size_in_bytes, origin))
return true;
}
}
@ -944,7 +929,7 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
if (page != nullptr) {
AddPage(page);
if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
origin, alignment))
origin))
return true;
}
}
@ -953,7 +938,7 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
heap()->ShouldExpandOldGenerationOnSlowAllocation(
heap()->main_thread_local_heap()) &&
heap()->CanExpandOldGeneration(AreaSize())) {
if (TryExpand(size_in_bytes, origin, alignment)) {
if (TryExpand(size_in_bytes, origin)) {
return true;
}
}
@ -961,23 +946,21 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
// Try sweeping all pages.
{
TRACE_GC_EPOCH(heap()->tracer(), sweeping_scope_id, sweeping_scope_kind);
if (ContributeToSweepingMain(0, 0, size_in_bytes, origin, alignment))
return true;
if (ContributeToSweepingMain(0, 0, size_in_bytes, origin)) return true;
}
if (identity() != NEW_SPACE && heap()->gc_state() != Heap::NOT_IN_GC &&
!heap()->force_oom()) {
// Avoid OOM crash in the GC in order to invoke NearHeapLimitCallback after
// GC and give it a chance to increase the heap limit.
return TryExpand(size_in_bytes, origin, alignment);
return TryExpand(size_in_bytes, origin);
}
return false;
}
bool PagedSpaceBase::ContributeToSweepingMain(int required_freed_bytes,
int max_pages, int size_in_bytes,
AllocationOrigin origin,
AllocationAlignment alignment) {
AllocationOrigin origin) {
// Cleanup invalidated old-to-new refs for compaction space in the
// final atomic pause.
Sweeper::SweepingMode sweeping_mode =
@ -988,7 +971,7 @@ bool PagedSpaceBase::ContributeToSweepingMain(int required_freed_bytes,
heap()->sweeper()->ParallelSweepSpace(identity(), sweeping_mode,
required_freed_bytes, max_pages);
RefillFreeList();
return TryAllocationFromFreeListMain(size_in_bytes, origin, alignment);
return TryAllocationFromFreeListMain(size_in_bytes, origin);
}
return false;
}


@ -320,32 +320,30 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
virtual Page* TryExpandImpl();
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin) override;
AllocationOrigin origin,
int* out_max_aligned_size) override;
V8_WARN_UNUSED_RESULT bool TryAllocationFromFreeListMain(
size_t size_in_bytes, AllocationOrigin origin,
AllocationAlignment alignment);
size_t size_in_bytes, AllocationOrigin origin);
V8_WARN_UNUSED_RESULT bool ContributeToSweepingMain(
int required_freed_bytes, int max_pages, int size_in_bytes,
AllocationOrigin origin, AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT bool ContributeToSweepingMain(int required_freed_bytes,
int max_pages,
int size_in_bytes,
AllocationOrigin origin);
// Refills LAB for EnsureLabMain. This function is space-dependent. Returns
// false if there is not enough space and the caller has to retry after
// collecting garbage.
V8_WARN_UNUSED_RESULT virtual bool RefillLabMain(
int size_in_bytes, AllocationOrigin origin,
AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT virtual bool RefillLabMain(int size_in_bytes,
AllocationOrigin origin);
// Actual implementation of refilling LAB. Returns false if there is not
// enough space and the caller has to retry after collecting garbage.
V8_WARN_UNUSED_RESULT bool RawRefillLabMain(int size_in_bytes,
AllocationOrigin origin,
AllocationAlignment alignment);
AllocationOrigin origin);
V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
AllocationOrigin origin,
AllocationAlignment alignment);
AllocationOrigin origin);
size_t committed_physical_memory() const {
return committed_physical_memory_.load(std::memory_order_relaxed);
@ -406,9 +404,8 @@ class V8_EXPORT_PRIVATE CompactionSpace final : public PagedSpace {
LinearAllocationArea allocation_info_;
protected:
V8_WARN_UNUSED_RESULT bool RefillLabMain(
int size_in_bytes, AllocationOrigin origin,
AllocationAlignment alignment) override;
V8_WARN_UNUSED_RESULT bool RefillLabMain(int size_in_bytes,
AllocationOrigin origin) override;
Page* TryExpandImpl() final;
// The space is temporary and not included in any snapshots.


@ -264,10 +264,13 @@ AllocationResult SpaceWithLinearArea::AllocateRaw(int size_in_bytes,
AllocationResult SpaceWithLinearArea::AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin) {
DCHECK(!v8_flags.enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, kTaggedAligned, origin)) {
int max_aligned_size;
if (!EnsureAllocation(size_in_bytes, kTaggedAligned, origin,
&max_aligned_size)) {
return AllocationResult::Failure();
}
DCHECK_EQ(max_aligned_size, size_in_bytes);
DCHECK_LE(allocation_info_.start(), allocation_info_.top());
AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
@ -277,9 +280,8 @@ AllocationResult SpaceWithLinearArea::AllocateRawUnaligned(
UpdateAllocationOrigins(origin);
}
DCHECK_IMPLIES(allocation_counter_.IsActive(),
(allocation_info_.limit() - allocation_info_.start()) <=
allocation_counter_.NextBytes());
InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
size_in_bytes);
return result;
}
@ -287,25 +289,27 @@ AllocationResult SpaceWithLinearArea::AllocateRawUnaligned(
AllocationResult SpaceWithLinearArea::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
DCHECK(!v8_flags.enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, alignment, origin)) {
int max_aligned_size;
if (!EnsureAllocation(size_in_bytes, alignment, origin, &max_aligned_size)) {
return AllocationResult::Failure();
}
DCHECK_GE(max_aligned_size, size_in_bytes);
DCHECK_LE(allocation_info_.start(), allocation_info_.top());
int aligned_size_in_bytes;
AllocationResult result = AllocateFastAligned(
size_in_bytes, &aligned_size_in_bytes, alignment, origin);
DCHECK_GE(max_aligned_size, aligned_size_in_bytes);
DCHECK(!result.IsFailure());
if (v8_flags.trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
DCHECK_IMPLIES(allocation_counter_.IsActive(),
(allocation_info_.limit() - allocation_info_.start()) <=
allocation_counter_.NextBytes());
InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
aligned_size_in_bytes, max_aligned_size);
return result;
}


@ -387,18 +387,29 @@ void SpaceWithLinearArea::MarkLabStartInitialized() {
// actual size needed for the object (required for InvokeAllocationObservers).
// aligned_size_in_bytes is the size of the object including the filler right
// before it to reach the right alignment (required to DCHECK the start of the
// object).
// object). allocation_size is the size of the actual allocation which needs to
// be used for the accounting. It can be different from aligned_size_in_bytes in
// PagedSpace::AllocateRawAligned, where we have to overallocate in order to be
// able to align the allocation afterwards.
void SpaceWithLinearArea::InvokeAllocationObservers(
Address soon_object, size_t size_in_bytes, size_t aligned_size_in_bytes) {
Address soon_object, size_t size_in_bytes, size_t aligned_size_in_bytes,
size_t allocation_size) {
DCHECK_LE(size_in_bytes, aligned_size_in_bytes);
DCHECK_LE(aligned_size_in_bytes, allocation_size);
DCHECK(size_in_bytes == aligned_size_in_bytes ||
aligned_size_in_bytes == allocation_size);
if (!SupportsAllocationObserver() || !allocation_counter_.IsActive()) return;
if (aligned_size_in_bytes >= allocation_counter_.NextBytes()) {
if (allocation_size >= allocation_counter_.NextBytes()) {
// Only the first object in a LAB should reach the next step.
DCHECK_EQ(soon_object,
allocation_info_.start() + aligned_size_in_bytes - size_in_bytes);
// Right now the LAB only contains that one object.
DCHECK_EQ(allocation_info_.top() + allocation_size - aligned_size_in_bytes,
allocation_info_.limit());
// Ensure that there is a valid object
if (identity() == CODE_SPACE) {
MemoryChunk* chunk = MemoryChunk::FromAddress(soon_object);
@ -415,13 +426,17 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
// Run AllocationObserver::Step through the AllocationCounter.
allocation_counter_.InvokeAllocationObservers(soon_object, size_in_bytes,
aligned_size_in_bytes);
allocation_size);
// Ensure that start/top/limit didn't change.
DCHECK_EQ(saved_allocation_info.start(), allocation_info_.start());
DCHECK_EQ(saved_allocation_info.top(), allocation_info_.top());
DCHECK_EQ(saved_allocation_info.limit(), allocation_info_.limit());
}
DCHECK_IMPLIES(allocation_counter_.IsActive(),
(allocation_info_.limit() - allocation_info_.start()) <
allocation_counter_.NextBytes());
}
#if DEBUG
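
The comment and DCHECKs above distinguish three sizes. A toy illustration with made-up numbers (standalone, not V8 code, and not tied to any particular V8 configuration):

#include <cstddef>

int main() {
  constexpr size_t size_in_bytes = 16;       // requested object size
  constexpr size_t fill_to_align = 4;        // filler actually placed in front
  constexpr size_t max_fill_to_align = 4;    // worst case assumed up front

  constexpr size_t aligned_size_in_bytes = size_in_bytes + fill_to_align;  // 20
  constexpr size_t allocation_size = size_in_bytes + max_fill_to_align;    // 20

  // Exactly the relations the DCHECKs above assert.
  static_assert(size_in_bytes <= aligned_size_in_bytes, "");
  static_assert(aligned_size_in_bytes <= allocation_size, "");
  static_assert(size_in_bytes == aligned_size_in_bytes ||
                    aligned_size_in_bytes == allocation_size,
                "");
  return 0;
}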


@ -553,8 +553,10 @@ class SpaceWithLinearArea : public Space {
V8_EXPORT_PRIVATE void PauseAllocationObservers() override;
V8_EXPORT_PRIVATE void AdvanceAllocationObservers();
V8_EXPORT_PRIVATE void InvokeAllocationObservers(
Address soon_object, size_t size_in_bytes, size_t aligned_size_in_bytes);
V8_EXPORT_PRIVATE void InvokeAllocationObservers(Address soon_object,
size_t size_in_bytes,
size_t aligned_size_in_bytes,
size_t allocation_size);
void MarkLabStartInitialized();
virtual void FreeLinearAllocationArea() = 0;
@ -634,7 +636,8 @@ class SpaceWithLinearArea : public Space {
// that there is enough space.
virtual bool EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) = 0;
AllocationOrigin origin,
int* out_max_aligned_size) = 0;
#if DEBUG
V8_EXPORT_PRIVATE virtual void VerifyTop() const;


@ -12,7 +12,8 @@ namespace internal {
StressMarkingObserver::StressMarkingObserver(Heap* heap)
: AllocationObserver(64), heap_(heap) {}
void StressMarkingObserver::Step(Address soon_object, size_t size) {
void StressMarkingObserver::Step(int bytes_allocated, Address soon_object,
size_t size) {
heap_->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
kNoGCCallbackFlags);
}


@ -14,7 +14,7 @@ class StressMarkingObserver : public AllocationObserver {
public:
explicit StressMarkingObserver(Heap* heap);
void Step(Address soon_object, size_t size) override;
void Step(int bytes_allocated, Address soon_object, size_t size) override;
private:
Heap* heap_;


@ -26,7 +26,8 @@ StressScavengeObserver::StressScavengeObserver(Heap* heap)
}
}
void StressScavengeObserver::Step(Address soon_object, size_t size) {
void StressScavengeObserver::Step(int bytes_allocated, Address soon_object,
size_t size) {
if (has_requested_gc_ || heap_->new_space()->Capacity() == 0) {
return;
}


@ -14,7 +14,7 @@ class StressScavengeObserver : public AllocationObserver {
public:
explicit StressScavengeObserver(Heap* heap);
void Step(Address soon_object, size_t size) override;
void Step(int bytes_allocated, Address soon_object, size_t size) override;
bool HasRequestedGC() const;
void RequestedGCDone();


@ -138,7 +138,7 @@ class SamplingHeapProfiler {
rate_(rate) {}
protected:
void Step(Address soon_object, size_t size) override {
void Step(int bytes_allocated, Address soon_object, size_t size) override {
USE(heap_);
DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
if (soon_object) {


@ -234,6 +234,7 @@ void FillPageInPagedSpace(Page* page,
void FillCurrentPage(v8::internal::NewSpace* space,
std::vector<Handle<FixedArray>>* out_handles) {
if (v8_flags.minor_mc) {
PauseAllocationObserversScope pause_observers(space->heap());
if (space->top() == kNullAddress) return;
Page* page = Page::FromAllocationAreaAddress(space->top());
space->heap()->EnsureSweepingCompleted(


@ -5632,7 +5632,7 @@ AllocationResult HeapTester::AllocateByteArrayForTest(
bool HeapTester::CodeEnsureLinearAllocationArea(Heap* heap, int size_in_bytes) {
bool result = heap->code_space()->EnsureAllocation(
size_in_bytes, AllocationAlignment::kTaggedAligned,
AllocationOrigin::kRuntime);
AllocationOrigin::kRuntime, nullptr);
heap->code_space()->UpdateInlineAllocationLimit(0);
return result;
}


@ -497,7 +497,7 @@ class Observer : public AllocationObserver {
explicit Observer(intptr_t step_size)
: AllocationObserver(step_size), count_(0) {}
void Step(Address addr, size_t) override { count_++; }
void Step(int bytes_allocated, Address addr, size_t) override { count_++; }
int count() const { return count_; }


@ -15,7 +15,9 @@ namespace {
class UnusedObserver : public AllocationObserver {
public:
explicit UnusedObserver(size_t step_size) : AllocationObserver(step_size) {}
void Step(Address soon_object, size_t size) override { CHECK(false); }
void Step(int bytes_allocated, Address soon_object, size_t size) override {
CHECK(false);
}
};
} // namespace
@ -48,16 +50,18 @@ class VerifyStepObserver : public AllocationObserver {
explicit VerifyStepObserver(size_t step_size)
: AllocationObserver(step_size) {}
void Step(Address soon_object, size_t size) override {
void Step(int bytes_allocated, Address soon_object, size_t size) override {
CHECK(!do_not_invoke_);
invocations_++;
CHECK_EQ(expected_bytes_allocated_, bytes_allocated);
CHECK_EQ(expected_size_, size);
}
void ExpectNoInvocation() { do_not_invoke_ = true; }
void Expect(size_t expected_size) {
void Expect(int expected_bytes_allocated, size_t expected_size) {
do_not_invoke_ = false;
expected_bytes_allocated_ = expected_bytes_allocated;
expected_size_ = expected_size;
}
@ -66,6 +70,7 @@ class VerifyStepObserver : public AllocationObserver {
private:
bool do_not_invoke_ = false;
int invocations_ = 0;
int expected_bytes_allocated_ = 0;
size_t expected_size_ = 0;
};
} // namespace
@ -81,7 +86,7 @@ TEST(AllocationObserverTest, Step) {
counter.AddAllocationObserver(&observer100);
counter.AddAllocationObserver(&observer200);
observer100.Expect(8);
observer100.Expect(90, 8);
observer200.ExpectNoInvocation();
counter.AdvanceAllocationObservers(90);
@ -91,8 +96,8 @@ TEST(AllocationObserverTest, Step) {
CHECK_EQ(counter.NextBytes(),
10 /* aligned_object_size */ + 100 /* smallest step size*/);
observer100.Expect(16);
observer200.Expect(16);
observer100.Expect(90, 16);
observer200.Expect(180, 16);
counter.AdvanceAllocationObservers(90);
counter.InvokeAllocationObservers(kSomeObjectAddress, 16, 20);
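
For readers checking the restored two-argument expectations, here is a small standalone trace of observer100's first step, derived from the bytes_allocated = current_counter_ - prev_counter_ logic in the AllocationCounter hunk at the top of this commit. The snippet is illustrative only; the requested size of 8 and aligned size of 10 follow from the Expect(90, 8) call and the NextBytes() check above.

#include <cstddef>

int main() {
  // observer100 has a step size of 100; all counters start at zero.
  size_t prev_counter = 0, current_counter = 0;
  size_t next_counter = current_counter + 100;

  // counter.AdvanceAllocationObservers(90);
  current_counter += 90;

  // First invocation: requested size 8, aligned object size 10. The step
  // fires because next_counter - current_counter = 10 <= 10.
  size_t aligned_object_size = 10;
  bool step_fires = (next_counter - current_counter) <= aligned_object_size;

  // Step() then receives bytes_allocated = current - prev = 90 and size = 8,
  // matching observer100.Expect(90, 8).
  size_t bytes_allocated = current_counter - prev_counter;

  return (step_fires && bytes_allocated == 90) ? 0 : 1;
}
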
@ -109,7 +114,7 @@ class RecursiveAddObserver : public AllocationObserver {
AllocationObserver* observer)
: AllocationObserver(step_size), counter_(counter), observer_(observer) {}
void Step(Address soon_object, size_t size) override {
void Step(int bytes_allocated, Address soon_object, size_t size) override {
counter_->AddAllocationObserver(observer_);
}
@ -143,7 +148,7 @@ class RecursiveRemoveObserver : public AllocationObserver {
AllocationObserver* observer)
: AllocationObserver(step_size), counter_(counter), observer_(observer) {}
void Step(Address soon_object, size_t size) override {
void Step(int bytes_allocated, Address soon_object, size_t size) override {
counter_->RemoveAllocationObserver(observer_);
}