heap: Move disable-new state to corresponding spaces

The information was previously kept heap-global but is really only
used by spaces when refilling their LABs.

Bug: v8:12615
Change-Id: Iee256d35ffa0112c93ec721bc3afdc2881c4743b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3465898
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79122}
This commit is contained in:
Michael Lippautz 2022-02-16 10:01:50 +01:00 committed by V8 LUCI CQ
parent bdbc1d6527
commit 2b98251cbd
9 changed files with 47 additions and 31 deletions

View File

@ -5595,32 +5595,31 @@ bool Heap::ShouldStressCompaction() const {
}
void Heap::EnableInlineAllocation() {
  // Re-enables inline (bump-pointer/LAB) allocation. The enabled/disabled
  // state lives in the individual spaces (see
  // SpaceWithLinearArea::EnableInlineAllocation), not in a heap-global flag,
  // so simply forward to each space. Each space is responsible for advancing
  // its allocation observers and refreshing its inline allocation limit.
  // Update inline allocation state for new space.
  if (new_space()) {
    new_space()->EnableInlineAllocation();
  }
  // Update inline allocation state for old spaces.
  PagedSpaceIterator spaces(this);
  for (PagedSpace* space = spaces.Next(); space != nullptr;
       space = spaces.Next()) {
    // Hold the space mutex while toggling, since paged spaces may be
    // accessed concurrently.
    base::MutexGuard guard(space->mutex());
    space->EnableInlineAllocation();
  }
}
void Heap::DisableInlineAllocation() {
  // Disables inline (bump-pointer/LAB) allocation. The state is kept
  // per-space; each space frees its linear allocation area itself when
  // disabled (see SpaceWithLinearArea::DisableInlineAllocation), so no
  // explicit FreeLinearAllocationArea() call is needed here.
  // Update inline allocation state for new space.
  if (new_space()) {
    new_space()->DisableInlineAllocation();
  }
  // Update inline allocation state for old spaces.
  PagedSpaceIterator spaces(this);
  // NOTE(review): scope presumably grants write access to code pages while
  // LABs in the code space are freed — confirm against
  // CodePageCollectionMemoryModificationScope's contract.
  CodePageCollectionMemoryModificationScope modification_scope(this);
  for (PagedSpace* space = spaces.Next(); space != nullptr;
       space = spaces.Next()) {
    // Hold the space mutex while toggling, since paged spaces may be
    // accessed concurrently.
    base::MutexGuard guard(space->mutex());
    space->DisableInlineAllocation();
  }
}

View File

@ -957,9 +957,6 @@ class Heap {
// Inline allocation. ========================================================
// ===========================================================================
// Indicates whether inline bump-pointer allocation has been disabled.
bool inline_allocation_disabled() { return inline_allocation_disabled_; }
// Switch whether inline bump-pointer allocation should be used.
V8_EXPORT_PRIVATE void EnableInlineAllocation();
V8_EXPORT_PRIVATE void DisableInlineAllocation();
@ -2279,10 +2276,6 @@ class Heap {
std::atomic<size_t> old_generation_allocation_limit_{0};
size_t global_allocation_limit_ = 0;
// Indicates that inline bump-pointer allocation has been globally disabled
// for all spaces. This is used to disable allocations in generated code.
bool inline_allocation_disabled_ = false;
// Weak list heads, threaded through the objects.
// List heads are initialized lazily and contain the undefined_value at start.
// {native_contexts_list_} is an Address instead of an Object to allow the use

View File

@ -487,7 +487,7 @@ class V8_EXPORT_PRIVATE NewSpace
void MakeLinearAllocationAreaIterable();
// Creates a filler object in the linear allocation area and closes it.
void FreeLinearAllocationArea();
void FreeLinearAllocationArea() override;
private:
static const int kAllocationBufferParkingThreshold = 4 * KB;

View File

@ -194,7 +194,7 @@ class V8_EXPORT_PRIVATE PagedSpace
void ResetFreeList();
// Empty space linear allocation area, returning unused area to free list.
void FreeLinearAllocationArea();
void FreeLinearAllocationArea() override;
void MakeLinearAllocationAreaIterable();

View File

@ -259,10 +259,12 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
size_t min_size) {
// Computes the end (limit) of the linear allocation area for a node
// spanning [start, end), guaranteeing at least min_size usable bytes.
DCHECK_GE(end - start, min_size);
// NOTE(review): rendered diff — the following shows both the old guard
// (heap-global inline_allocation_disabled()) and its replacement
// (space-local use_lab_); only one exists in an actual checkout.
if (heap()->inline_allocation_disabled()) {
// Fit the requested area exactly.
if (!use_lab_) {
// LABs are disabled, so we fit the requested area exactly.
return start + min_size;
} else if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
}
if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
// Ensure there are no unaccounted allocations.
DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
@ -277,10 +279,27 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
// Observers active: cap the limit at the next observer step (but never
// below min_size, and never past the node's end) so allocation is
// interrupted at the step boundary.
static_cast<uint64_t>(start) + std::max(min_size, rounded_step);
uint64_t new_end = std::min(step_end, static_cast<uint64_t>(end));
return static_cast<Address>(new_end);
} else {
// The entire node can be used as the linear allocation area.
return end;
}
// LABs are enabled and no observers attached. Return the whole node for the
// LAB.
return end;
}
// Turns off inline (LAB-based) allocation for this space: any open linear
// allocation area is returned and the inline allocation limit is refreshed.
// No-op when LABs are already disabled.
void SpaceWithLinearArea::DisableInlineAllocation() {
  if (use_lab_) {
    use_lab_ = false;
    FreeLinearAllocationArea();
    UpdateInlineAllocationLimit(0);
  }
}
// Turns inline (LAB-based) allocation back on for this space, advancing
// allocation observers and refreshing the inline allocation limit.
// No-op when LABs are already enabled.
void SpaceWithLinearArea::EnableInlineAllocation() {
  if (!use_lab_) {
    use_lab_ = true;
    AdvanceAllocationObservers();
    UpdateInlineAllocationLimit(0);
  }
}
void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {

View File

@ -474,6 +474,7 @@ class SpaceWithLinearArea : public Space {
size_t allocation_size);
void MarkLabStartInitialized();
virtual void FreeLinearAllocationArea() = 0;
// When allocation observers are active we may use a lower limit to allow the
// observers to 'interrupt' earlier than the natural limit. Given a linear
@ -484,13 +485,17 @@ class SpaceWithLinearArea : public Space {
V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
size_t min_size) = 0;
V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
void DisableInlineAllocation();
void EnableInlineAllocation();
bool IsInlineAllocationEnabled() const { return use_lab_; }
void PrintAllocationsOrigins();
protected:
// TODO(ofrobots): make these private after refactoring is complete.
V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
LinearAllocationArea* const allocation_info_;
bool use_lab_ = true;
size_t allocations_origins_[static_cast<int>(
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};

View File

@ -774,7 +774,7 @@ void FillUpOneNewSpacePage(Isolate* isolate, Heap* heap) {
// We cannot rely on `space->limit()` to point to the end of the current page
// in the case where inline allocations are disabled, it actually points to
// the current allocation pointer.
DCHECK_IMPLIES(space->heap()->inline_allocation_disabled(),
DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
space->limit() == space->top());
int space_remaining =
static_cast<int>(space->to_space().page_high() - space->top());

View File

@ -140,7 +140,7 @@ bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
// We cannot rely on `space->limit()` to point to the end of the current page
// in the case where inline allocations are disabled, it actually points to
// the current allocation pointer.
DCHECK_IMPLIES(space->heap()->inline_allocation_disabled(),
DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
space->limit() == space->top());
int space_remaining =
static_cast<int>(space->to_space().page_high() - space->top());

View File

@ -3000,7 +3000,7 @@ TEST(TrackBumpPointerAllocations) {
// Now check that not all allocations are tracked if we manually reenable
// inline allocations.
CHECK(CcTest::heap()->inline_allocation_disabled());
CHECK(!CcTest::heap()->new_space()->IsInlineAllocationEnabled());
CcTest::heap()->EnableInlineAllocation();
CompileRun(inline_heap_allocation_source);