[heap] Prevent new space from allocating small LABs

Update the free list implementation for new space to set a larger
minimum block size and skip a redundant step in the allocation logic
(see the sketch below).

Bug: v8:12612
Change-Id: I480fe99cf4cfad7c25d687540b7841cd56d41d47
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3976508
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83920}
Omer Katz 2022-10-26 09:59:46 +02:00 committed by V8 LUCI CQ
parent 158de3ef88
commit 4d95ff1a21
4 changed files with 137 additions and 133 deletions
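
The idea behind the change, in isolation: a free list configured with a larger
minimum block size never tracks, and therefore never hands out, blocks below
that size, so the linear allocation buffers (LABs) carved from it cannot be
tiny. The sketch below is a minimal, self-contained illustration of that
behavior, not V8's FreeList; SimpleFreeList and its members are hypothetical
stand-ins.

#include <cstddef>
#include <map>
#include <optional>

// Minimal sketch (not V8 code): a free list that refuses to track blocks
// below a configurable minimum, so every block it hands out -- and thus every
// LAB carved from it -- is at least min_block_size bytes.
class SimpleFreeList {
 public:
  explicit SimpleFreeList(size_t min_block_size)
      : min_block_size_(min_block_size) {}

  // Blocks smaller than the minimum are not added; they count as waste.
  void Free(size_t start, size_t size) {
    if (size < min_block_size_) {
      wasted_bytes_ += size;
      return;
    }
    blocks_.emplace(start, size);
  }

  // First-fit allocation; any remainder re-enters the list through Free(),
  // so small leftovers are dropped rather than recycled as tiny blocks.
  std::optional<size_t> Allocate(size_t size) {
    for (auto it = blocks_.begin(); it != blocks_.end(); ++it) {
      if (it->second >= size) {
        const size_t start = it->first;
        const size_t remainder = it->second - size;
        blocks_.erase(it);
        Free(start + size, remainder);
        return start;
      }
    }
    return std::nullopt;
  }

  size_t wasted_bytes() const { return wasted_bytes_; }

 private:
  const size_t min_block_size_;
  size_t wasted_bytes_ = 0;
  std::map<size_t, size_t> blocks_;  // start offset -> block size
};

With the minimum set to the fast-path threshold, every block the list serves
is at least that large, which is what the header change below achieves by
setting min_block_size_ = kFastPathStart when small blocks are prohibited.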


@@ -112,7 +112,7 @@ void FreeListCategory::Relink(FreeList* owner) {
FreeList* FreeList::CreateFreeList() { return new FreeListManyCachedOrigin(); }
FreeList* FreeList::CreateFreeListForNewSpace() {
return new FreeListManyCachedOriginForNewSpace();
return new FreeListManyCachedFastPathForNewSpace();
}
FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
@@ -343,11 +343,11 @@ FreeSpace FreeListManyCached::Allocate(size_t size_in_bytes, size_t* node_size,
}
// ------------------------------------------------
// FreeListManyCachedFastPath implementation
// FreeListManyCachedFastPathBase implementation
FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes,
size_t* node_size,
AllocationOrigin origin) {
FreeSpace FreeListManyCachedFastPathBase::Allocate(size_t size_in_bytes,
size_t* node_size,
AllocationOrigin origin) {
USE(origin);
DCHECK_GE(kMaxBlockSize, size_in_bytes);
FreeSpace node;
@@ -363,16 +363,18 @@ FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes,
}
// Fast path part 2: searching the medium categories for tiny objects
if (node.is_null()) {
if (size_in_bytes <= kTinyObjectMaxSize) {
DCHECK_EQ(kFastPathFirstCategory, first_category);
for (type = next_nonempty_category[kFastPathFallBackTiny];
type < kFastPathFirstCategory;
type = next_nonempty_category[type + 1]) {
node = TryFindNodeIn(type, size_in_bytes, node_size);
if (!node.is_null()) break;
if (small_blocks_mode_ == SmallBlocksMode::kAllow) {
if (node.is_null()) {
if (size_in_bytes <= kTinyObjectMaxSize) {
DCHECK_EQ(kFastPathFirstCategory, first_category);
for (type = next_nonempty_category[kFastPathFallBackTiny];
type < kFastPathFirstCategory;
type = next_nonempty_category[type + 1]) {
node = TryFindNodeIn(type, size_in_bytes, node_size);
if (!node.is_null()) break;
}
first_category = kFastPathFallBackTiny;
}
first_category = kFastPathFallBackTiny;
}
}
@@ -406,32 +408,6 @@ FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes,
return node;
}
// ------------------------------------------------
// FreeListManyCachedFastPathForNewSpace implementation
FreeSpace FreeListManyCachedFastPathForNewSpace::Allocate(
size_t size_in_bytes, size_t* node_size, AllocationOrigin origin) {
FreeSpace node =
FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size, origin);
if (!node.is_null()) return node;
// Search through the precise category for a fit
FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
node = SearchForNodeInList(type, size_in_bytes, node_size);
if (!node.is_null()) {
if (categories_[type] == nullptr) UpdateCacheAfterRemoval(type);
Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
}
#ifdef DEBUG
CheckCacheIntegrity();
#endif
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
// ------------------------------------------------
// FreeListManyCachedOrigin implementation
@@ -446,19 +422,6 @@ FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes,
}
}
// ------------------------------------------------
// FreeListManyCachedOriginForNewSpace implementation
FreeSpace FreeListManyCachedOriginForNewSpace::Allocate(
size_t size_in_bytes, size_t* node_size, AllocationOrigin origin) {
if (origin == AllocationOrigin::kGC) {
return FreeListManyCached::Allocate(size_in_bytes, node_size, origin);
} else {
return FreeListManyCachedFastPathForNewSpace::Allocate(size_in_bytes,
node_size, origin);
}
}
// ------------------------------------------------
// Generic FreeList methods (non alloc/free related)
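
To make the control flow of the FreeListManyCachedFastPathBase::Allocate hunk
above easier to follow, here is a simplified, self-contained sketch of a
two-phase category search: a first pass over the large fast-path categories,
then a fallback over the smaller categories that now runs only when small
blocks are allowed. It is not the V8 implementation; Categories, TryFindIn,
kFastPathStartIndex and kNumCategories are illustrative stand-ins, and the
real code additionally limits the fallback to requests no larger than
kTinyObjectMaxSize.

#include <array>
#include <cstddef>
#include <optional>
#include <vector>

// Simplified sketch (not V8): free blocks are bucketed by size category, with
// the larger "fast path" categories starting at kFastPathStartIndex.
enum class SmallBlocksMode { kAllow, kProhibit };

constexpr int kNumCategories = 8;
constexpr int kFastPathStartIndex = 4;

struct Categories {
  std::array<std::vector<size_t>, kNumCategories> blocks;  // block sizes

  // Pop the first block of at least `size` bytes from bucket `type`, if any.
  std::optional<size_t> TryFindIn(int type, size_t size) {
    std::vector<size_t>& bucket = blocks[type];
    for (size_t i = 0; i < bucket.size(); ++i) {
      if (bucket[i] >= size) {
        const size_t found = bucket[i];
        bucket.erase(bucket.begin() + i);
        return found;
      }
    }
    return std::nullopt;
  }
};

// Phase 1 searches the large categories; phase 2 (smaller categories) runs
// only when small blocks are allowed, mirroring the guard added above.
std::optional<size_t> Allocate(Categories& categories, size_t size,
                               SmallBlocksMode mode) {
  for (int type = kFastPathStartIndex; type < kNumCategories; ++type) {
    if (auto node = categories.TryFindIn(type, size)) return node;
  }
  if (mode == SmallBlocksMode::kAllow) {
    for (int type = 0; type < kFastPathStartIndex; ++type) {
      if (auto node = categories.TryFindIn(type, size)) return node;
    }
  }
  return std::nullopt;
}

Constructing the new-space free list with SmallBlocksMode::kProhibit makes the
fallback a no-op, so the new-space variant never serves blocks from the small
categories.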


@@ -446,8 +446,18 @@ class V8_EXPORT_PRIVATE FreeListManyCached : public FreeListMany {
// FreeListMany), which makes its fast path less fast in the Scavenger. This is
// done on purpose, since this class's only purpose is to be used by
// FreeListManyCachedOrigin, which is precise for the scavenger.
class V8_EXPORT_PRIVATE FreeListManyCachedFastPath : public FreeListManyCached {
class V8_EXPORT_PRIVATE FreeListManyCachedFastPathBase
: public FreeListManyCached {
public:
enum class SmallBlocksMode { kAllow, kProhibit };
FreeListManyCachedFastPathBase(SmallBlocksMode small_blocks_mode)
: small_blocks_mode_(small_blocks_mode) {
if (small_blocks_mode_ == SmallBlocksMode::kProhibit) {
min_block_size_ = kFastPathStart;
}
}
V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
size_t* node_size,
AllocationOrigin origin) override;
@@ -480,24 +490,25 @@ class V8_EXPORT_PRIVATE FreeListManyCachedFastPath : public FreeListManyCached {
return last_category_;
}
private:
SmallBlocksMode small_blocks_mode_;
FRIEND_TEST(
SpacesTest,
FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType);
};
// Same as FreeListManyCachedFastPath but falls back to a precise search of the
// precise category in case allocation fails. Because new space is relatively
// small, the free list is also relatively small and larger categories are more
// likely to be empty. The precise search is meant to avoid an allocation
// failure and thus avoid GCs.
class V8_EXPORT_PRIVATE FreeListManyCachedFastPathForNewSpace
: public FreeListManyCachedFastPath {
class FreeListManyCachedFastPath : public FreeListManyCachedFastPathBase {
public:
V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
size_t* node_size,
AllocationOrigin origin) override;
FreeListManyCachedFastPath()
: FreeListManyCachedFastPathBase(SmallBlocksMode::kAllow) {}
};
protected:
class FreeListManyCachedFastPathForNewSpace
: public FreeListManyCachedFastPathBase {
public:
FreeListManyCachedFastPathForNewSpace()
: FreeListManyCachedFastPathBase(SmallBlocksMode::kProhibit) {}
};
// Uses FreeListManyCached if in the GC; FreeListManyCachedFastPath otherwise.
@@ -516,16 +527,6 @@ class V8_EXPORT_PRIVATE FreeListManyCachedOrigin
AllocationOrigin origin) override;
};
// Similar to FreeListManyCachedOrigin but uses
// FreeListManyCachedFastPathForNewSpace for allocations outside the GC.
class V8_EXPORT_PRIVATE FreeListManyCachedOriginForNewSpace
: public FreeListManyCachedFastPathForNewSpace {
public:
V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
size_t* node_size,
AllocationOrigin origin) override;
};
} // namespace internal
} // namespace v8


@@ -10,6 +10,7 @@
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/heap/free-list.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
@@ -136,12 +137,20 @@ std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
namespace {
void FillPageInPagedSpace(Page* page,
std::vector<Handle<FixedArray>>* out_handles) {
Heap* heap = page->heap();
DCHECK(page->SweepingDone());
PagedSpaceBase* paged_space = static_cast<PagedSpaceBase*>(page->owner());
// Make sure the LAB is empty to guarantee that all free space is accounted
// for in the freelist.
DCHECK_EQ(paged_space->limit(), paged_space->top());
PauseAllocationObserversScope no_observers_scope(heap);
CollectionEpoch full_epoch =
heap->tracer()->CurrentEpoch(GCTracer::Scope::ScopeId::MARK_COMPACTOR);
CollectionEpoch young_epoch = heap->tracer()->CurrentEpoch(
GCTracer::Scope::ScopeId::MINOR_MARK_COMPACTOR);
for (Page* p : *paged_space) {
if (p != page) paged_space->UnlinkFreeListCategories(p);
}
@@ -158,56 +167,67 @@ void FillPageInPagedSpace(Page* page,
[&available_sizes](FreeListCategory* category) {
category->IterateNodesForTesting([&available_sizes](FreeSpace node) {
int node_size = node.Size();
DCHECK_LT(0, FixedArrayLenFromSize(node_size));
available_sizes.push_back(node_size);
if (node_size >= kMaxRegularHeapObjectSize) {
available_sizes.push_back(node_size);
}
});
});
Isolate* isolate = page->heap()->isolate();
Isolate* isolate = heap->isolate();
// Allocate as many max size arrays as possible, while making sure not to
// leave behind a block too small to fit a FixedArray.
const int max_array_length = FixedArrayLenFromSize(kMaxRegularHeapObjectSize);
for (size_t i = 0; i < available_sizes.size(); ++i) {
int available_size = available_sizes[i];
while (available_size >
kMaxRegularHeapObjectSize + FixedArray::kHeaderSize) {
while (available_size > kMaxRegularHeapObjectSize) {
Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArray(
max_array_length, AllocationType::kYoung);
if (out_handles) out_handles->push_back(fixed_array);
available_size -= kMaxRegularHeapObjectSize;
}
if (available_size > kMaxRegularHeapObjectSize) {
// Allocate less than kMaxRegularHeapObjectSize to ensure remaining space
// can be used to allocate another FixedArray.
int array_size = kMaxRegularHeapObjectSize - FixedArray::kHeaderSize;
Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArray(
FixedArrayLenFromSize(array_size), AllocationType::kYoung);
if (out_handles) out_handles->push_back(fixed_array);
available_size -= array_size;
}
DCHECK_LE(available_size, kMaxRegularHeapObjectSize);
DCHECK_LT(0, FixedArrayLenFromSize(available_size));
available_sizes[i] = available_size;
}
// Allocate FixedArrays in remaining free list blocks, from largest to
// smallest.
std::sort(available_sizes.begin(), available_sizes.end(),
[](size_t a, size_t b) { return a > b; });
for (size_t i = 0; i < available_sizes.size(); ++i) {
int available_size = available_sizes[i];
DCHECK_LE(available_size, kMaxRegularHeapObjectSize);
int array_length = FixedArrayLenFromSize(available_size);
DCHECK_LT(0, array_length);
Handle<FixedArray> fixed_array =
isolate->factory()->NewFixedArray(array_length, AllocationType::kYoung);
if (out_handles) out_handles->push_back(fixed_array);
paged_space->FreeLinearAllocationArea();
// Allocate FixedArrays in remaining free list blocks, from largest
// category to smallest.
std::vector<std::vector<int>> remaining_sizes;
page->ForAllFreeListCategories(
[&remaining_sizes](FreeListCategory* category) {
remaining_sizes.push_back({});
std::vector<int>& sizes_in_category =
remaining_sizes[remaining_sizes.size() - 1];
category->IterateNodesForTesting([&sizes_in_category](FreeSpace node) {
int node_size = node.Size();
DCHECK_LT(0, FixedArrayLenFromSize(node_size));
sizes_in_category.push_back(node_size);
});
});
for (auto it = remaining_sizes.rbegin(); it != remaining_sizes.rend(); ++it) {
std::vector<int> sizes_in_category = *it;
for (int size : sizes_in_category) {
DCHECK_LE(size, kMaxRegularHeapObjectSize);
int array_length = FixedArrayLenFromSize(size);
DCHECK_LT(0, array_length);
Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArray(
array_length, AllocationType::kYoung);
if (out_handles) out_handles->push_back(fixed_array);
}
}
DCHECK_EQ(0, page->AvailableInFreeList());
DCHECK_EQ(0, page->AvailableInFreeListFromAllocatedBytes());
for (Page* p : *paged_space) {
if (p != page) paged_space->RelinkFreeListCategories(p);
}
// Allocations in this method should not require a GC.
CHECK_EQ(full_epoch, heap->tracer()->CurrentEpoch(
GCTracer::Scope::ScopeId::MARK_COMPACTOR));
CHECK_EQ(young_epoch, heap->tracer()->CurrentEpoch(
GCTracer::Scope::ScopeId::MINOR_MARK_COMPACTOR));
}
} // namespace
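
For background on the FillPageInPagedSpace changes above (the same change
appears again in the second test helper below), the filling strategy is plain
size arithmetic: a FixedArray of length N occupies a fixed header plus N
tagged slots, so a free block can be consumed exactly by picking the right
length, and blocks larger than the maximum object size must be chopped up
without leaving a remainder too small to hold another array. The sketch below
is a self-contained illustration of that bookkeeping; kHeaderSize, kTaggedSize
and kMaxObjectSize are made-up stand-ins for FixedArray::kHeaderSize,
kTaggedSize and kMaxRegularHeapObjectSize, and the sketch is not the V8 test
helper itself.

#include <cassert>
#include <vector>

// Illustrative constants (not V8's actual values).
constexpr int kTaggedSize = 8;           // bytes per array element slot
constexpr int kHeaderSize = 16;          // fixed per-array overhead
constexpr int kMaxObjectSize = 1 << 17;  // stand-in for the max regular object size

// Largest array length that fits into `size` bytes.
int LenFromSize(int size) { return (size - kHeaderSize) / kTaggedSize; }

// Bytes occupied by an array of length `len`.
int SizeFromLen(int len) { return kHeaderSize + len * kTaggedSize; }

// Carve one free block into array allocations, never leaving a fragment too
// small to hold another array header -- the same concern the helper above
// addresses when it chops up blocks larger than the maximum object size.
std::vector<int> CarveBlock(int block_size) {
  assert(block_size % kTaggedSize == 0);  // free blocks are tagged-aligned
  std::vector<int> allocations;
  while (block_size > kMaxObjectSize) {
    int chunk = kMaxObjectSize;
    // If taking a full-size chunk would leave less than a header behind,
    // take slightly less so the remainder stays usable.
    if (block_size - chunk < kHeaderSize) chunk = kMaxObjectSize - kHeaderSize;
    allocations.push_back(SizeFromLen(LenFromSize(chunk)));
    block_size -= allocations.back();
  }
  if (block_size >= kHeaderSize) {
    allocations.push_back(SizeFromLen(LenFromSize(block_size)));
    block_size -= allocations.back();
  }
  assert(block_size < kHeaderSize);  // nothing left that could hold an array
  return allocations;
}

The real helper performs the corresponding allocations via
isolate->factory()->NewFixedArray(length, AllocationType::kYoung) and walks
the free-list categories from largest to smallest, as the comments in the
hunks above describe.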


@@ -8,6 +8,7 @@
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/new-spaces.h"
@@ -50,12 +51,20 @@ int FixedArrayLenFromSize(int size) {
void FillPageInPagedSpace(Page* page,
std::vector<Handle<FixedArray>>* out_handles) {
Heap* heap = page->heap();
DCHECK(page->SweepingDone());
PagedSpaceBase* paged_space = static_cast<PagedSpaceBase*>(page->owner());
// Make sure the LAB is empty to guarantee that all free space is accounted
// for in the freelist.
DCHECK_EQ(paged_space->limit(), paged_space->top());
PauseAllocationObserversScope no_observers_scope(heap);
CollectionEpoch full_epoch =
heap->tracer()->CurrentEpoch(GCTracer::Scope::ScopeId::MARK_COMPACTOR);
CollectionEpoch young_epoch = heap->tracer()->CurrentEpoch(
GCTracer::Scope::ScopeId::MINOR_MARK_COMPACTOR);
for (Page* p : *paged_space) {
if (p != page) paged_space->UnlinkFreeListCategories(p);
}
@@ -72,56 +81,67 @@ void FillPageInPagedSpace(Page* page,
[&available_sizes](FreeListCategory* category) {
category->IterateNodesForTesting([&available_sizes](FreeSpace node) {
int node_size = node.Size();
DCHECK_LT(0, FixedArrayLenFromSize(node_size));
available_sizes.push_back(node_size);
if (node_size >= kMaxRegularHeapObjectSize) {
available_sizes.push_back(node_size);
}
});
});
Isolate* isolate = page->heap()->isolate();
Isolate* isolate = heap->isolate();
// Allocate as many max size arrays as possible, while making sure not to
// leave behind a block too small to fit a FixedArray.
const int max_array_length = FixedArrayLenFromSize(kMaxRegularHeapObjectSize);
for (size_t i = 0; i < available_sizes.size(); ++i) {
int available_size = available_sizes[i];
while (available_size >
kMaxRegularHeapObjectSize + FixedArray::kHeaderSize) {
while (available_size > kMaxRegularHeapObjectSize) {
Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArray(
max_array_length, AllocationType::kYoung);
if (out_handles) out_handles->push_back(fixed_array);
available_size -= kMaxRegularHeapObjectSize;
}
if (available_size > kMaxRegularHeapObjectSize) {
// Allocate less than kMaxRegularHeapObjectSize to ensure remaining space
// can be used to allocate another FixedArray.
int array_size = kMaxRegularHeapObjectSize - FixedArray::kHeaderSize;
Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArray(
FixedArrayLenFromSize(array_size), AllocationType::kYoung);
if (out_handles) out_handles->push_back(fixed_array);
available_size -= array_size;
}
DCHECK_LE(available_size, kMaxRegularHeapObjectSize);
DCHECK_LT(0, FixedArrayLenFromSize(available_size));
available_sizes[i] = available_size;
}
// Allocate FixedArrays in remaining free list blocks, from largest to
// smallest.
std::sort(available_sizes.begin(), available_sizes.end(),
[](size_t a, size_t b) { return a > b; });
for (size_t i = 0; i < available_sizes.size(); ++i) {
int available_size = available_sizes[i];
DCHECK_LE(available_size, kMaxRegularHeapObjectSize);
int array_length = FixedArrayLenFromSize(available_size);
DCHECK_LT(0, array_length);
Handle<FixedArray> fixed_array =
isolate->factory()->NewFixedArray(array_length, AllocationType::kYoung);
if (out_handles) out_handles->push_back(fixed_array);
paged_space->FreeLinearAllocationArea();
// Allocate FixedArrays in remaining free list blocks, from largest
// category to smallest.
std::vector<std::vector<int>> remaining_sizes;
page->ForAllFreeListCategories(
[&remaining_sizes](FreeListCategory* category) {
remaining_sizes.push_back({});
std::vector<int>& sizes_in_category =
remaining_sizes[remaining_sizes.size() - 1];
category->IterateNodesForTesting([&sizes_in_category](FreeSpace node) {
int node_size = node.Size();
DCHECK_LT(0, FixedArrayLenFromSize(node_size));
sizes_in_category.push_back(node_size);
});
});
for (auto it = remaining_sizes.rbegin(); it != remaining_sizes.rend(); ++it) {
std::vector<int> sizes_in_category = *it;
for (int size : sizes_in_category) {
DCHECK_LE(size, kMaxRegularHeapObjectSize);
int array_length = FixedArrayLenFromSize(size);
DCHECK_LT(0, array_length);
Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArray(
array_length, AllocationType::kYoung);
if (out_handles) out_handles->push_back(fixed_array);
}
}
DCHECK_EQ(0, page->AvailableInFreeList());
DCHECK_EQ(0, page->AvailableInFreeListFromAllocatedBytes());
for (Page* p : *paged_space) {
if (p != page) paged_space->RelinkFreeListCategories(p);
}
// Allocations in this method should not require a GC.
CHECK_EQ(full_epoch, heap->tracer()->CurrentEpoch(
GCTracer::Scope::ScopeId::MARK_COMPACTOR));
CHECK_EQ(young_epoch, heap->tracer()->CurrentEpoch(
GCTracer::Scope::ScopeId::MINOR_MARK_COMPACTOR));
}
} // namespace