[heap] Make compaction space accept external memory.
BUG=chromium:524425
LOG=N
Review URL: https://codereview.chromium.org/1322523004
Cr-Commit-Position: refs/heads/master@{#30428}
Parent: 2fd84ef628
Commit: 4ecf07daa3
@@ -356,6 +356,13 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
 }
 
 
+AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
+    int size_in_bytes) {
+  base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
+  return AllocateRawUnaligned(size_in_bytes);
+}
+
+
 // Raw allocation.
 AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                 AllocationAlignment alignment) {
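The new synchronized entry point exists so that threads other than the owner can allocate in the space without racing it; it simply serializes callers through the space's new mutex before delegating to the unsynchronized allocator. A minimal sketch of the same pattern, using std::mutex/std::lock_guard in place of V8's base::Mutex/base::LockGuard; ToySpace and its members are made up for illustration:

    #include <mutex>

    // Hypothetical stand-in for a paged space; only the locking pattern matters.
    class ToySpace {
     public:
      // Unsynchronized fast path: only safe on the owning thread.
      bool AllocateRawUnaligned(int size_in_bytes) {
        top_ += size_in_bytes;  // pretend bump-pointer allocation
        return true;
      }

      // Synchronized wrapper: other threads must go through here so the
      // allocation state stays consistent.
      bool AllocateRawUnalignedSynchronized(int size_in_bytes) {
        std::lock_guard<std::mutex> lock_guard(space_mutex_);
        return AllocateRawUnaligned(size_in_bytes);
      }

     private:
      std::mutex space_mutex_;
      int top_ = 0;
    };

    int main() {
      ToySpace space;
      return space.AllocateRawUnalignedSynchronized(64) ? 0 : 1;
    }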
@@ -2219,7 +2219,6 @@ FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
 
 
 void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
-  DCHECK_LE(FreeList::kSmallListMin, size_in_bytes);
   free_space->set_next(top());
   set_top(free_space);
   if (end_ == NULL) {
@@ -2274,7 +2273,7 @@ int FreeList::Free(Address start, int size_in_bytes) {
   Page* page = Page::FromAddress(start);
 
   // Early return to drop too-small blocks on the floor.
-  if (size_in_bytes < kSmallListMin) {
+  if (size_in_bytes <= kSmallListMin) {
     page->add_non_available_small_blocks(size_in_bytes);
     return size_in_bytes;
   }
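This `<` to `<=` switch (and the identical one in GuaranteedAllocatable below) interacts with the constant changes in the FreeList hunk further down: kSmallListMin is bumped from 0x1f to 0x20 pointer sizes, and kSmallAllocationMax becomes kSmallListMin - kPointerSize, which preserves its old value. A worked check of the byte values, assuming a 64-bit build (kPointerSize == 8 is an assumption of this example; the hex multipliers come from the diff):

    constexpr int kPointerSize = 8;  // assumes a 64-bit build

    // Values from the old code:
    constexpr int kOldSmallListMin = 0x1f * kPointerSize;     // 248 bytes
    constexpr int kOldSmallAllocationMax = kOldSmallListMin;  // 248 bytes

    // Values from the new code:
    constexpr int kNewSmallListMin = 0x20 * kPointerSize;     // 256 bytes
    constexpr int kNewSmallAllocationMax =
        kNewSmallListMin - kPointerSize;                      // 248 bytes

    // FreeList::Free: "size_in_bytes < 248" became "size_in_bytes <= 256",
    // so the smallest block kept on a free list grows from 248 to 264 bytes
    // (sizes are pointer-aligned). GuaranteedAllocatable still reports 248
    // for small requests, because the subtraction compensates for the bump.
    static_assert(kOldSmallAllocationMax == kNewSmallAllocationMax,
                  "small allocation guarantee is unchanged");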
@@ -1591,7 +1591,7 @@ class FreeList {
   // This method returns how much memory can be allocated after freeing
   // maximum_freed memory.
   static inline int GuaranteedAllocatable(int maximum_freed) {
-    if (maximum_freed < kSmallListMin) {
+    if (maximum_freed <= kSmallListMin) {
       return 0;
     } else if (maximum_freed <= kSmallListMax) {
       return kSmallAllocationMax;
@@ -1631,24 +1631,23 @@ class FreeList {
   FreeListCategory* large_list() { return &large_list_; }
   FreeListCategory* huge_list() { return &huge_list_; }
 
+  static const int kSmallListMin = 0x20 * kPointerSize;
+
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
   static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
 
-  static const int kSmallListMin = 0x1f * kPointerSize;
-  static const int kSmallListMax = 0xff * kPointerSize;
-  static const int kMediumListMax = 0x7ff * kPointerSize;
-  static const int kLargeListMax = 0x3fff * kPointerSize;
-  static const int kSmallAllocationMax = kSmallListMin;
-  static const int kMediumAllocationMax = kSmallListMax;
-  static const int kLargeAllocationMax = kMediumListMax;
-
   FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
 
   PagedSpace* owner_;
   Heap* heap_;
 
+  static const int kSmallListMax = 0xff * kPointerSize;
+  static const int kMediumListMax = 0x7ff * kPointerSize;
+  static const int kLargeListMax = 0x3fff * kPointerSize;
+  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
+  static const int kMediumAllocationMax = kSmallListMax;
+  static const int kLargeAllocationMax = kMediumListMax;
+
   FreeListCategory small_list_;
   FreeListCategory medium_list_;
   FreeListCategory large_list_;
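For reference, the size classes come out as follows on a 64-bit build (kPointerSize == 8 is an assumption of this sketch; the multipliers are the constants above). The category routing in the comment paraphrases how FreeList::Free dispatches blocks by size and is not itself part of this diff:

    constexpr int kPointerSize = 8;  // assumed 64-bit build

    constexpr int kSmallListMin  = 0x20 * kPointerSize;    //     256 bytes
    constexpr int kSmallListMax  = 0xff * kPointerSize;    //   2,040 bytes
    constexpr int kMediumListMax = 0x7ff * kPointerSize;   //  16,376 bytes
    constexpr int kLargeListMax  = 0x3fff * kPointerSize;  // 131,064 bytes

    // Routing by block size:
    //   size <= kSmallListMin                  -> dropped on the floor
    //   kSmallListMin < size <= kSmallListMax  -> small_list_
    //   size <= kMediumListMax                 -> medium_list_
    //   size <= kLargeListMax                  -> large_list_
    //   larger                                 -> huge_list_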
@@ -1806,6 +1805,9 @@ class PagedSpace : public Space {
   MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
       int size_in_bytes);
 
+  MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
+      int size_in_bytes);
+
   // Allocate the requested number of bytes in the space double aligned if
   // possible, return a failure object if not.
   MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
@@ -2002,8 +2004,11 @@ class PagedSpace : public Space {
   // If not used, the emergency memory is released after compaction.
   MemoryChunk* emergency_memory_;
 
-  friend class PageIterator;
+  // Mutex guarding any concurrent access to the space.
+  base::Mutex space_mutex_;
+
+  friend class MarkCompactCollector;
+  friend class PageIterator;
 };
 
 
@@ -2685,6 +2690,12 @@ class CompactionSpace : public PagedSpace {
   CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
       : PagedSpace(heap, id, executable) {}
 
+  // Adds external memory starting at {start} of {size_in_bytes} to the space.
+  void AddExternalMemory(Address start, int size_in_bytes) {
+    IncreaseCapacity(size_in_bytes);
+    Free(start, size_in_bytes);
+  }
+
 protected:
   // The space is temporary and not included in any snapshots.
   virtual bool snapshotable() { return false; }
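AddExternalMemory is two bookkeeping steps: grow the space's capacity, then hand the range to Free, which puts it on a free list. The test below exercises the resulting behavior: donated memory is consumed first, and the space only acquires a page of its own once that memory runs out. A self-contained toy model of that contract (ToyCompactionSpace and everything in it is illustrative, not V8 API):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    class ToyCompactionSpace {
     public:
      void AddExternalMemory(char* start, size_t size_in_bytes) {
        capacity_ += size_in_bytes;  // IncreaseCapacity(size_in_bytes);
        free_start_ = start;         // Free(start, size_in_bytes);
        free_size_ = size_in_bytes;
      }

      char* AllocateRaw(size_t size) {
        if (size <= free_size_) {    // serve from donated memory first
          char* result = free_start_;
          free_start_ += size;
          free_size_ -= size;
          return result;
        }
        owned_.emplace_back(size);   // otherwise "acquire a page"
        capacity_ += size;
        return owned_.back().data();
      }

      size_t Capacity() const { return capacity_; }
      size_t OwnedPages() const { return owned_.size(); }

     private:
      char* free_start_ = nullptr;
      size_t free_size_ = 0;
      size_t capacity_ = 0;
      std::vector<std::vector<char>> owned_;
    };

    int main() {
      ToyCompactionSpace space;
      std::vector<char> external(4096);
      space.AddExternalMemory(external.data(), external.size());
      for (int i = 0; i < 8; i++) space.AllocateRaw(512);  // external memory
      assert(space.OwnedPages() == 0);
      space.AllocateRaw(512);  // exhausted: grows by one owned "page"
      assert(space.OwnedPages() == 1);
      return 0;
    }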
@@ -441,6 +441,91 @@ TEST(CompactionSpace) {
 }
 
 
+TEST(CompactionSpaceUsingExternalMemory) {
+  const int kObjectSize = 512;
+
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  MemoryAllocator* allocator = new MemoryAllocator(isolate);
+  CHECK(allocator != nullptr);
+  CHECK(allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
+  TestMemoryAllocatorScope test_scope(isolate, allocator);
+
+  CompactionSpace* compaction_space =
+      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
+  CHECK(compaction_space != NULL);
+  CHECK(compaction_space->SetUp());
+
+  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
+  CHECK(old_space != NULL);
+  CHECK(old_space->SetUp());
+
+  // The linear allocation area already counts as used bytes, making
+  // exact testing impossible.
+  heap->DisableInlineAllocation();
+
+  // Test:
+  // * Allocate a backing store in old_space.
+  // * Compute the number num_rest_objects of kObjectSize objects that fit
+  //   into the rest of available memory.
+  // * Add the rest of available memory to the compaction space.
+  // * Allocate num_rest_objects objects in the compaction space.
+  // * Allocate one object more.
+  // * Merge the compaction space and compare the expected number of pages.
+
+  // Allocate a single object in old_space to initialize a backing page.
+  old_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
+  // Compute the number of objects that fit into the rest in old_space.
+  intptr_t rest = static_cast<int>(old_space->Available());
+  CHECK_GT(rest, 0);
+  intptr_t num_rest_objects = rest / kObjectSize;
+  // After allocating num_rest_objects in compaction_space we allocate a bit
+  // more.
+  const intptr_t kAdditionalCompactionMemory = kObjectSize;
+  // We expect a single old_space page.
+  const intptr_t kExpectedInitialOldSpacePages = 1;
+  // We expect a single additional page in compaction space because we mostly
+  // use external memory.
+  const intptr_t kExpectedCompactionPages = 1;
+  // We expect two pages to be reachable from old_space in the end.
+  const intptr_t kExpectedOldSpacePagesAfterMerge = 2;
+
+  Object* chunk =
+      old_space->AllocateRawUnaligned(static_cast<int>(rest)).ToObjectChecked();
+  CHECK_EQ(old_space->CountTotalPages(), kExpectedInitialOldSpacePages);
+  CHECK(chunk != nullptr);
+  CHECK(chunk->IsHeapObject());
+
+  CHECK_EQ(compaction_space->CountTotalPages(), 0);
+  CHECK_EQ(compaction_space->Capacity(), 0);
+  // Make the rest of memory available for compaction.
+  compaction_space->AddExternalMemory(HeapObject::cast(chunk)->address(),
+                                      static_cast<int>(rest));
+  CHECK_EQ(compaction_space->CountTotalPages(), 0);
+  CHECK_EQ(compaction_space->Capacity(), rest);
+  while (num_rest_objects-- > 0) {
+    compaction_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
+  }
+  // We only used external memory so far.
+  CHECK_EQ(compaction_space->CountTotalPages(), 0);
+  // Additional allocation.
+  compaction_space->AllocateRawUnaligned(kAdditionalCompactionMemory)
+      .ToObjectChecked();
+  // Now the compaction space should've also acquired a page.
+  CHECK_EQ(compaction_space->CountTotalPages(), kExpectedCompactionPages);
+
+  old_space->MergeCompactionSpace(compaction_space);
+  CHECK_EQ(old_space->CountTotalPages(), kExpectedOldSpacePagesAfterMerge);
+
+  delete compaction_space;
+  delete old_space;
+
+  allocator->TearDown();
+  delete allocator;
+}
+
+
 TEST(LargeObjectSpace) {
   v8::V8::Initialize();