Reduce boot-up memory use of V8.
This is a recommit of http://codereview.chromium.org/9179012 after fixing
what turned out to be unrelated out-of-memory errors. That was a rebase of
http://codereview.chromium.org/9017009/.

Review URL: https://chromiumcodereview.appspot.com/9289047
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10542 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Commit 419ea5fcc3 (parent f2eda210d0)
@@ -1150,6 +1150,7 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
   MemoryChunk* chunk =
       Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
+                                                            desc.instr_size,
                                                             EXECUTABLE,
                                                             NULL);
   if (chunk == NULL) {

src/heap.cc (20 lines changed)
@@ -582,10 +582,14 @@ void Heap::ReserveSpace(
   PagedSpace* map_space = Heap::map_space();
   PagedSpace* cell_space = Heap::cell_space();
   LargeObjectSpace* lo_space = Heap::lo_space();
+  bool one_old_space_gc_has_been_performed = false;
   bool gc_performed = true;
   int counter = 0;
   static const int kThreshold = 20;
+  bool old_space_gc_performed;
+
   while (gc_performed && counter++ < kThreshold) {
+    old_space_gc_performed = false;
     gc_performed = false;
     if (!new_space->ReserveSpace(new_space_size)) {
       Heap::CollectGarbage(NEW_SPACE);

@@ -594,22 +598,27 @@ void Heap::ReserveSpace(
     if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
       Heap::CollectGarbage(OLD_POINTER_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     if (!(old_data_space->ReserveSpace(data_space_size))) {
       Heap::CollectGarbage(OLD_DATA_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     if (!(code_space->ReserveSpace(code_space_size))) {
       Heap::CollectGarbage(CODE_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     if (!(map_space->ReserveSpace(map_space_size))) {
       Heap::CollectGarbage(MAP_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     if (!(cell_space->ReserveSpace(cell_space_size))) {
       Heap::CollectGarbage(CELL_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     // We add a slack-factor of 2 in order to have space for a series of
     // large-object allocations that are only just larger than the page size.

@@ -619,15 +628,22 @@ void Heap::ReserveSpace(
     // allocation in the other spaces.
     large_object_size += cell_space_size + map_space_size + code_space_size +
                          data_space_size + pointer_space_size;
-    if (!(lo_space->ReserveSpace(large_object_size))) {
+
+    // If we already did one GC in order to make space in old space, there is
+    // no sense in doing another one.  We will attempt to force through the
+    // large object space allocation, which comes directly from the OS,
+    // regardless of any soft limit.
+    if (!one_old_space_gc_has_been_performed &&
+        !(lo_space->ReserveSpace(large_object_size))) {
       Heap::CollectGarbage(LO_SPACE);
       gc_performed = true;
     }
+    if (old_space_gc_performed) one_old_space_gc_has_been_performed = true;
   }

   if (gc_performed) {
     // Failed to reserve the space after several attempts.
     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
   }
 }
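
Taken together, the heap.cc hunks make Heap::ReserveSpace skip the large-object-space GC once an old-space GC has already run in the same pass: large-object memory comes straight from the OS, so a second collection buys nothing. A distilled, stand-alone sketch of the new control flow (the Reserve*/Collect* functions below are simple stand-ins for the per-space calls in the diff, not V8 API):

#include <cstdio>

static int attempts = 0;
// Stand-ins: old space frees up after two GCs, LO space after one.
static bool ReserveOldSpace()    { return attempts > 1; }
static bool ReserveLargeObject() { return attempts > 0; }
static void CollectOldSpaceGarbage()    { std::puts("old-space GC"); }
static void CollectLargeObjectGarbage() { std::puts("large-object GC"); }

int main() {
  static const int kThreshold = 20;
  bool one_old_space_gc_has_been_performed = false;
  bool gc_performed = true;
  int counter = 0;
  while (gc_performed && counter++ < kThreshold) {
    bool old_space_gc_performed = false;
    gc_performed = false;
    if (!ReserveOldSpace()) {
      CollectOldSpaceGarbage();
      gc_performed = true;
      old_space_gc_performed = true;
    }
    // Only GC on behalf of the large-object space if no old-space GC has
    // run yet; its memory comes directly from the OS, so another GC would
    // not help.
    if (!one_old_space_gc_has_been_performed && !ReserveLargeObject()) {
      CollectLargeObjectGarbage();
      gc_performed = true;
    }
    if (old_space_gc_performed) one_old_space_gc_has_been_performed = true;
    ++attempts;
  }
  return gc_performed ? 1 : 0;  // non-zero if space could not be reserved
}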
@@ -287,7 +287,7 @@ void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,

   // It's difficult to filter out slots recorded for large objects.
   if (chunk->owner()->identity() == LO_SPACE &&
-      chunk->size() > static_cast<size_t>(Page::kPageSize) &&
+      chunk->size() > Page::kPageSize &&
       is_compacting) {
     chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
   }

@@ -2919,7 +2919,8 @@ static void SweepPrecisely(PagedSpace* space,
   for ( ; live_objects != 0; live_objects--) {
     Address free_end = object_address + offsets[live_index++] * kPointerSize;
     if (free_end != free_start) {
-      space->Free(free_start, static_cast<int>(free_end - free_start));
+      space->AddToFreeLists(free_start,
+                            static_cast<int>(free_end - free_start));
     }
     HeapObject* live_object = HeapObject::FromAddress(free_end);
     ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));

@@ -2945,7 +2946,8 @@ static void SweepPrecisely(PagedSpace* space,
     cells[cell_index] = 0;
   }
   if (free_start != p->ObjectAreaEnd()) {
-    space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
+    space->AddToFreeLists(free_start,
+                          static_cast<int>(p->ObjectAreaEnd() - free_start));
   }
   p->ResetLiveBytes();
 }

@@ -3238,7 +3240,9 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     Page* p = evacuation_candidates_[i];
     if (!p->IsEvacuationCandidate()) continue;
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
+    space->AddToFreeLists(
+        p->ObjectAreaStart(),
+        static_cast<int>(p->ObjectAreaEnd() - p->ObjectAreaStart()));
     p->set_scan_on_scavenge(false);
     slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
     p->ClearEvacuationCandidate();

@@ -3555,8 +3559,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   }
   size_t size = block_address - p->ObjectAreaStart();
   if (cell_index == last_cell_index) {
-    freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
-                                                static_cast<int>(size)));
+    freed_bytes += static_cast<int>(space->AddToFreeLists(
+        p->ObjectAreaStart(), static_cast<int>(size)));
     ASSERT_EQ(0, p->LiveBytes());
     return freed_bytes;
   }

@@ -3565,8 +3569,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
   // Free the first free space.
   size = free_end - p->ObjectAreaStart();
-  freed_bytes += space->Free(p->ObjectAreaStart(),
-                             static_cast<int>(size));
+  freed_bytes += space->AddToFreeLists(p->ObjectAreaStart(),
+                                       static_cast<int>(size));
   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
   // the marking bitmap for that cell, which describes where the live object

@@ -3595,8 +3599,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
      // so now we need to find the start of the first live object at the
      // end of the free space.
      free_end = StartOfLiveObject(block_address, cell);
-      freed_bytes += space->Free(free_start,
-                                 static_cast<int>(free_end - free_start));
+      freed_bytes += space->AddToFreeLists(
+          free_start, static_cast<int>(free_end - free_start));
     }
   }
   // Update our undigested record of where the current free area started.

@@ -3610,8 +3614,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   // Handle the free space at the end of the page.
   if (block_address - free_start > 32 * kPointerSize) {
     free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += space->Free(free_start,
-                               static_cast<int>(block_address - free_start));
+    freed_bytes += space->AddToFreeLists(
+        free_start, static_cast<int>(block_address - free_start));
   }

   p->ResetLiveBytes();

@@ -612,6 +612,7 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) {
     pages_[LO_SPACE].Add(address);
   }
   last_object_address_ = address;
+  ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
   return address;
 }

@@ -622,7 +623,12 @@ HeapObject* Deserializer::GetAddressFromEnd(int space) {
   int offset = source_->GetInt();
   ASSERT(!SpaceIsLarge(space));
   offset <<= kObjectAlignmentBits;
-  return HeapObject::FromAddress(high_water_[space] - offset);
+  Address address = high_water_[space] - offset;
+  // This assert will fail if kMinimumSpaceSizes is too small for a space,
+  // because we rely on the fact that all allocation is linear when the VM
+  // is very young.
+  ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
+  return HeapObject::FromAddress(address);
 }

@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 #include "isolate.h"
+#include "spaces.h"

 #ifndef V8_SNAPSHOT_H_
 #define V8_SNAPSHOT_H_

@@ -86,6 +87,7 @@ class Snapshot {
   DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
 };

+
 } }  // namespace v8::internal

 #endif  // V8_SNAPSHOT_H_

@@ -164,12 +164,12 @@ Page* Page::Initialize(Heap* heap,
                        Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
-  ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
+  ASSERT(chunk->size() <= kPageSize);
   ASSERT(chunk->owner() == owner);
-  owner->IncreaseCapacity(Page::kObjectAreaSize);
-  owner->Free(page->ObjectAreaStart(),
-              static_cast<int>(page->ObjectAreaEnd() -
-                               page->ObjectAreaStart()));
+  int object_bytes =
+      static_cast<int>(page->ObjectAreaEnd() - page->ObjectAreaStart());
+  owner->IncreaseCapacity(object_bytes);
+  owner->AddToFreeLists(page->ObjectAreaStart(), object_bytes);

   heap->incremental_marking()->SetOldSpacePageFlags(chunk);

@@ -257,6 +257,7 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
   if (new_top > allocation_info_.limit) return NULL;

   allocation_info_.top = new_top;
+  ASSERT(new_top >= Page::FromAllocationTop(new_top)->ObjectAreaStart());
   return HeapObject::FromAddress(current_top);
 }

src/spaces.cc (349 lines changed)

@@ -31,6 +31,7 @@
 #include "macro-assembler.h"
 #include "mark-compact.h"
 #include "platform.h"
+#include "snapshot.h"

 namespace v8 {
 namespace internal {

@@ -263,7 +264,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
       capacity_(0),
       capacity_executable_(0),
-      size_(0),
+      memory_allocator_reserved_(0),
       size_executable_(0) {
 }

@@ -273,7 +274,7 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   ASSERT_GE(capacity_, capacity_executable_);

-  size_ = 0;
+  memory_allocator_reserved_ = 0;
   size_executable_ = 0;

   return true;

@@ -282,7 +283,7 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {

 void MemoryAllocator::TearDown() {
   // Check that spaces were torn down before MemoryAllocator.
-  ASSERT(size_ == 0);
+  CHECK_EQ(memory_allocator_reserved_, 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // ASSERT(size_executable_ == 0);
   capacity_ = 0;

@@ -295,8 +296,8 @@ void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
   // TODO(gc) make code_range part of memory allocator?
   ASSERT(reservation->IsReserved());
   size_t size = reservation->size();
-  ASSERT(size_ >= size);
-  size_ -= size;
+  ASSERT(memory_allocator_reserved_ >= size);
+  memory_allocator_reserved_ -= size;

   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

@@ -316,8 +317,8 @@ void MemoryAllocator::FreeMemory(Address base,
                                  size_t size,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
-  ASSERT(size_ >= size);
-  size_ -= size;
+  ASSERT(memory_allocator_reserved_ >= size);
+  memory_allocator_reserved_ -= size;

   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

@@ -343,7 +344,7 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size,
   VirtualMemory reservation(size, alignment);

   if (!reservation.IsReserved()) return NULL;
-  size_ += reservation.size();
+  memory_allocator_reserved_ += reservation.size();
   Address base = RoundUp(static_cast<Address>(reservation.address()),
                          alignment);
   controller->TakeControl(&reservation);

@@ -352,11 +353,14 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size,


 Address MemoryAllocator::AllocateAlignedMemory(size_t size,
+                                               size_t reserved_size,
                                                size_t alignment,
                                                Executability executable,
                                                VirtualMemory* controller) {
+  ASSERT(RoundUp(reserved_size, OS::CommitPageSize()) >=
+         RoundUp(size, OS::CommitPageSize()));
   VirtualMemory reservation;
-  Address base = ReserveAlignedMemory(size, alignment, &reservation);
+  Address base = ReserveAlignedMemory(reserved_size, alignment, &reservation);
   if (base == NULL) return NULL;
   if (!reservation.Commit(base,
                           size,
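
The theme running through these allocator changes is the split between reserved and committed memory: a chunk's full address range is reserved up front, but only a prefix is committed and actually usable, which is what reduces boot-up memory. A minimal POSIX illustration of the same pattern (not V8 code; V8 wraps this idea in its VirtualMemory class):

#include <sys/mman.h>
#include <cassert>
#include <cstddef>

int main() {
  const size_t kReserved = 1 << 20;    // reserve 1MB of address space
  const size_t kCommitted = 64 << 10;  // but commit only the first 64KB

  // Reserve: address space only, nothing accessible yet.
  void* base = mmap(NULL, kReserved, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  assert(base != MAP_FAILED);

  // Commit: make a prefix readable and writable on demand.
  int rc = mprotect(base, kCommitted, PROT_READ | PROT_WRITE);
  assert(rc == 0);

  static_cast<char*>(base)[0] = 1;  // fine: committed
  // static_cast<char*>(base)[kCommitted] = 1;  // would fault: only reserved

  munmap(base, kReserved);
  return 0;
}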
@@ -375,6 +379,53 @@ void Page::InitializeAsAnchor(PagedSpace* owner) {
 }


+void Page::CommitMore(intptr_t space_needed) {
+  intptr_t reserved_page_size = reservation_.IsReserved() ?
+                                reservation_.size() :
+                                Page::kPageSize;
+  ASSERT(size() + space_needed <= reserved_page_size);
+  // Increase the page size by at least 64k (this also rounds to OS page
+  // size).
+  int expand = Min(reserved_page_size - size(),
+                   RoundUp(size() + space_needed, Page::kGrowthUnit) - size());
+  ASSERT(expand <= kPageSize - size());
+  ASSERT(expand <= reserved_page_size - size());
+  Executability executable =
+      IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+  Address old_end = ObjectAreaEnd();
+  if (!VirtualMemory::CommitRegion(old_end, expand, executable)) return;
+
+  set_size(size() + expand);
+
+  PagedSpace* paged_space = reinterpret_cast<PagedSpace*>(owner());
+  paged_space->heap()->isolate()->memory_allocator()->AllocationBookkeeping(
+      paged_space,
+      old_end,
+      0,  // No new memory was reserved.
+      expand,  // New memory committed.
+      executable);
+  paged_space->IncreaseCapacity(expand);
+
+  // In spaces with alignment requirements (e.g. map space) we have to align
+  // the expanded area with the correct object alignment.
+  uintptr_t object_area_size = old_end - ObjectAreaStart();
+  uintptr_t aligned_object_area_size =
+      object_area_size - object_area_size % paged_space->ObjectAlignment();
+  if (aligned_object_area_size != object_area_size) {
+    aligned_object_area_size += paged_space->ObjectAlignment();
+  }
+  Address new_area =
+      reinterpret_cast<Address>(ObjectAreaStart() + aligned_object_area_size);
+  // In spaces with alignment requirements, this will waste the space for one
+  // object per doubling of the page size until the next GC.
+  paged_space->AddToFreeLists(old_end, new_area - old_end);
+
+  expand -= (new_area - old_end);
+
+  paged_space->AddToFreeLists(new_area, expand);
+}
+
+
 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                        Address start,
                                        SemiSpace* semi_space) {
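
A worked example of the growth arithmetic in Page::CommitMore, assuming the constants this patch introduces (kPageSizeBits = 20, so a 1MB page and a 64KB growth unit); Min and RoundUp are local stand-ins for V8's helpers:

#include <cassert>
#include <cstdint>

const int kPageSizeBits = 20;
const intptr_t kPageSize = intptr_t(1) << kPageSizeBits;           // 1MB
const intptr_t kGrowthUnit = intptr_t(1) << (kPageSizeBits - 4);   // 64KB

static intptr_t RoundUp(intptr_t x, intptr_t unit) {
  return (x + unit - 1) / unit * unit;
}
static intptr_t Min(intptr_t a, intptr_t b) { return a < b ? a : b; }

int main() {
  intptr_t committed = kGrowthUnit;  // page currently has 64KB committed
  intptr_t space_needed = 4 * 1024;  // pending allocation that does not fit
  intptr_t expand = Min(kPageSize - committed,
                        RoundUp(committed + space_needed, kGrowthUnit) -
                            committed);
  // The page grows by one whole 64KB unit, not by the 4KB actually needed,
  // so repeated small allocations do not trigger a commit every time.
  assert(expand == kGrowthUnit);
  return 0;
}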
@@ -460,9 +511,15 @@ void MemoryChunk::Unlink() {


 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+                                            intptr_t committed_body_size,
                                             Executability executable,
                                             Space* owner) {
-  size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+  ASSERT(body_size >= committed_body_size);
+  size_t chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + body_size,
+                              OS::CommitPageSize());
+  intptr_t committed_chunk_size =
+      committed_body_size + MemoryChunk::kObjectStartOffset;
+  committed_chunk_size = RoundUp(committed_chunk_size, OS::CommitPageSize());
   Heap* heap = isolate_->heap();
   Address base = NULL;
   VirtualMemory reservation;

@@ -482,20 +539,21 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
       ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                        MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
-      size_ += chunk_size;
-      // Update executable memory size.
-      size_executable_ += chunk_size;
+      // The AllocateAlignedMemory method will update the memory allocator
+      // memory used, but we are not using that if we have a code range, so
+      // we update it here.
+      memory_allocator_reserved_ += chunk_size;
     } else {
-      base = AllocateAlignedMemory(chunk_size,
+      base = AllocateAlignedMemory(committed_chunk_size,
+                                   chunk_size,
                                    MemoryChunk::kAlignment,
                                    executable,
                                    &reservation);
       if (base == NULL) return NULL;
-      // Update executable memory size.
-      size_executable_ += reservation.size();
     }
   } else {
-    base = AllocateAlignedMemory(chunk_size,
+    base = AllocateAlignedMemory(committed_chunk_size,
+                                 chunk_size,
                                  MemoryChunk::kAlignment,
                                  executable,
                                  &reservation);

@@ -503,21 +561,12 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
     if (base == NULL) return NULL;
   }

-#ifdef DEBUG
-  ZapBlock(base, chunk_size);
-#endif
-  isolate_->counters()->memory_allocated()->
-      Increment(static_cast<int>(chunk_size));
-
-  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
-  if (owner != NULL) {
-    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
-    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
-  }
+  AllocationBookkeeping(
+      owner, base, chunk_size, committed_chunk_size, executable);

   MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                 base,
-                                                chunk_size,
+                                                committed_chunk_size,
                                                 executable,
                                                 owner);
   result->set_reserved_memory(&reservation);

@@ -525,9 +574,40 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
 }


-Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
+void MemoryAllocator::AllocationBookkeeping(Space* owner,
+                                            Address base,
+                                            intptr_t reserved_chunk_size,
+                                            intptr_t committed_chunk_size,
+                                            Executability executable) {
+  if (executable == EXECUTABLE) {
+    // Update executable memory size.
+    size_executable_ += reserved_chunk_size;
+  }
+
+#ifdef DEBUG
+  ZapBlock(base, committed_chunk_size);
+#endif
+  isolate_->counters()->memory_allocated()->
+      Increment(static_cast<int>(committed_chunk_size));
+
+  LOG(isolate_, NewEvent("MemoryChunk", base, committed_chunk_size));
+  if (owner != NULL) {
+    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
+    PerformAllocationCallback(
+        space, kAllocationActionAllocate, committed_chunk_size);
+  }
+}
+
+
+Page* MemoryAllocator::AllocatePage(intptr_t committed_object_area_size,
+                                    PagedSpace* owner,
                                     Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
+  ASSERT(committed_object_area_size <= Page::kObjectAreaSize);
+
+  MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize,
+                                     committed_object_area_size,
+                                     executable,
+                                     owner);

   if (chunk == NULL) return NULL;
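
To make AllocateChunk's two sizes concrete, here is a small numeric sketch; the header size and OS commit granularity are assumptions chosen for illustration, not values taken from this patch:

#include <cassert>
#include <cstdint>

static intptr_t RoundUp(intptr_t x, intptr_t unit) {
  return (x + unit - 1) / unit * unit;
}

int main() {
  const intptr_t kCommitPageSize = 4096;    // assumed OS commit granularity
  const intptr_t kObjectStartOffset = 256;  // assumed chunk header size
  intptr_t body_size = 1024 * 1024 - kObjectStartOffset;  // full object area
  intptr_t committed_body_size = 64 * 1024;               // initial commit

  intptr_t chunk_size =                     // reserved: the whole page
      RoundUp(kObjectStartOffset + body_size, kCommitPageSize);
  intptr_t committed_chunk_size =           // committed: header + 64KB
      RoundUp(committed_body_size + kObjectStartOffset, kCommitPageSize);

  assert(chunk_size == 1024 * 1024);
  assert(committed_chunk_size == 64 * 1024 + 4096);
  return 0;
}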
@@ -538,7 +618,8 @@ Page* MemoryAllocator::AllocatePage(
 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                               Executability executable,
                                               Space* owner) {
-  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
+  MemoryChunk* chunk =
+      AllocateChunk(object_size, object_size, executable, owner);
   if (chunk == NULL) return NULL;
   return LargePage::Initialize(isolate_->heap(), chunk);
 }

@@ -559,8 +640,12 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
   if (reservation->IsReserved()) {
     FreeMemory(reservation, chunk->executable());
   } else {
+    // When we do not have a reservation that is because this allocation
+    // is part of the huge reserved chunk of memory reserved for code on
+    // x64.  In that case the size was rounded up to the page size on
+    // allocation so we do the same now when freeing.
     FreeMemory(chunk->address(),
-               chunk->size(),
+               RoundUp(chunk->size(), Page::kPageSize),
                chunk->executable());
   }
 }

@@ -640,11 +725,12 @@ void MemoryAllocator::RemoveMemoryAllocationCallback(

 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
-  float pct = static_cast<float>(capacity_ - size_) / capacity_;
+  float pct =
+      static_cast<float>(capacity_ - memory_allocator_reserved_) / capacity_;
   PrintF("  capacity: %" V8_PTR_PREFIX "d"
              ", used: %" V8_PTR_PREFIX "d"
              ", available: %%%d\n\n",
-         capacity_, size_, static_cast<int>(pct*100));
+         capacity_, memory_allocator_reserved_, static_cast<int>(pct*100));
 }
 #endif

@@ -723,7 +809,6 @@ MaybeObject* PagedSpace::FindObject(Address addr) {

 bool PagedSpace::CanExpand() {
   ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
-  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

   if (Capacity() == max_capacity_) return false;

@@ -735,11 +820,43 @@ bool PagedSpace::CanExpand() {
   return true;
 }

-bool PagedSpace::Expand() {
+bool PagedSpace::Expand(intptr_t size_in_bytes) {
   if (!CanExpand()) return false;

+  Page* last_page = anchor_.prev_page();
+  if (last_page != &anchor_) {
+    // We have run out of linear allocation space.  This may be because the
+    // most recently allocated page (stored last in the list) is a small one,
+    // that starts on a page aligned boundary, but has not a full kPageSize of
+    // committed memory.  Let's commit more memory for the page.
+    intptr_t reserved_page_size = last_page->reserved_memory()->IsReserved() ?
+        last_page->reserved_memory()->size() :
+        Page::kPageSize;
+    if (last_page->size() < reserved_page_size &&
+        (reserved_page_size - last_page->size()) >= size_in_bytes &&
+        !last_page->IsEvacuationCandidate() &&
+        last_page->WasSwept()) {
+      last_page->CommitMore(size_in_bytes);
+      return true;
+    }
+  }
+
+  // We initially only commit a part of the page, but the deserialization
+  // of the initial snapshot makes the assumption that it can deserialize
+  // into linear memory of a certain size per space, so some of the spaces
+  // need to have a little more committed memory.
+  int initial =
+      Max(OS::CommitPageSize(), static_cast<intptr_t>(Page::kGrowthUnit));
+
+  ASSERT(Page::kPageSize - initial < Page::kObjectAreaSize);
+
+  intptr_t expansion_size =
+      Max(initial,
+          RoundUpToPowerOf2(MemoryChunk::kObjectStartOffset + size_in_bytes)) -
+      MemoryChunk::kObjectStartOffset;
+
   Page* p = heap()->isolate()->memory_allocator()->
-      AllocatePage(this, executable());
+      AllocatePage(expansion_size, this, executable());
   if (p == NULL) return false;

   ASSERT(Capacity() <= max_capacity_);
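
The initial commit for a fresh page is sized so that the chunk header plus the usable area lands on a power of two. A worked example of that computation, with an assumed header size (the real MemoryChunk::kObjectStartOffset is build-dependent) and taking initial = kGrowthUnit = 64KB:

#include <cassert>
#include <cstdint>

static intptr_t RoundUpToPowerOf2(intptr_t x) {
  intptr_t result = 1;
  while (result < x) result <<= 1;
  return result;
}
static intptr_t Max(intptr_t a, intptr_t b) { return a > b ? a : b; }

int main() {
  const intptr_t kHeaderSize = 256;        // assumed chunk header size
  const intptr_t kGrowthUnit = 64 * 1024;  // assumes OS commit page <= 64KB

  intptr_t size_in_bytes = 100 * 1024;     // pending 100KB allocation
  intptr_t expansion_size =
      Max(kGrowthUnit, RoundUpToPowerOf2(kHeaderSize + size_in_bytes)) -
      kHeaderSize;
  // header + 100KB rounds up to 128KB, so roughly 128KB is committed for
  // objects instead of a full 1MB page.
  assert(expansion_size == 128 * 1024 - kHeaderSize);
  return 0;
}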
@@ -784,6 +901,8 @@ void PagedSpace::ReleasePage(Page* page) {
     allocation_info_.top = allocation_info_.limit = NULL;
   }

+  intptr_t size = page->ObjectAreaEnd() - page->ObjectAreaStart();
+
   page->Unlink();
   if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
     heap()->isolate()->memory_allocator()->Free(page);

@@ -792,8 +911,7 @@ void PagedSpace::ReleasePage(Page* page) {
   }

   ASSERT(Capacity() > 0);
-  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
-  accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
+  accounting_stats_.ShrinkSpace(size);
 }

@@ -1671,7 +1789,7 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
   // is big enough to be a FreeSpace with at least one extra word (the next
   // pointer), we set its map to be the free space map and its size to an
   // appropriate array length for the desired size from HeapObject::Size().
-  // If the block is too small (eg, one or two words), to hold both a size
+  // If the block is too small (e.g. one or two words), to hold both a size
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
   if (size_in_bytes > FreeSpace::kHeaderSize) {

@@ -1775,69 +1893,102 @@ int FreeList::Free(Address start, int size_in_bytes) {
 }


-FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
+FreeListNode* FreeList::PickNodeFromList(FreeListNode** list,
+                                         int* node_size,
+                                         int minimum_size) {
   FreeListNode* node = *list;

   if (node == NULL) return NULL;

+  ASSERT(node->map() == node->GetHeap()->raw_unchecked_free_space_map());
+
   while (node != NULL &&
          Page::FromAddress(node->address())->IsEvacuationCandidate()) {
     available_ -= node->Size();
     node = node->next();
   }

-  if (node != NULL) {
-    *node_size = node->Size();
-    *list = node->next();
-  } else {
+  if (node == NULL) {
     *list = NULL;
+    return NULL;
   }

+  // Gets the size without checking the map.  When we are booting we have
+  // a FreeListNode before we have created its map.
+  intptr_t size = reinterpret_cast<FreeSpace*>(node)->Size();
+
+  // We don't search the list for one that fits, preferring to look in the
+  // list of larger nodes, but we do check the first in the list, because
+  // if we had to expand the space or page we may have placed an entry that
+  // was just long enough at the head of one of the lists.
+  if (size < minimum_size) return NULL;
+
+  *node_size = size;
+  available_ -= size;
+  *list = node->next();
+
   return node;
 }


-FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+FreeListNode* FreeList::FindAbuttingNode(
+    int size_in_bytes, int* node_size, Address limit, FreeListNode** list_head) {
+  FreeListNode* first_node = *list_head;
+  if (first_node != NULL &&
+      first_node->address() == limit &&
+      reinterpret_cast<FreeSpace*>(first_node)->Size() >= size_in_bytes &&
+      !Page::FromAddress(first_node->address())->IsEvacuationCandidate()) {
+    FreeListNode* answer = first_node;
+    int size = reinterpret_cast<FreeSpace*>(first_node)->Size();
+    available_ -= size;
+    *node_size = size;
+    *list_head = first_node->next();
+    ASSERT(IsVeryLong() || available_ == SumFreeLists());
+    return answer;
+  }
+  return NULL;
+}
+
+
+FreeListNode* FreeList::FindNodeFor(int size_in_bytes,
+                                    int* node_size,
+                                    Address limit) {
   FreeListNode* node = NULL;

-  if (size_in_bytes <= kSmallAllocationMax) {
-    node = PickNodeFromList(&small_list_, node_size);
+  if (limit != NULL) {
+    // We may have a memory area at the head of the free list, which abuts the
+    // old linear allocation area.  This happens if the linear allocation area
+    // has been shortened to allow an incremental marking step to be performed.
+    // In that case we prefer to return the free memory area that is contiguous
+    // with the old linear allocation area.
+    node = FindAbuttingNode(size_in_bytes, node_size, limit, &large_list_);
+    if (node != NULL) return node;
+    node = FindAbuttingNode(size_in_bytes, node_size, limit, &huge_list_);
     if (node != NULL) return node;
   }

-  if (size_in_bytes <= kMediumAllocationMax) {
-    node = PickNodeFromList(&medium_list_, node_size);
-    if (node != NULL) return node;
-  }
+  node = PickNodeFromList(&small_list_, node_size, size_in_bytes);
+  ASSERT(IsVeryLong() || available_ == SumFreeLists());
+  if (node != NULL) return node;

-  if (size_in_bytes <= kLargeAllocationMax) {
-    node = PickNodeFromList(&large_list_, node_size);
-    if (node != NULL) return node;
-  }
+  node = PickNodeFromList(&medium_list_, node_size, size_in_bytes);
+  ASSERT(IsVeryLong() || available_ == SumFreeLists());
+  if (node != NULL) return node;

+  node = PickNodeFromList(&large_list_, node_size, size_in_bytes);
+  ASSERT(IsVeryLong() || available_ == SumFreeLists());
+  if (node != NULL) return node;
+
+  // The tricky third clause in this for statement is due to the fact that
+  // PickNodeFromList can cut pages out of the list if they are unavailable for
+  // new allocation (e.g. if they are on a page that has been scheduled for
+  // evacuation).
   for (FreeListNode** cur = &huge_list_;
        *cur != NULL;
-       cur = (*cur)->next_address()) {
-    FreeListNode* cur_node = *cur;
-    while (cur_node != NULL &&
-           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
-      available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
-      cur_node = cur_node->next();
-    }
-
-    *cur = cur_node;
-    if (cur_node == NULL) break;
-
-    ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
-    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
-    int size = cur_as_free_space->Size();
-    if (size >= size_in_bytes) {
-      // Large enough node found.  Unlink it from the list.
-      node = *cur;
-      *node_size = size;
-      *cur = node->next();
-      break;
-    }
+       cur = (*cur) == NULL ? cur : (*cur)->next_address()) {
+    node = PickNodeFromList(cur, node_size, size_in_bytes);
+    ASSERT(IsVeryLong() || available_ == SumFreeLists());
+    if (node != NULL) return node;
   }

   return node;
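
For readers new to V8's free lists: FindNodeFor picks from size-segregated lists (small, medium, large, huge), and after this patch PickNodeFromList re-checks the head node against a minimum_size, because partially committed pages can park a block on a list that is smaller than the list's nominal size class. A toy version of that head-check (plain C++ stand-in, not the real FreeListNode machinery):

#include <cassert>
#include <cstddef>

// Toy free-list node illustrating the minimum_size head-check added to
// FreeList::PickNodeFromList.  Only the head is examined; if it is too
// small, the caller falls through to a list of larger nodes.
struct Node {
  size_t size;
  Node* next;
};

Node* PickNodeFromList(Node** list, size_t* node_size, size_t minimum_size) {
  Node* node = *list;
  if (node == NULL) return NULL;
  if (node->size < minimum_size) return NULL;  // head too small: give up here
  *node_size = node->size;
  *list = node->next;  // unlink the head
  return node;
}

int main() {
  Node small = { 24, NULL };  // undersized block parked on this list
  Node* list = &small;
  size_t got = 0;
  assert(PickNodeFromList(&list, &got, 32) == NULL);  // skipped, not unlinked
  assert(PickNodeFromList(&list, &got, 16) != NULL && got == 24);
  return 0;
}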
@@ -1856,10 +2007,23 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   ASSERT(owner_->limit() - owner_->top() < size_in_bytes);

   int new_node_size = 0;
-  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+  FreeListNode* new_node =
+      FindNodeFor(size_in_bytes, &new_node_size, owner_->limit());
   if (new_node == NULL) return NULL;

-  available_ -= new_node_size;
+  if (new_node->address() == owner_->limit()) {
+    // The new freelist node we were given is an extension of the one we had
+    // last.  This is a common thing to happen when we extend a small page by
+    // committing more memory.  In this case we just add the new node to the
+    // linear allocation area and recurse.
+    owner_->Allocate(new_node_size);
+    owner_->SetTop(owner_->top(), new_node->address() + new_node_size);
+    MaybeObject* allocation = owner_->AllocateRaw(size_in_bytes);
+    Object* answer;
+    if (!allocation->ToObject(&answer)) return NULL;
+    return HeapObject::cast(answer);
+  }
+
   ASSERT(IsVeryLong() || available_ == SumFreeLists());

   int bytes_left = new_node_size - size_in_bytes;

@@ -1869,7 +2033,9 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap.  This also puts it back in the free list
   // if it is big enough.
-  owner_->Free(owner_->top(), old_linear_size);
+  if (old_linear_size != 0) {
+    owner_->AddToFreeLists(owner_->top(), old_linear_size);
+  }

 #ifdef DEBUG
   for (int i = 0; i < size_in_bytes / kPointerSize; i++) {

@@ -1898,8 +2064,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
     // We don't want to give too large linear areas to the allocator while
     // incremental marking is going on, because we won't check again whether
     // we want to do another increment until the linear area is used up.
-    owner_->Free(new_node->address() + size_in_bytes + linear_size,
-                 new_node_size - size_in_bytes - linear_size);
+    owner_->AddToFreeLists(new_node->address() + size_in_bytes + linear_size,
+                           new_node_size - size_in_bytes - linear_size);
     owner_->SetTop(new_node->address() + size_in_bytes,
                    new_node->address() + size_in_bytes + linear_size);
   } else if (bytes_left > 0) {

@@ -1908,6 +2074,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
     owner_->SetTop(new_node->address() + size_in_bytes,
                    new_node->address() + new_node_size);
   } else {
+    ASSERT(bytes_left == 0);
     // TODO(gc) Try not freeing linear allocation region when bytes_left
     // are zero.
     owner_->SetTop(NULL, NULL);

@@ -2040,7 +2207,9 @@ bool NewSpace::ReserveSpace(int bytes) {
   HeapObject* allocation = HeapObject::cast(object);
   Address top = allocation_info_.top;
   if ((top - bytes) == allocation->address()) {
-    allocation_info_.top = allocation->address();
+    Address new_top = allocation->address();
+    ASSERT(new_top >= Page::FromAddress(new_top - 1)->ObjectAreaStart());
+    allocation_info_.top = new_top;
     return true;
   }
   // There may be a borderline case here where the allocation succeeded, but

@@ -2055,7 +2224,7 @@ void PagedSpace::PrepareForMarkCompact() {
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap.
   int old_linear_size = static_cast<int>(limit() - top());
-  Free(top(), old_linear_size);
+  AddToFreeLists(top(), old_linear_size);
   SetTop(NULL, NULL);

   // Stop lazy sweeping and clear marking bits for unswept pages.

@@ -2098,10 +2267,13 @@ bool PagedSpace::ReserveSpace(int size_in_bytes) {
   // Mark the old linear allocation area with a free space so it can be
   // skipped when scanning the heap.  This also puts it back in the free list
   // if it is big enough.
-  Free(top(), old_linear_size);
+  AddToFreeLists(top(), old_linear_size);

   SetTop(new_area->address(), new_area->address() + size_in_bytes);
-  Allocate(size_in_bytes);
+  // The AddToFreeLists call above will reduce the size of the space in the
+  // allocation stats.  We don't need to add this linear area to the size
+  // with an Allocate(size_in_bytes) call here, because the
+  // free_list_.Allocate() call above already accounted for this memory.
   return true;
 }

@@ -2182,7 +2354,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   }

   // Try to expand the space and allocate in the new next page.
-  if (Expand()) {
+  if (Expand(size_in_bytes)) {
     return free_list_.Allocate(size_in_bytes);
   }

@@ -2543,6 +2715,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       heap()->mark_compact_collector()->ReportDeleteIfNeeded(
           object, heap()->isolate());
       size_ -= static_cast<int>(page->size());
+      ASSERT(size_ >= 0);
       objects_size_ -= object->Size();
       page_count_--;

src/spaces.h (114 lines changed)

@@ -505,11 +505,9 @@ class MemoryChunk {
   static const int kObjectStartOffset = kBodyOffset - 1 +
       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);

-  size_t size() const { return size_; }
+  intptr_t size() const { return size_; }

-  void set_size(size_t size) {
-    size_ = size;
-  }
+  void set_size(size_t size) { size_ = size; }

   Executability executable() {
     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;

@@ -661,7 +659,7 @@ class Page : public MemoryChunk {
   Address ObjectAreaStart() { return address() + kObjectStartOffset; }

   // Returns the end address (exclusive) of the object area in this page.
-  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
+  Address ObjectAreaEnd() { return address() + size(); }

   // Checks whether an address is page aligned.
   static bool IsAlignedToPageSize(Address a) {

@@ -680,11 +678,17 @@ class Page : public MemoryChunk {
     return address() + offset;
   }

+  // Expand the committed area for pages that are small.
+  void CommitMore(intptr_t space_needed);
+
   // ---------------------------------------------------------------------
   // Page size in bytes.  This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;

+  // For a 1Mbyte page grow 64k at a time.
+  static const int kGrowthUnit = 1 << (kPageSizeBits - 4);
+
   // Page size mask.
   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
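
With kPageSizeBits = 20, the new constant works out to a 1MB page grown in 64KB steps; a stand-alone sanity check (uses C++11 static_assert purely for illustration):

const int kPageSizeBits = 20;                      // as in V8's spaces.h
const int kPageSize = 1 << kPageSizeBits;          // 1MB per page
const int kGrowthUnit = 1 << (kPageSizeBits - 4);  // 1/16 page = 64KB

static_assert(kPageSize == 1024 * 1024, "page is 1MB");
static_assert(kGrowthUnit == 64 * 1024, "pages grow in 64KB units");
static_assert(kPageSize % kGrowthUnit == 0, "whole number of growth units");

int main() { return 0; }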
@ -849,12 +853,10 @@ class CodeRange {
|
|||||||
FreeBlock(Address start_arg, size_t size_arg)
|
FreeBlock(Address start_arg, size_t size_arg)
|
||||||
: start(start_arg), size(size_arg) {
|
: start(start_arg), size(size_arg) {
|
||||||
ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
|
ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
|
||||||
ASSERT(size >= static_cast<size_t>(Page::kPageSize));
|
|
||||||
}
|
}
|
||||||
FreeBlock(void* start_arg, size_t size_arg)
|
FreeBlock(void* start_arg, size_t size_arg)
|
||||||
: start(static_cast<Address>(start_arg)), size(size_arg) {
|
: start(static_cast<Address>(start_arg)), size(size_arg) {
|
||||||
ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
|
ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
|
||||||
ASSERT(size >= static_cast<size_t>(Page::kPageSize));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Address start;
|
Address start;
|
||||||
@ -950,7 +952,9 @@ class MemoryAllocator {
|
|||||||
|
|
||||||
void TearDown();
|
void TearDown();
|
||||||
|
|
||||||
Page* AllocatePage(PagedSpace* owner, Executability executable);
|
Page* AllocatePage(intptr_t object_area_size,
|
||||||
|
PagedSpace* owner,
|
||||||
|
Executability executable);
|
||||||
|
|
||||||
LargePage* AllocateLargePage(intptr_t object_size,
|
LargePage* AllocateLargePage(intptr_t object_size,
|
||||||
Executability executable,
|
Executability executable,
|
||||||
@ -959,10 +963,14 @@ class MemoryAllocator {
|
|||||||
void Free(MemoryChunk* chunk);
|
void Free(MemoryChunk* chunk);
|
||||||
|
|
||||||
// Returns the maximum available bytes of heaps.
|
// Returns the maximum available bytes of heaps.
|
||||||
intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
|
intptr_t Available() {
|
||||||
|
return capacity_ < memory_allocator_reserved_ ?
|
||||||
|
0 :
|
||||||
|
capacity_ - memory_allocator_reserved_;
|
||||||
|
}
|
||||||
|
|
||||||
// Returns allocated spaces in bytes.
|
// Returns allocated spaces in bytes.
|
||||||
intptr_t Size() { return size_; }
|
intptr_t Size() { return memory_allocator_reserved_; }
|
||||||
|
|
||||||
// Returns the maximum available executable bytes of heaps.
|
// Returns the maximum available executable bytes of heaps.
|
||||||
intptr_t AvailableExecutable() {
|
intptr_t AvailableExecutable() {
|
||||||
@ -984,6 +992,7 @@ class MemoryAllocator {
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
MemoryChunk* AllocateChunk(intptr_t body_size,
|
MemoryChunk* AllocateChunk(intptr_t body_size,
|
||||||
|
intptr_t committed_body_size,
|
||||||
Executability executable,
|
Executability executable,
|
||||||
Space* space);
|
Space* space);
|
||||||
|
|
||||||
@ -991,6 +1000,7 @@ class MemoryAllocator {
|
|||||||
size_t alignment,
|
size_t alignment,
|
||||||
VirtualMemory* controller);
|
VirtualMemory* controller);
|
||||||
Address AllocateAlignedMemory(size_t requested,
|
Address AllocateAlignedMemory(size_t requested,
|
||||||
|
size_t committed,
|
||||||
size_t alignment,
|
size_t alignment,
|
||||||
Executability executable,
|
Executability executable,
|
||||||
VirtualMemory* controller);
|
VirtualMemory* controller);
|
||||||
@ -1010,6 +1020,12 @@ class MemoryAllocator {
|
|||||||
// and false otherwise.
|
// and false otherwise.
|
||||||
bool UncommitBlock(Address start, size_t size);
|
bool UncommitBlock(Address start, size_t size);
|
||||||
|
|
||||||
|
void AllocationBookkeeping(Space* owner,
|
||||||
|
Address base,
|
||||||
|
intptr_t reserved_size,
|
||||||
|
intptr_t committed_size,
|
||||||
|
Executability executable);
|
||||||
|
|
||||||
// Zaps a contiguous block of memory [start..(start+size)[ thus
|
// Zaps a contiguous block of memory [start..(start+size)[ thus
|
||||||
// filling it up with a recognizable non-NULL bit pattern.
|
// filling it up with a recognizable non-NULL bit pattern.
|
||||||
void ZapBlock(Address start, size_t size);
|
void ZapBlock(Address start, size_t size);
|
||||||
@ -1037,7 +1053,7 @@ class MemoryAllocator {
|
|||||||
size_t capacity_executable_;
|
size_t capacity_executable_;
|
||||||
|
|
||||||
// Allocated space size in bytes.
|
// Allocated space size in bytes.
|
||||||
size_t size_;
|
size_t memory_allocator_reserved_;
|
||||||
// Allocated executable space size in bytes.
|
// Allocated executable space size in bytes.
|
||||||
size_t size_executable_;
|
size_t size_executable_;
|
||||||
|
|
||||||
@ -1382,9 +1398,15 @@ class FreeList BASE_EMBEDDED {
|
|||||||
static const int kMinBlockSize = 3 * kPointerSize;
|
static const int kMinBlockSize = 3 * kPointerSize;
|
||||||
static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
|
static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
|
||||||
|
|
||||||
FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
|
FreeListNode* PickNodeFromList(FreeListNode** list,
|
||||||
|
int* node_size,
|
||||||
|
int minimum_size);
|
||||||
|
|
||||||
FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
|
FreeListNode* FindNodeFor(int size_in_bytes, int* node_size, Address limit);
|
||||||
|
FreeListNode* FindAbuttingNode(int size_in_bytes,
|
||||||
|
int* node_size,
|
||||||
|
Address limit,
|
||||||
|
FreeListNode** list_head);
|
||||||
|
|
||||||
PagedSpace* owner_;
|
PagedSpace* owner_;
|
||||||
Heap* heap_;
|
Heap* heap_;
|
||||||
@@ -1484,6 +1506,8 @@ class PagedSpace : public Space {
   // free bytes that were not found at all due to lazy sweeping.
   virtual intptr_t Waste() { return accounting_stats_.Waste(); }

+  virtual int ObjectAlignment() { return kObjectAlignment; }
+
   // Returns the allocation pointer in this space.
   Address top() { return allocation_info_.top; }
   Address limit() { return allocation_info_.limit; }
@@ -1498,7 +1522,7 @@ class PagedSpace : public Space {
   // the free list or accounted as waste.
   // If add_to_freelist is false then just accounting stats are updated and
   // no attempt to add area to free list is made.
-  int Free(Address start, int size_in_bytes) {
+  int AddToFreeLists(Address start, int size_in_bytes) {
     int wasted = free_list_.Free(start, size_in_bytes);
     accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
     return size_in_bytes - wasted;
@@ -1506,6 +1530,7 @@ class PagedSpace : public Space {

   // Set space allocation info.
   void SetTop(Address top, Address limit) {
+    ASSERT(top == NULL || top >= Page::FromAddress(top - 1)->ObjectAreaStart());
     ASSERT(top == limit ||
            Page::FromAddress(top) == Page::FromAddress(limit - 1));
     allocation_info_.top = top;
@@ -1572,12 +1597,14 @@ class PagedSpace : public Space {

   void IncreaseUnsweptFreeBytes(Page* p) {
     ASSERT(ShouldBeSweptLazily(p));
-    unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes());
+    unswept_free_bytes_ +=
+        (p->ObjectAreaEnd() - p->ObjectAreaStart()) - p->LiveBytes();
   }

   void DecreaseUnsweptFreeBytes(Page* p) {
     ASSERT(ShouldBeSweptLazily(p));
-    unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes());
+    unswept_free_bytes_ -=
+        (p->ObjectAreaEnd() - p->ObjectAreaStart() - p->LiveBytes());
   }

   bool AdvanceSweeper(intptr_t bytes_to_sweep);
@@ -1586,6 +1613,7 @@ class PagedSpace : public Space {
     return !first_unswept_page_->is_valid();
   }

+  inline bool HasAPage() { return anchor_.next_page() != &anchor_; }
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }

@@ -1596,15 +1624,17 @@ class PagedSpace : public Space {
     FreeList::SizeStats sizes;
     free_list_.CountFreeListItems(p, &sizes);

+    intptr_t object_area_size = p->ObjectAreaEnd() - p->ObjectAreaStart();
+
     intptr_t ratio;
     intptr_t ratio_threshold;
     if (identity() == CODE_SPACE) {
       ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
-          Page::kObjectAreaSize;
+          object_area_size;
       ratio_threshold = 10;
     } else {
       ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
-          Page::kObjectAreaSize;
+          object_area_size;
       ratio_threshold = 15;
     }

@@ -1614,20 +1644,20 @@ class PagedSpace : public Space {
              identity(),
              static_cast<int>(sizes.small_size_),
              static_cast<double>(sizes.small_size_ * 100) /
-                 Page::kObjectAreaSize,
+                 object_area_size,
              static_cast<int>(sizes.medium_size_),
              static_cast<double>(sizes.medium_size_ * 100) /
-                 Page::kObjectAreaSize,
+                 object_area_size,
              static_cast<int>(sizes.large_size_),
              static_cast<double>(sizes.large_size_ * 100) /
-                 Page::kObjectAreaSize,
+                 object_area_size,
              static_cast<int>(sizes.huge_size_),
              static_cast<double>(sizes.huge_size_ * 100) /
-                 Page::kObjectAreaSize,
+                 object_area_size,
              (ratio > ratio_threshold) ? "[fragmented]" : "");
     }

-    if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
+    if (FLAG_always_compact && sizes.Total() != object_area_size) {
       return 1;
     }
     if (ratio <= ratio_threshold) return 0;  // Not fragmented.
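For concreteness, a worked instance of the code-space branch of the fragmentation heuristic above, with made-up free-list sizes on a hypothetical 1 MB object area:

#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative values, not measured data.
  int64_t medium_size = 8 * 1024;       // Medium free-list bytes on the page.
  int64_t large_size = 16 * 1024;       // Large free-list bytes on the page.
  int64_t object_area_size = 1024 * 1024;
  int64_t ratio =
      (medium_size * 10 + large_size * 2) * 100 / object_area_size;
  // (81920 + 32768) * 100 / 1048576 == 10, which is not > the code-space
  // threshold of 10, so this page would not be flagged as fragmented.
  printf("ratio = %lld\n", static_cast<long long>(ratio));
  return 0;
}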
@@ -1658,12 +1688,6 @@ class PagedSpace : public Space {
   // Normal allocation information.
   AllocationInfo allocation_info_;

-  // Bytes of each page that cannot be allocated. Possibly non-zero
-  // for pages in spaces with only fixed-size objects. Always zero
-  // for pages in spaces with variable sized objects (those pages are
-  // padded with free-list nodes).
-  int page_extra_;
-
   bool was_swept_conservatively_;

   // The first page to be swept when the lazy sweeper advances. Is set
@@ -1675,10 +1699,11 @@ class PagedSpace : public Space {
   // done conservatively.
   intptr_t unswept_free_bytes_;

-  // Expands the space by allocating a fixed number of pages. Returns false if
-  // it cannot allocate requested number of pages from OS, or if the hard heap
-  // size limit has been hit.
-  bool Expand();
+  // Expands the space by allocating a page. Returns false if it cannot
+  // allocate a page from OS, or if the hard heap size limit has been hit. The
+  // new page will have at least enough committed space to satisfy the object
+  // size indicated by the allocation_size argument.
+  bool Expand(intptr_t allocation_size);

   // Generic fast case allocation function that tries linear allocation at the
   // address denoted by top in allocation_info_.
@@ -1833,7 +1858,8 @@ class SemiSpace : public Space {
       anchor_(this),
       current_page_(NULL) { }

-  // Sets up the semispace using the given chunk.
+  // Sets up the semispace using the given chunk. After this, call Commit()
+  // to make the semispace usable.
   void SetUp(Address start, int initial_capacity, int maximum_capacity);

   // Tear down the space. Heap memory was not allocated by the space, so it
@@ -2338,14 +2364,7 @@ class OldSpace : public PagedSpace {
            intptr_t max_capacity,
            AllocationSpace id,
            Executability executable)
-      : PagedSpace(heap, max_capacity, id, executable) {
-    page_extra_ = 0;
-  }
-
-  // The limit of allocation for a page in this space.
-  virtual Address PageAllocationLimit(Page* page) {
-    return page->ObjectAreaEnd();
-  }
+      : PagedSpace(heap, max_capacity, id, executable) { }

  public:
   TRACK_MEMORY("OldSpace")
@@ -2372,17 +2391,12 @@ class FixedSpace : public PagedSpace {
              const char* name)
       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes),
-        name_(name) {
-    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
-  }
-
-  // The limit of allocation for a page in this space.
-  virtual Address PageAllocationLimit(Page* page) {
-    return page->ObjectAreaEnd() - page_extra_;
-  }
+        name_(name) { }

   int object_size_in_bytes() { return object_size_in_bytes_; }

+  virtual int ObjectAlignment() { return object_size_in_bytes_; }
+
   // Prepares for a mark-compact GC.
   virtual void PrepareForMarkCompact();

src/store-buffer.cc

@@ -496,7 +496,6 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
   Address map_aligned_end = MapEndAlign(end);

   ASSERT(map_aligned_start == start);
-  ASSERT(map_aligned_end == end);

   FindPointersToNewSpaceInMaps(map_aligned_start,
                                map_aligned_end,
@@ -524,52 +523,57 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
     RegionCallback region_callback,
     ObjectSlotCallback slot_callback) {
   Address visitable_start = page->ObjectAreaStart();
-  Address end_of_page = page->ObjectAreaEnd();

   Address visitable_end = visitable_start;

   Object* free_space_map = heap_->free_space_map();
   Object* two_pointer_filler_map = heap_->two_pointer_filler_map();

-  while (visitable_end < end_of_page) {
-    Object* o = *reinterpret_cast<Object**>(visitable_end);
-    // Skip fillers but not things that look like fillers in the special
-    // garbage section which can contain anything.
-    if (o == free_space_map ||
-        o == two_pointer_filler_map ||
-        (visitable_end == space->top() && visitable_end != space->limit())) {
-      if (visitable_start != visitable_end) {
-        // After calling this the special garbage section may have moved.
-        (this->*region_callback)(visitable_start,
-                                 visitable_end,
-                                 slot_callback);
-        if (visitable_end >= space->top() && visitable_end < space->limit()) {
-          visitable_end = space->limit();
-          visitable_start = visitable_end;
-          continue;
-        }
-      }
-      if (visitable_end == space->top() && visitable_end != space->limit()) {
-        visitable_start = visitable_end = space->limit();
-      } else {
-        // At this point we are either at the start of a filler or we are at
-        // the point where the space->top() used to be before the
-        // visit_pointer_region call above. Either way we can skip the
-        // object at the current spot: We don't promise to visit objects
-        // allocated during heap traversal, and if space->top() moved then it
-        // must be because an object was allocated at this point.
-        visitable_start =
-            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
-        visitable_end = visitable_start;
-      }
-    } else {
-      ASSERT(o != free_space_map);
-      ASSERT(o != two_pointer_filler_map);
-      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
-      visitable_end += kPointerSize;
-    }
-  }
-  ASSERT(visitable_end == end_of_page);
+  while (true) {  // While the page grows (doesn't normally happen).
+    Address end_of_page = page->ObjectAreaEnd();
+    while (visitable_end < end_of_page) {
+      Object* o = *reinterpret_cast<Object**>(visitable_end);
+      // Skip fillers but not things that look like fillers in the special
+      // garbage section which can contain anything.
+      if (o == free_space_map ||
+          o == two_pointer_filler_map ||
+          (visitable_end == space->top() && visitable_end != space->limit())) {
+        if (visitable_start != visitable_end) {
+          // After calling this the special garbage section may have moved.
+          (this->*region_callback)(visitable_start,
+                                   visitable_end,
+                                   slot_callback);
+          if (visitable_end >= space->top() && visitable_end < space->limit()) {
+            visitable_end = space->limit();
+            visitable_start = visitable_end;
+            continue;
+          }
+        }
+        if (visitable_end == space->top() && visitable_end != space->limit()) {
+          visitable_start = visitable_end = space->limit();
+        } else {
+          // At this point we are either at the start of a filler or we are at
+          // the point where the space->top() used to be before the
+          // visit_pointer_region call above. Either way we can skip the
+          // object at the current spot: We don't promise to visit objects
+          // allocated during heap traversal, and if space->top() moved then it
+          // must be because an object was allocated at this point.
+          visitable_start =
+              visitable_end + HeapObject::FromAddress(visitable_end)->Size();
+          visitable_end = visitable_start;
+        }
+      } else {
+        ASSERT(o != free_space_map);
+        ASSERT(o != two_pointer_filler_map);
+        ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
+        visitable_end += kPointerSize;
+      }
+    }
+    ASSERT(visitable_end >= end_of_page);
+    // If the page did not grow we are done.
+    if (end_of_page == page->ObjectAreaEnd()) break;
+  }
+  ASSERT(visitable_end == page->ObjectAreaEnd());
   if (visitable_start != visitable_end) {
     (this->*region_callback)(visitable_start,
                              visitable_end,
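The restructure wraps the page scan in an outer while (true) so that, if the region callback grows the page's object area, the scan resumes against the fresh end instead of stopping at a stale end_of_page snapshot. The same snapshot-and-rescan pattern in a self-contained sketch (a std::vector stands in for the growable page; none of this is V8 code):

#include <cstddef>
#include <vector>

// Processing an element may append more elements; rescan until the end
// stops moving, mirroring the page-growth loop above.
void ProcessAll(std::vector<int>* items) {
  size_t i = 0;
  while (true) {  // While the range grows (doesn't normally happen).
    size_t end = items->size();                    // Snapshot the end.
    while (i < end) {
      if ((*items)[i] == 42) items->push_back(7);  // May grow the range.
      ++i;
    }
    if (end == items->size()) break;  // No growth since snapshot: done.
  }
}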
src/utils.h
@@ -153,11 +153,9 @@ int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
 }


-// Returns the smallest power of two which is >= x. If you pass in a
-// number that is already a power of two, it is returned as is.
-// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
-// figure 3-3, page 48, where the function is called clp2.
-inline uint32_t RoundUpToPowerOf2(uint32_t x) {
+template<typename int_type>
+inline int_type RoundUpToPowerOf2(int_type x_argument) {
+  uintptr_t x = static_cast<uintptr_t>(x_argument);
   ASSERT(x <= 0x80000000u);
   x = x - 1;
   x = x | (x >> 1);
@@ -165,7 +163,7 @@ inline uint32_t RoundUpToPowerOf2(uint32_t x) {
   x = x | (x >> 4);
   x = x | (x >> 8);
   x = x | (x >> 16);
-  return x + 1;
+  return static_cast<int_type>(x + 1);
 }

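The bit-smearing body is unchanged: it is clp2 from Hacker's Delight (figure 3-3), which subtracts one, ORs in right-shifted copies until every bit below the highest set bit is one, then adds one to land on the next power of two. A stand-alone demo (the x >> 2 step, which sits outside this hunk, is included for completeness):

#include <cassert>
#include <cstdint>

// Round up to the nearest power of two; powers of two map to themselves.
uint32_t Clp2(uint32_t x) {
  x = x - 1;
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return x + 1;
}

int main() {
  assert(Clp2(1) == 1);
  assert(Clp2(5) == 8);    // 5-1 = 100b smears to 111b; +1 gives 1000b.
  assert(Clp2(64) == 64);  // Already a power of two: unchanged.
  assert(Clp2(65) == 128);
  return 0;
}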
@@ -1236,17 +1236,14 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
        obj = iterator.next()) {
     size_of_objects_2 += obj->Size();
   }
-  // Delta must be within 5% of the larger result.
-  // TODO(gc): Tighten this up by distinguishing between byte
-  // arrays that are real and those that merely mark free space
-  // on the heap.
+  // Delta must be within 1% of the larger result.
   if (size_of_objects_1 > size_of_objects_2) {
     intptr_t delta = size_of_objects_1 - size_of_objects_2;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
            "Iterator: %" V8_PTR_PREFIX "d, "
            "delta: %" V8_PTR_PREFIX "d\n",
            size_of_objects_1, size_of_objects_2, delta);
-    CHECK_GT(size_of_objects_1 / 20, delta);
+    CHECK_GT(size_of_objects_1 / 100, delta);
   } else {
     intptr_t delta = size_of_objects_2 - size_of_objects_1;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
@@ -534,15 +534,15 @@ TEST(BootUpMemoryUse) {
   intptr_t booted_memory = MemoryInUse();
   if (sizeof(initial_memory) == 8) {
     if (v8::internal::Snapshot::IsEnabled()) {
-      CHECK_LE(booted_memory - initial_memory, 6654 * 1024);  // 6444.
+      CHECK_LE(booted_memory - initial_memory, 3050 * 1024);  // 2984.
     } else {
-      CHECK_LE(booted_memory - initial_memory, 6777 * 1024);  // 6596.
+      CHECK_LE(booted_memory - initial_memory, 3050 * 1024);  // 3008.
     }
   } else {
     if (v8::internal::Snapshot::IsEnabled()) {
-      CHECK_LE(booted_memory - initial_memory, 6500 * 1024);  // 6356.
+      CHECK_LE(booted_memory - initial_memory, 2000 * 1024);  // 1940.
     } else {
-      CHECK_LE(booted_memory - initial_memory, 6654 * 1024);  // 6424
+      CHECK_LE(booted_memory - initial_memory, 2000 * 1024);  // 1948
     }
   }
 }
@@ -140,8 +140,8 @@ TEST(MemoryAllocator) {
                        heap->MaxReserved(),
                        OLD_POINTER_SPACE,
                        NOT_EXECUTABLE);
-  Page* first_page =
-      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+  Page* first_page = memory_allocator->AllocatePage(
+      Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);

   first_page->InsertAfter(faked_space.anchor()->prev_page());
   CHECK(first_page->is_valid());
@@ -154,7 +154,8 @@ TEST(MemoryAllocator) {

   // Again, we should get n or n - 1 pages.
   Page* other =
-      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+      memory_allocator->AllocatePage(
+          Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
   CHECK(other->is_valid());
   total_pages++;
   other->InsertAfter(first_page);