diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc index 56ff4549da..762fd98bdb 100644 --- a/src/deoptimizer.cc +++ b/src/deoptimizer.cc @@ -1150,6 +1150,7 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) { MemoryChunk* chunk = Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size, + desc.instr_size, EXECUTABLE, NULL); if (chunk == NULL) { diff --git a/src/heap.cc b/src/heap.cc index ff978cf3b7..59dc5d8713 100644 --- a/src/heap.cc +++ b/src/heap.cc @@ -582,10 +582,14 @@ void Heap::ReserveSpace( PagedSpace* map_space = Heap::map_space(); PagedSpace* cell_space = Heap::cell_space(); LargeObjectSpace* lo_space = Heap::lo_space(); + bool one_old_space_gc_has_been_performed = false; bool gc_performed = true; int counter = 0; static const int kThreshold = 20; + bool old_space_gc_performed; + while (gc_performed && counter++ < kThreshold) { + old_space_gc_performed = false; gc_performed = false; if (!new_space->ReserveSpace(new_space_size)) { Heap::CollectGarbage(NEW_SPACE); @@ -594,22 +598,27 @@ void Heap::ReserveSpace( if (!old_pointer_space->ReserveSpace(pointer_space_size)) { Heap::CollectGarbage(OLD_POINTER_SPACE); gc_performed = true; + old_space_gc_performed = true; } if (!(old_data_space->ReserveSpace(data_space_size))) { Heap::CollectGarbage(OLD_DATA_SPACE); gc_performed = true; + old_space_gc_performed = true; } if (!(code_space->ReserveSpace(code_space_size))) { Heap::CollectGarbage(CODE_SPACE); gc_performed = true; + old_space_gc_performed = true; } if (!(map_space->ReserveSpace(map_space_size))) { Heap::CollectGarbage(MAP_SPACE); gc_performed = true; + old_space_gc_performed = true; } if (!(cell_space->ReserveSpace(cell_space_size))) { Heap::CollectGarbage(CELL_SPACE); gc_performed = true; + old_space_gc_performed = true; } // We add a slack-factor of 2 in order to have space for a series of // large-object allocations that are only just larger than the page size. @@ -619,15 +628,22 @@ void Heap::ReserveSpace( // allocation in the other spaces. large_object_size += cell_space_size + map_space_size + code_space_size + data_space_size + pointer_space_size; - if (!(lo_space->ReserveSpace(large_object_size))) { + + // If we already did one GC in order to make space in old space, there is + // no sense in doing another one. We will attempt to force through the + // large object space allocation, which comes directly from the OS, + // regardless of any soft limit. + if (!one_old_space_gc_has_been_performed && + !(lo_space->ReserveSpace(large_object_size))) { Heap::CollectGarbage(LO_SPACE); gc_performed = true; } + if (old_space_gc_performed) one_old_space_gc_has_been_performed = true; } if (gc_performed) { // Failed to reserve the space after several attempts. V8::FatalProcessOutOfMemory("Heap::ReserveSpace"); } } diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc index 6248524668..05f60faf2c 100644 --- a/src/incremental-marking.cc +++ b/src/incremental-marking.cc @@ -287,7 +287,7 @@ void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk, // It's difficult to filter out slots recorded for large objects.
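The Heap::ReserveSpace hunk above only lets the large-object reservation trigger a GC when no old-space GC has already run in this iteration, since large-object pages come straight from the OS anyway. A minimal standalone sketch of that retry pattern, using a made-up SpaceSim type and ReserveAll function rather than V8's API:

#include <cstdio>

// Hypothetical stand-in for a space: a reservation succeeds once a GC has run.
struct SpaceSim {
  bool needs_gc;
  bool Reserve() { return !needs_gc; }
  void CollectGarbage() { needs_gc = false; }
};

// Mirrors the control flow of the patched Heap::ReserveSpace: at most one
// old-space GC is allowed before the large-object GC is suppressed.
bool ReserveAll(SpaceSim* new_space, SpaceSim* old_space, SpaceSim* lo_space) {
  bool one_old_space_gc_has_been_performed = false;
  bool gc_performed = true;
  static const int kThreshold = 20;
  int counter = 0;
  while (gc_performed && counter++ < kThreshold) {
    bool old_space_gc_performed = false;
    gc_performed = false;
    if (!new_space->Reserve()) {
      new_space->CollectGarbage();
      gc_performed = true;
    }
    if (!old_space->Reserve()) {
      old_space->CollectGarbage();
      gc_performed = true;
      old_space_gc_performed = true;
    }
    // Once an old-space GC has happened, don't GC again just for the large
    // object space; its allocations are forced through regardless.
    if (!one_old_space_gc_has_been_performed && !lo_space->Reserve()) {
      lo_space->CollectGarbage();
      gc_performed = true;
    }
    if (old_space_gc_performed) one_old_space_gc_has_been_performed = true;
  }
  return !gc_performed;
}

int main() {
  SpaceSim new_space = {true}, old_space = {true}, lo_space = {true};
  std::printf("reserved: %d\n", ReserveAll(&new_space, &old_space, &lo_space));
}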
if (chunk->owner()->identity() == LO_SPACE && - chunk->size() > static_cast(Page::kPageSize) && + chunk->size() > Page::kPageSize && is_compacting) { chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION); } diff --git a/src/mark-compact.cc b/src/mark-compact.cc index 8cd9d02651..3287b3bcbd 100644 --- a/src/mark-compact.cc +++ b/src/mark-compact.cc @@ -2919,7 +2919,8 @@ static void SweepPrecisely(PagedSpace* space, for ( ; live_objects != 0; live_objects--) { Address free_end = object_address + offsets[live_index++] * kPointerSize; if (free_end != free_start) { - space->Free(free_start, static_cast(free_end - free_start)); + space->AddToFreeLists(free_start, + static_cast(free_end - free_start)); } HeapObject* live_object = HeapObject::FromAddress(free_end); ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); @@ -2945,7 +2946,8 @@ static void SweepPrecisely(PagedSpace* space, cells[cell_index] = 0; } if (free_start != p->ObjectAreaEnd()) { - space->Free(free_start, static_cast(p->ObjectAreaEnd() - free_start)); + space->AddToFreeLists(free_start, + static_cast(p->ObjectAreaEnd() - free_start)); } p->ResetLiveBytes(); } @@ -3238,7 +3240,9 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { Page* p = evacuation_candidates_[i]; if (!p->IsEvacuationCandidate()) continue; PagedSpace* space = static_cast(p->owner()); - space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize); + space->AddToFreeLists( + p->ObjectAreaStart(), + static_cast(p->ObjectAreaEnd() - p->ObjectAreaStart())); p->set_scan_on_scavenge(false); slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); p->ClearEvacuationCandidate(); @@ -3555,8 +3559,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { } size_t size = block_address - p->ObjectAreaStart(); if (cell_index == last_cell_index) { - freed_bytes += static_cast(space->Free(p->ObjectAreaStart(), - static_cast(size))); + freed_bytes += static_cast(space->AddToFreeLists( + p->ObjectAreaStart(), static_cast(size))); ASSERT_EQ(0, p->LiveBytes()); return freed_bytes; } @@ -3565,8 +3569,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { Address free_end = StartOfLiveObject(block_address, cells[cell_index]); // Free the first free space. size = free_end - p->ObjectAreaStart(); - freed_bytes += space->Free(p->ObjectAreaStart(), - static_cast(size)); + freed_bytes += space->AddToFreeLists(p->ObjectAreaStart(), + static_cast(size)); // The start of the current free area is represented in undigested form by // the address of the last 32-word section that contained a live object and // the marking bitmap for that cell, which describes where the live object @@ -3595,8 +3599,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { // so now we need to find the start of the first live object at the // end of the free space. free_end = StartOfLiveObject(block_address, cell); - freed_bytes += space->Free(free_start, - static_cast(free_end - free_start)); + freed_bytes += space->AddToFreeLists( + free_start, static_cast(free_end - free_start)); } } // Update our undigested record of where the current free area started. @@ -3610,8 +3614,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { // Handle the free space at the end of the page. 
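The sweeping hunks above and below route every recovered gap through AddToFreeLists and size it from the page's real object area. As a rough standalone sketch of the underlying idea, here is how free ranges between live words can be read off a mark bitmap with 32 bits per cell, one bit per pointer-sized word; the SweepCells helper is illustrative only, since real sweeping works on object boundaries and free-list categories:

#include <cstdint>
#include <cstdio>

static const int kPointerSize = 8;   // Word size assumed for this sketch.
static const int kBitsPerCell = 32;  // One mark bit per word, 32 per cell.

// Walk the mark bitmap and report each maximal run of unmarked words as a
// free range, the way a sweeper would hand it to the free list.
size_t SweepCells(const uint32_t* cells, int cell_count,
                  void (*add_to_free_list)(size_t offset, size_t bytes)) {
  size_t freed_bytes = 0;
  int free_run_start = -1;  // Word index where the current free run began.
  int total_words = cell_count * kBitsPerCell;
  for (int word = 0; word < total_words; word++) {
    bool live = (cells[word / kBitsPerCell] >> (word % kBitsPerCell)) & 1;
    if (!live) {
      if (free_run_start < 0) free_run_start = word;
    } else if (free_run_start >= 0) {
      size_t bytes = (word - free_run_start) * kPointerSize;
      add_to_free_list(free_run_start * kPointerSize, bytes);
      freed_bytes += bytes;
      free_run_start = -1;
    }
  }
  if (free_run_start >= 0) {  // Trailing free space at the end of the page.
    size_t bytes = (total_words - free_run_start) * kPointerSize;
    add_to_free_list(free_run_start * kPointerSize, bytes);
    freed_bytes += bytes;
  }
  return freed_bytes;
}

int main() {
  uint32_t cells[2] = {0x0000000Fu, 0xF0000000u};  // Live words at both ends.
  size_t freed = SweepCells(cells, 2, [](size_t offset, size_t bytes) {
    std::printf("free range at %zu, %zu bytes\n", offset, bytes);
  });
  std::printf("freed %zu bytes\n", freed);
}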
if (block_address - free_start > 32 * kPointerSize) { free_start = DigestFreeStart(free_start, free_start_cell); - freed_bytes += space->Free(free_start, - static_cast(block_address - free_start)); + freed_bytes += space->AddToFreeLists( + free_start, static_cast(block_address - free_start)); } p->ResetLiveBytes(); diff --git a/src/serialize.cc b/src/serialize.cc index d9fc2b7b7c..820439cc1c 100644 --- a/src/serialize.cc +++ b/src/serialize.cc @@ -612,6 +612,7 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) { pages_[LO_SPACE].Add(address); } last_object_address_ = address; + ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart()); return address; } @@ -622,7 +623,12 @@ HeapObject* Deserializer::GetAddressFromEnd(int space) { int offset = source_->GetInt(); ASSERT(!SpaceIsLarge(space)); offset <<= kObjectAlignmentBits; - return HeapObject::FromAddress(high_water_[space] - offset); + Address address = high_water_[space] - offset; + // This assert will fail if kMinimumSpaceSizes is too small for a space, + // because we rely on the fact that all allocation is linear when the VM + // is very young. + ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart()); + return HeapObject::FromAddress(address); } diff --git a/src/snapshot.h b/src/snapshot.h index 4f01a2d629..fbb6c8a04f 100644 --- a/src/snapshot.h +++ b/src/snapshot.h @@ -26,6 +26,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "isolate.h" +#include "spaces.h" #ifndef V8_SNAPSHOT_H_ #define V8_SNAPSHOT_H_ @@ -86,6 +87,7 @@ class Snapshot { DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot); }; + } } // namespace v8::internal #endif // V8_SNAPSHOT_H_ diff --git a/src/spaces-inl.h b/src/spaces-inl.h index d0cddebf78..89ed3a1cf6 100644 --- a/src/spaces-inl.h +++ b/src/spaces-inl.h @@ -164,12 +164,12 @@ Page* Page::Initialize(Heap* heap, Executability executable, PagedSpace* owner) { Page* page = reinterpret_cast(chunk); - ASSERT(chunk->size() == static_cast(kPageSize)); + ASSERT(chunk->size() <= kPageSize); ASSERT(chunk->owner() == owner); - owner->IncreaseCapacity(Page::kObjectAreaSize); - owner->Free(page->ObjectAreaStart(), - static_cast(page->ObjectAreaEnd() - - page->ObjectAreaStart())); + int object_bytes = + static_cast(page->ObjectAreaEnd() - page->ObjectAreaStart()); + owner->IncreaseCapacity(object_bytes); + owner->AddToFreeLists(page->ObjectAreaStart(), object_bytes); heap->incremental_marking()->SetOldSpacePageFlags(chunk); @@ -257,6 +257,7 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) { if (new_top > allocation_info_.limit) return NULL; allocation_info_.top = new_top; + ASSERT(new_top >= Page::FromAllocationTop(new_top)->ObjectAreaStart()); return HeapObject::FromAddress(current_top); } diff --git a/src/spaces.cc b/src/spaces.cc index 05c5876fdf..481721d595 100644 --- a/src/spaces.cc +++ b/src/spaces.cc @@ -31,6 +31,7 @@ #include "macro-assembler.h" #include "mark-compact.h" #include "platform.h" +#include "snapshot.h" namespace v8 { namespace internal { @@ -263,7 +264,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate) : isolate_(isolate), capacity_(0), capacity_executable_(0), - size_(0), + memory_allocator_reserved_(0), size_executable_(0) { } @@ -273,7 +274,7 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); ASSERT_GE(capacity_, capacity_executable_); - size_ = 0; + memory_allocator_reserved_ = 0; size_executable_ = 0; return true; @@ 
-282,7 +283,7 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { void MemoryAllocator::TearDown() { // Check that spaces were torn down before MemoryAllocator. - ASSERT(size_ == 0); + CHECK_EQ(memory_allocator_reserved_, 0); // TODO(gc) this will be true again when we fix FreeMemory. // ASSERT(size_executable_ == 0); capacity_ = 0; @@ -295,8 +296,8 @@ void MemoryAllocator::FreeMemory(VirtualMemory* reservation, // TODO(gc) make code_range part of memory allocator? ASSERT(reservation->IsReserved()); size_t size = reservation->size(); - ASSERT(size_ >= size); - size_ -= size; + ASSERT(memory_allocator_reserved_ >= size); + memory_allocator_reserved_ -= size; isolate_->counters()->memory_allocated()->Decrement(static_cast(size)); @@ -316,8 +317,8 @@ void MemoryAllocator::FreeMemory(Address base, size_t size, Executability executable) { // TODO(gc) make code_range part of memory allocator? - ASSERT(size_ >= size); - size_ -= size; + ASSERT(memory_allocator_reserved_ >= size); + memory_allocator_reserved_ -= size; isolate_->counters()->memory_allocated()->Decrement(static_cast(size)); @@ -343,7 +344,7 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, VirtualMemory reservation(size, alignment); if (!reservation.IsReserved()) return NULL; - size_ += reservation.size(); + memory_allocator_reserved_ += reservation.size(); Address base = RoundUp(static_cast
<Address>(reservation.address()), alignment); controller->TakeControl(&reservation); @@ -352,11 +353,14 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, Address MemoryAllocator::AllocateAlignedMemory(size_t size, + size_t reserved_size, size_t alignment, Executability executable, VirtualMemory* controller) { + ASSERT(RoundUp(reserved_size, OS::CommitPageSize()) >= + RoundUp(size, OS::CommitPageSize())); VirtualMemory reservation; - Address base = ReserveAlignedMemory(size, alignment, &reservation); + Address base = ReserveAlignedMemory(reserved_size, alignment, &reservation); if (base == NULL) return NULL; if (!reservation.Commit(base, size, @@ -375,6 +379,53 @@ void Page::InitializeAsAnchor(PagedSpace* owner) { } +void Page::CommitMore(intptr_t space_needed) { + intptr_t reserved_page_size = reservation_.IsReserved() ? + reservation_.size() : + Page::kPageSize; + ASSERT(size() + space_needed <= reserved_page_size); + // Increase the page size by at least 64k (this also rounds to OS page + // size). + int expand = Min(reserved_page_size - size(), + RoundUp(size() + space_needed, Page::kGrowthUnit) - size()); + ASSERT(expand <= kPageSize - size()); + ASSERT(expand <= reserved_page_size - size()); + Executability executable = + IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; + Address old_end = ObjectAreaEnd(); + if (!VirtualMemory::CommitRegion(old_end, expand, executable)) return; + + set_size(size() + expand); + + PagedSpace* paged_space = reinterpret_cast<PagedSpace*>(owner()); + paged_space->heap()->isolate()->memory_allocator()->AllocationBookkeeping( + paged_space, + old_end, + 0, // No new memory was reserved. + expand, // New memory committed. + executable); + paged_space->IncreaseCapacity(expand); + + // In spaces with alignment requirements (e.g. map space) we have to align + // the expanded area with the correct object alignment. + uintptr_t object_area_size = old_end - ObjectAreaStart(); + uintptr_t aligned_object_area_size = + object_area_size - object_area_size % paged_space->ObjectAlignment(); + if (aligned_object_area_size != object_area_size) { + aligned_object_area_size += paged_space->ObjectAlignment(); + } + Address new_area = + reinterpret_cast<Address>
(ObjectAreaStart() + aligned_object_area_size); + // In spaces with alignment requirements, this will waste the space for one + // object per doubling of the page size until the next GC. + paged_space->AddToFreeLists(old_end, new_area - old_end); + + expand -= (new_area - old_end); + + paged_space->AddToFreeLists(new_area, expand); +} + + NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start, SemiSpace* semi_space) { @@ -460,9 +511,15 @@ void MemoryChunk::Unlink() { MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, + intptr_t committed_body_size, Executability executable, Space* owner) { - size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; + ASSERT(body_size >= committed_body_size); + size_t chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + body_size, + OS::CommitPageSize()); + intptr_t committed_chunk_size = + committed_body_size + MemoryChunk::kObjectStartOffset; + committed_chunk_size = RoundUp(committed_chunk_size, OS::CommitPageSize()); Heap* heap = isolate_->heap(); Address base = NULL; VirtualMemory reservation; @@ -482,20 +539,21 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, ASSERT(IsAligned(reinterpret_cast(base), MemoryChunk::kAlignment)); if (base == NULL) return NULL; - size_ += chunk_size; - // Update executable memory size. - size_executable_ += chunk_size; + // The AllocateAlignedMemory method will update the memory allocator + // memory used, but we are not using that if we have a code range, so + // we update it here. + memory_allocator_reserved_ += chunk_size; } else { - base = AllocateAlignedMemory(chunk_size, + base = AllocateAlignedMemory(committed_chunk_size, + chunk_size, MemoryChunk::kAlignment, executable, &reservation); if (base == NULL) return NULL; - // Update executable memory size. - size_executable_ += reservation.size(); } } else { - base = AllocateAlignedMemory(chunk_size, + base = AllocateAlignedMemory(committed_chunk_size, + chunk_size, MemoryChunk::kAlignment, executable, &reservation); @@ -503,21 +561,12 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, if (base == NULL) return NULL; } -#ifdef DEBUG - ZapBlock(base, chunk_size); -#endif - isolate_->counters()->memory_allocated()-> - Increment(static_cast(chunk_size)); - - LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); - if (owner != NULL) { - ObjectSpace space = static_cast(1 << owner->identity()); - PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); - } + AllocationBookkeeping( + owner, base, chunk_size, committed_chunk_size, executable); MemoryChunk* result = MemoryChunk::Initialize(heap, base, - chunk_size, + committed_chunk_size, executable, owner); result->set_reserved_memory(&reservation); @@ -525,9 +574,40 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, } -Page* MemoryAllocator::AllocatePage(PagedSpace* owner, +void MemoryAllocator::AllocationBookkeeping(Space* owner, + Address base, + intptr_t reserved_chunk_size, + intptr_t committed_chunk_size, + Executability executable) { + if (executable == EXECUTABLE) { + // Update executable memory size. 
+ size_executable_ += reserved_chunk_size; + } + +#ifdef DEBUG + ZapBlock(base, committed_chunk_size); +#endif + isolate_->counters()->memory_allocated()-> + Increment(static_cast(committed_chunk_size)); + + LOG(isolate_, NewEvent("MemoryChunk", base, committed_chunk_size)); + if (owner != NULL) { + ObjectSpace space = static_cast(1 << owner->identity()); + PerformAllocationCallback( + space, kAllocationActionAllocate, committed_chunk_size); + } +} + + +Page* MemoryAllocator::AllocatePage(intptr_t committed_object_area_size, + PagedSpace* owner, Executability executable) { - MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); + ASSERT(committed_object_area_size <= Page::kObjectAreaSize); + + MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, + committed_object_area_size, + executable, + owner); if (chunk == NULL) return NULL; @@ -538,7 +618,8 @@ Page* MemoryAllocator::AllocatePage(PagedSpace* owner, LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, Executability executable, Space* owner) { - MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); + MemoryChunk* chunk = + AllocateChunk(object_size, object_size, executable, owner); if (chunk == NULL) return NULL; return LargePage::Initialize(isolate_->heap(), chunk); } @@ -559,8 +640,12 @@ void MemoryAllocator::Free(MemoryChunk* chunk) { if (reservation->IsReserved()) { FreeMemory(reservation, chunk->executable()); } else { + // When we do not have a reservation that is because this allocation + // is part of the huge reserved chunk of memory reserved for code on + // x64. In that case the size was rounded up to the page size on + // allocation so we do the same now when freeing. FreeMemory(chunk->address(), - chunk->size(), + RoundUp(chunk->size(), Page::kPageSize), chunk->executable()); } } @@ -640,11 +725,12 @@ void MemoryAllocator::RemoveMemoryAllocationCallback( #ifdef DEBUG void MemoryAllocator::ReportStatistics() { - float pct = static_cast(capacity_ - size_) / capacity_; + float pct = + static_cast(capacity_ - memory_allocator_reserved_) / capacity_; PrintF(" capacity: %" V8_PTR_PREFIX "d" ", used: %" V8_PTR_PREFIX "d" ", available: %%%d\n\n", - capacity_, size_, static_cast(pct*100)); + capacity_, memory_allocator_reserved_, static_cast(pct*100)); } #endif @@ -723,7 +809,6 @@ MaybeObject* PagedSpace::FindObject(Address addr) { bool PagedSpace::CanExpand() { ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); - ASSERT(Capacity() % Page::kObjectAreaSize == 0); if (Capacity() == max_capacity_) return false; @@ -735,11 +820,43 @@ bool PagedSpace::CanExpand() { return true; } -bool PagedSpace::Expand() { +bool PagedSpace::Expand(intptr_t size_in_bytes) { if (!CanExpand()) return false; + Page* last_page = anchor_.prev_page(); + if (last_page != &anchor_) { + // We have run out of linear allocation space. This may be because the + // most recently allocated page (stored last in the list) is a small one, + // that starts on a page aligned boundary, but has not a full kPageSize of + // committed memory. Let's commit more memory for the page. + intptr_t reserved_page_size = last_page->reserved_memory()->IsReserved() ? 
+ last_page->reserved_memory()->size() : + Page::kPageSize; + if (last_page->size() < reserved_page_size && + (reserved_page_size - last_page->size()) >= size_in_bytes && + !last_page->IsEvacuationCandidate() && + last_page->WasSwept()) { + last_page->CommitMore(size_in_bytes); + return true; + } + } + + // We initially only commit a part of the page, but the deserialization + // of the initial snapshot makes the assumption that it can deserialize + // into linear memory of a certain size per space, so some of the spaces + // need to have a little more committed memory. + int initial = + Max(OS::CommitPageSize(), static_cast(Page::kGrowthUnit)); + + ASSERT(Page::kPageSize - initial < Page::kObjectAreaSize); + + intptr_t expansion_size = + Max(initial, + RoundUpToPowerOf2(MemoryChunk::kObjectStartOffset + size_in_bytes)) - + MemoryChunk::kObjectStartOffset; + Page* p = heap()->isolate()->memory_allocator()-> - AllocatePage(this, executable()); + AllocatePage(expansion_size, this, executable()); if (p == NULL) return false; ASSERT(Capacity() <= max_capacity_); @@ -784,6 +901,8 @@ void PagedSpace::ReleasePage(Page* page) { allocation_info_.top = allocation_info_.limit = NULL; } + intptr_t size = page->ObjectAreaEnd() - page->ObjectAreaStart(); + page->Unlink(); if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { heap()->isolate()->memory_allocator()->Free(page); @@ -792,8 +911,7 @@ void PagedSpace::ReleasePage(Page* page) { } ASSERT(Capacity() > 0); - ASSERT(Capacity() % Page::kObjectAreaSize == 0); - accounting_stats_.ShrinkSpace(Page::kObjectAreaSize); + accounting_stats_.ShrinkSpace(size); } @@ -1671,7 +1789,7 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) { // is big enough to be a FreeSpace with at least one extra word (the next // pointer), we set its map to be the free space map and its size to an // appropriate array length for the desired size from HeapObject::Size(). - // If the block is too small (eg, one or two words), to hold both a size + // If the block is too small (e.g. one or two words), to hold both a size // field and a next pointer, we give it a filler map that gives it the // correct size. if (size_in_bytes > FreeSpace::kHeaderSize) { @@ -1775,69 +1893,102 @@ int FreeList::Free(Address start, int size_in_bytes) { } -FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) { +FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, + int* node_size, + int minimum_size) { FreeListNode* node = *list; if (node == NULL) return NULL; + ASSERT(node->map() == node->GetHeap()->raw_unchecked_free_space_map()); + while (node != NULL && Page::FromAddress(node->address())->IsEvacuationCandidate()) { available_ -= node->Size(); node = node->next(); } - if (node != NULL) { - *node_size = node->Size(); - *list = node->next(); - } else { + if (node == NULL) { *list = NULL; + return NULL; } + // Gets the size without checking the map. When we are booting we have + // a FreeListNode before we have created its map. + intptr_t size = reinterpret_cast(node)->Size(); + + // We don't search the list for one that fits, preferring to look in the + // list of larger nodes, but we do check the first in the list, because + // if we had to expand the space or page we may have placed an entry that + // was just long enough at the head of one of the lists. 
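The comment above describes the new free-list policy: only the head of each size class is checked against the requested minimum, rather than walking a list for a fit. A small self-contained sketch of that policy, with made-up Node and FindNodeFor types standing in for V8's FreeListNode machinery:

#include <cstdio>

// Simplified free-list node; V8 stores this header inside the free memory.
struct Node {
  int size;
  Node* next;
};

// Like the patched PickNodeFromList: look only at the head of the list and
// reject it if it is smaller than the requested minimum, instead of walking
// the whole list looking for a fit.
Node* PickNodeFromList(Node** list, int* node_size, int minimum_size) {
  Node* node = *list;
  if (node == nullptr) return nullptr;
  if (node->size < minimum_size) return nullptr;
  *node_size = node->size;
  *list = node->next;
  return node;
}

// Fall through the size classes from small to large, as FindNodeFor does.
Node* FindNodeFor(Node** small, Node** medium, Node** large,
                  int size_in_bytes, int* node_size) {
  Node* n = PickNodeFromList(small, node_size, size_in_bytes);
  if (n == nullptr) n = PickNodeFromList(medium, node_size, size_in_bytes);
  if (n == nullptr) n = PickNodeFromList(large, node_size, size_in_bytes);
  return n;
}

int main() {
  Node small_head = {32, nullptr};
  Node large_head = {4096, nullptr};
  Node* small_list = &small_head;
  Node* medium_list = nullptr;
  Node* large_list = &large_head;
  int got = 0;
  Node* n = FindNodeFor(&small_list, &medium_list, &large_list, 256, &got);
  std::printf("allocated from node of %d bytes\n", n != nullptr ? got : 0);
}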
+ if (size < minimum_size) return NULL; + + *node_size = size; + available_ -= size; + *list = node->next(); + return node; } -FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { +FreeListNode* FreeList::FindAbuttingNode( + int size_in_bytes, int* node_size, Address limit, FreeListNode** list_head) { + FreeListNode* first_node = *list_head; + if (first_node != NULL && + first_node->address() == limit && + reinterpret_cast(first_node)->Size() >= size_in_bytes && + !Page::FromAddress(first_node->address())->IsEvacuationCandidate()) { + FreeListNode* answer = first_node; + int size = reinterpret_cast(first_node)->Size(); + available_ -= size; + *node_size = size; + *list_head = first_node->next(); + ASSERT(IsVeryLong() || available_ == SumFreeLists()); + return answer; + } + return NULL; +} + + +FreeListNode* FreeList::FindNodeFor(int size_in_bytes, + int* node_size, + Address limit) { FreeListNode* node = NULL; - if (size_in_bytes <= kSmallAllocationMax) { - node = PickNodeFromList(&small_list_, node_size); + if (limit != NULL) { + // We may have a memory area at the head of the free list, which abuts the + // old linear allocation area. This happens if the linear allocation area + // has been shortened to allow an incremental marking step to be performed. + // In that case we prefer to return the free memory area that is contiguous + // with the old linear allocation area. + node = FindAbuttingNode(size_in_bytes, node_size, limit, &large_list_); + if (node != NULL) return node; + node = FindAbuttingNode(size_in_bytes, node_size, limit, &huge_list_); if (node != NULL) return node; } - if (size_in_bytes <= kMediumAllocationMax) { - node = PickNodeFromList(&medium_list_, node_size); - if (node != NULL) return node; - } + node = PickNodeFromList(&small_list_, node_size, size_in_bytes); + ASSERT(IsVeryLong() || available_ == SumFreeLists()); + if (node != NULL) return node; - if (size_in_bytes <= kLargeAllocationMax) { - node = PickNodeFromList(&large_list_, node_size); - if (node != NULL) return node; - } + node = PickNodeFromList(&medium_list_, node_size, size_in_bytes); + ASSERT(IsVeryLong() || available_ == SumFreeLists()); + if (node != NULL) return node; + node = PickNodeFromList(&large_list_, node_size, size_in_bytes); + ASSERT(IsVeryLong() || available_ == SumFreeLists()); + if (node != NULL) return node; + + // The tricky third clause in this for statement is due to the fact that + // PickNodeFromList can cut pages out of the list if they are unavailable for + // new allocation (e.g. if they are on a page that has been scheduled for + // evacuation). for (FreeListNode** cur = &huge_list_; *cur != NULL; - cur = (*cur)->next_address()) { - FreeListNode* cur_node = *cur; - while (cur_node != NULL && - Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { - available_ -= reinterpret_cast(cur_node)->Size(); - cur_node = cur_node->next(); - } - - *cur = cur_node; - if (cur_node == NULL) break; - - ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map()); - FreeSpace* cur_as_free_space = reinterpret_cast(*cur); - int size = cur_as_free_space->Size(); - if (size >= size_in_bytes) { - // Large enough node found. Unlink it from the list. - node = *cur; - *node_size = size; - *cur = node->next(); - break; - } + cur = (*cur) == NULL ? 
cur : (*cur)->next_address()) { + node = PickNodeFromList(cur, node_size, size_in_bytes); + ASSERT(IsVeryLong() || available_ == SumFreeLists()); + if (node != NULL) return node; } return node; @@ -1856,10 +2007,23 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { ASSERT(owner_->limit() - owner_->top() < size_in_bytes); int new_node_size = 0; - FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); + FreeListNode* new_node = + FindNodeFor(size_in_bytes, &new_node_size, owner_->limit()); if (new_node == NULL) return NULL; - available_ -= new_node_size; + if (new_node->address() == owner_->limit()) { + // The new freelist node we were given is an extension of the one we had + // last. This is a common thing to happen when we extend a small page by + // committing more memory. In this case we just add the new node to the + // linear allocation area and recurse. + owner_->Allocate(new_node_size); + owner_->SetTop(owner_->top(), new_node->address() + new_node_size); + MaybeObject* allocation = owner_->AllocateRaw(size_in_bytes); + Object* answer; + if (!allocation->ToObject(&answer)) return NULL; + return HeapObject::cast(answer); + } + ASSERT(IsVeryLong() || available_ == SumFreeLists()); int bytes_left = new_node_size - size_in_bytes; @@ -1869,7 +2033,9 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { // Mark the old linear allocation area with a free space map so it can be // skipped when scanning the heap. This also puts it back in the free list // if it is big enough. - owner_->Free(owner_->top(), old_linear_size); + if (old_linear_size != 0) { + owner_->AddToFreeLists(owner_->top(), old_linear_size); + } #ifdef DEBUG for (int i = 0; i < size_in_bytes / kPointerSize; i++) { @@ -1898,8 +2064,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { // We don't want to give too large linear areas to the allocator while // incremental marking is going on, because we won't check again whether // we want to do another increment until the linear area is used up. - owner_->Free(new_node->address() + size_in_bytes + linear_size, - new_node_size - size_in_bytes - linear_size); + owner_->AddToFreeLists(new_node->address() + size_in_bytes + linear_size, + new_node_size - size_in_bytes - linear_size); owner_->SetTop(new_node->address() + size_in_bytes, new_node->address() + size_in_bytes + linear_size); } else if (bytes_left > 0) { @@ -1908,6 +2074,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { owner_->SetTop(new_node->address() + size_in_bytes, new_node->address() + new_node_size); } else { + ASSERT(bytes_left == 0); // TODO(gc) Try not freeing linear allocation region when bytes_left // are zero. owner_->SetTop(NULL, NULL); @@ -2040,7 +2207,9 @@ bool NewSpace::ReserveSpace(int bytes) { HeapObject* allocation = HeapObject::cast(object); Address top = allocation_info_.top; if ((top - bytes) == allocation->address()) { - allocation_info_.top = allocation->address(); + Address new_top = allocation->address(); + ASSERT(new_top >= Page::FromAddress(new_top - 1)->ObjectAreaStart()); + allocation_info_.top = new_top; return true; } // There may be a borderline case here where the allocation succeeded, but @@ -2055,7 +2224,7 @@ void PagedSpace::PrepareForMarkCompact() { // Mark the old linear allocation area with a free space map so it can be // skipped when scanning the heap. 
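FreeList::Allocate above gains a fast path: when the node returned by FindNodeFor starts exactly at the current allocation limit (typical right after a page has been committed further), the linear allocation area is simply extended and the allocation retried. A toy bump-allocator sketch of that idea; LinearArea and AllocateRaw are illustrative, not V8 types:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Toy bump allocator over one buffer: [top, limit) is the linear area.
struct LinearArea {
  uint8_t* top;
  uint8_t* limit;
};

// If a free block starts exactly at the current limit (as happens after a
// page has just been committed further), extend the linear area instead of
// carving the block up, then allocate from the enlarged area.
uint8_t* AllocateRaw(LinearArea* area, uint8_t* free_block, size_t free_size,
                     size_t size_in_bytes) {
  if (free_block == area->limit) {
    area->limit += free_size;  // The block is an extension of the area.
  }
  if (static_cast<size_t>(area->limit - area->top) < size_in_bytes) {
    return nullptr;  // Still not enough room.
  }
  uint8_t* result = area->top;
  area->top += size_in_bytes;
  return result;
}

int main() {
  uint8_t* page = static_cast<uint8_t*>(std::malloc(256));
  LinearArea area = {page, page + 64};  // 64 committed bytes so far.
  uint8_t* extra = page + 64;           // Newly committed block abuts limit.
  uint8_t* obj = AllocateRaw(&area, extra, 128, 100);
  std::printf("allocated %s\n", obj != nullptr ? "yes" : "no");
  std::free(page);
}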
int old_linear_size = static_cast(limit() - top()); - Free(top(), old_linear_size); + AddToFreeLists(top(), old_linear_size); SetTop(NULL, NULL); // Stop lazy sweeping and clear marking bits for unswept pages. @@ -2098,10 +2267,13 @@ bool PagedSpace::ReserveSpace(int size_in_bytes) { // Mark the old linear allocation area with a free space so it can be // skipped when scanning the heap. This also puts it back in the free list // if it is big enough. - Free(top(), old_linear_size); + AddToFreeLists(top(), old_linear_size); SetTop(new_area->address(), new_area->address() + size_in_bytes); - Allocate(size_in_bytes); + // The AddToFreeLists call above will reduce the size of the space in the + // allocation stats. We don't need to add this linear area to the size + // with an Allocate(size_in_bytes) call here, because the + // free_list_.Allocate() call above already accounted for this memory. return true; } @@ -2182,7 +2354,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { } // Try to expand the space and allocate in the new next page. - if (Expand()) { + if (Expand(size_in_bytes)) { return free_list_.Allocate(size_in_bytes); } @@ -2543,6 +2715,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() { heap()->mark_compact_collector()->ReportDeleteIfNeeded( object, heap()->isolate()); size_ -= static_cast(page->size()); + ASSERT(size_ >= 0); objects_size_ -= object->Size(); page_count_--; diff --git a/src/spaces.h b/src/spaces.h index 1a30078438..9864585aff 100644 --- a/src/spaces.h +++ b/src/spaces.h @@ -505,11 +505,9 @@ class MemoryChunk { static const int kObjectStartOffset = kBodyOffset - 1 + (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); - size_t size() const { return size_; } + intptr_t size() const { return size_; } - void set_size(size_t size) { - size_ = size; - } + void set_size(size_t size) { size_ = size; } Executability executable() { return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; @@ -661,7 +659,7 @@ class Page : public MemoryChunk { Address ObjectAreaStart() { return address() + kObjectStartOffset; } // Returns the end address (exclusive) of the object area in this page. - Address ObjectAreaEnd() { return address() + Page::kPageSize; } + Address ObjectAreaEnd() { return address() + size(); } // Checks whether an address is page aligned. static bool IsAlignedToPageSize(Address a) { @@ -680,11 +678,17 @@ class Page : public MemoryChunk { return address() + offset; } + // Expand the committed area for pages that are small. + void CommitMore(intptr_t space_needed); + // --------------------------------------------------------------------- // Page size in bytes. This must be a multiple of the OS page size. static const int kPageSize = 1 << kPageSizeBits; + // For a 1Mbyte page grow 64k at a time. + static const int kGrowthUnit = 1 << (kPageSizeBits - 4); + // Page size mask. static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; @@ -849,12 +853,10 @@ class CodeRange { FreeBlock(Address start_arg, size_t size_arg) : start(start_arg), size(size_arg) { ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); - ASSERT(size >= static_cast(Page::kPageSize)); } FreeBlock(void* start_arg, size_t size_arg) : start(static_cast
(start_arg)), size(size_arg) { ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); - ASSERT(size >= static_cast(Page::kPageSize)); } Address start; @@ -950,7 +952,9 @@ class MemoryAllocator { void TearDown(); - Page* AllocatePage(PagedSpace* owner, Executability executable); + Page* AllocatePage(intptr_t object_area_size, + PagedSpace* owner, + Executability executable); LargePage* AllocateLargePage(intptr_t object_size, Executability executable, @@ -959,10 +963,14 @@ class MemoryAllocator { void Free(MemoryChunk* chunk); // Returns the maximum available bytes of heaps. - intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } + intptr_t Available() { + return capacity_ < memory_allocator_reserved_ ? + 0 : + capacity_ - memory_allocator_reserved_; + } // Returns allocated spaces in bytes. - intptr_t Size() { return size_; } + intptr_t Size() { return memory_allocator_reserved_; } // Returns the maximum available executable bytes of heaps. intptr_t AvailableExecutable() { @@ -984,6 +992,7 @@ class MemoryAllocator { #endif MemoryChunk* AllocateChunk(intptr_t body_size, + intptr_t committed_body_size, Executability executable, Space* space); @@ -991,6 +1000,7 @@ class MemoryAllocator { size_t alignment, VirtualMemory* controller); Address AllocateAlignedMemory(size_t requested, + size_t committed, size_t alignment, Executability executable, VirtualMemory* controller); @@ -1010,6 +1020,12 @@ class MemoryAllocator { // and false otherwise. bool UncommitBlock(Address start, size_t size); + void AllocationBookkeeping(Space* owner, + Address base, + intptr_t reserved_size, + intptr_t committed_size, + Executability executable); + // Zaps a contiguous block of memory [start..(start+size)[ thus // filling it up with a recognizable non-NULL bit pattern. void ZapBlock(Address start, size_t size); @@ -1037,7 +1053,7 @@ class MemoryAllocator { size_t capacity_executable_; // Allocated space size in bytes. - size_t size_; + size_t memory_allocator_reserved_; // Allocated executable space size in bytes. size_t size_executable_; @@ -1382,9 +1398,15 @@ class FreeList BASE_EMBEDDED { static const int kMinBlockSize = 3 * kPointerSize; static const int kMaxBlockSize = Page::kMaxHeapObjectSize; - FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size); + FreeListNode* PickNodeFromList(FreeListNode** list, + int* node_size, + int minimum_size); - FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); + FreeListNode* FindNodeFor(int size_in_bytes, int* node_size, Address limit); + FreeListNode* FindAbuttingNode(int size_in_bytes, + int* node_size, + Address limit, + FreeListNode** list_head); PagedSpace* owner_; Heap* heap_; @@ -1484,6 +1506,8 @@ class PagedSpace : public Space { // free bytes that were not found at all due to lazy sweeping. virtual intptr_t Waste() { return accounting_stats_.Waste(); } + virtual int ObjectAlignment() { return kObjectAlignment; } + // Returns the allocation pointer in this space. Address top() { return allocation_info_.top; } Address limit() { return allocation_info_.limit; } @@ -1498,7 +1522,7 @@ class PagedSpace : public Space { // the free list or accounted as waste. // If add_to_freelist is false then just accounting stats are updated and // no attempt to add area to free list is made. 
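Page::CommitMore and Page::kGrowthUnit above grow a partially committed page in 64k steps, never past its reserved size. A standalone sketch of that arithmetic; the constants and the BytesToCommit helper are assumptions made for illustration:

#include <cstdint>
#include <cstdio>

// Constants mirroring the patch: 1MB pages, committed in 64KB growth units.
static const intptr_t kPageSize = 1 << 20;
static const intptr_t kGrowthUnit = 1 << 16;

static intptr_t RoundUpTo(intptr_t value, intptr_t unit) {
  return ((value + unit - 1) / unit) * unit;
}

// How many more bytes to commit when a partially committed page needs
// space_needed extra bytes: grow in kGrowthUnit steps but never past the
// reserved size of the page (this echoes Page::CommitMore's arithmetic).
intptr_t BytesToCommit(intptr_t committed, intptr_t reserved,
                       intptr_t space_needed) {
  intptr_t wanted =
      RoundUpTo(committed + space_needed, kGrowthUnit) - committed;
  intptr_t available = reserved - committed;
  return wanted < available ? wanted : available;
}

int main() {
  // A page with 128KB committed out of a reserved 1MB, needing 10KB more.
  intptr_t expand = BytesToCommit(128 * 1024, kPageSize, 10 * 1024);
  std::printf("commit %ld more bytes\n", static_cast<long>(expand));  // 64KB.
}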
- int Free(Address start, int size_in_bytes) { + int AddToFreeLists(Address start, int size_in_bytes) { int wasted = free_list_.Free(start, size_in_bytes); accounting_stats_.DeallocateBytes(size_in_bytes - wasted); return size_in_bytes - wasted; @@ -1506,6 +1530,7 @@ class PagedSpace : public Space { // Set space allocation info. void SetTop(Address top, Address limit) { + ASSERT(top == NULL || top >= Page::FromAddress(top - 1)->ObjectAreaStart()); ASSERT(top == limit || Page::FromAddress(top) == Page::FromAddress(limit - 1)); allocation_info_.top = top; @@ -1572,12 +1597,14 @@ class PagedSpace : public Space { void IncreaseUnsweptFreeBytes(Page* p) { ASSERT(ShouldBeSweptLazily(p)); - unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes()); + unswept_free_bytes_ += + (p->ObjectAreaEnd() - p->ObjectAreaStart()) - p->LiveBytes(); } void DecreaseUnsweptFreeBytes(Page* p) { ASSERT(ShouldBeSweptLazily(p)); - unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes()); + unswept_free_bytes_ -= + (p->ObjectAreaEnd() - p->ObjectAreaStart() - p->LiveBytes()); } bool AdvanceSweeper(intptr_t bytes_to_sweep); @@ -1586,6 +1613,7 @@ class PagedSpace : public Space { return !first_unswept_page_->is_valid(); } + inline bool HasAPage() { return anchor_.next_page() != &anchor_; } Page* FirstPage() { return anchor_.next_page(); } Page* LastPage() { return anchor_.prev_page(); } @@ -1596,15 +1624,17 @@ class PagedSpace : public Space { FreeList::SizeStats sizes; free_list_.CountFreeListItems(p, &sizes); + intptr_t object_area_size = p->ObjectAreaEnd() - p->ObjectAreaStart(); + intptr_t ratio; intptr_t ratio_threshold; if (identity() == CODE_SPACE) { ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / - Page::kObjectAreaSize; + object_area_size; ratio_threshold = 10; } else { ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / - Page::kObjectAreaSize; + object_area_size; ratio_threshold = 15; } @@ -1614,20 +1644,20 @@ class PagedSpace : public Space { identity(), static_cast(sizes.small_size_), static_cast(sizes.small_size_ * 100) / - Page::kObjectAreaSize, + object_area_size, static_cast(sizes.medium_size_), static_cast(sizes.medium_size_ * 100) / - Page::kObjectAreaSize, + object_area_size, static_cast(sizes.large_size_), static_cast(sizes.large_size_ * 100) / - Page::kObjectAreaSize, + object_area_size, static_cast(sizes.huge_size_), static_cast(sizes.huge_size_ * 100) / - Page::kObjectAreaSize, + object_area_size, (ratio > ratio_threshold) ? "[fragmented]" : ""); } - if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) { + if (FLAG_always_compact && sizes.Total() != object_area_size) { return 1; } if (ratio <= ratio_threshold) return 0; // Not fragmented. @@ -1658,12 +1688,6 @@ class PagedSpace : public Space { // Normal allocation information. AllocationInfo allocation_info_; - // Bytes of each page that cannot be allocated. Possibly non-zero - // for pages in spaces with only fixed-size objects. Always zero - // for pages in spaces with variable sized objects (those pages are - // padded with free-list nodes). - int page_extra_; - bool was_swept_conservatively_; // The first page to be swept when the lazy sweeper advances. Is set @@ -1675,10 +1699,11 @@ class PagedSpace : public Space { // done conservatively. intptr_t unswept_free_bytes_; - // Expands the space by allocating a fixed number of pages. Returns false if - // it cannot allocate requested number of pages from OS, or if the hard heap - // size limit has been hit. 
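The fragmentation heuristic above now divides by the page's actual object area instead of the fixed Page::kObjectAreaSize, so a partially committed page is judged against the memory it really has. A compilable sketch of the ratio computation; FreeListSizes and IsFragmented are illustrative names:

#include <cstdint>
#include <cstdio>

// Free-list totals for one page, by size class (as CountFreeListItems fills).
struct FreeListSizes {
  intptr_t small_size_;
  intptr_t medium_size_;
  intptr_t large_size_;
};

// The fragmentation ratio above, with the denominator taken from the page's
// real object area rather than a fixed page-sized constant.
bool IsFragmented(const FreeListSizes& sizes, intptr_t object_area_size,
                  bool is_code_space) {
  intptr_t ratio, threshold;
  if (is_code_space) {
    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
            object_area_size;
    threshold = 10;
  } else {
    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
            object_area_size;
    threshold = 15;
  }
  return ratio > threshold;
}

int main() {
  FreeListSizes sizes = {12 * 1024, 40 * 1024, 0};
  // The same free-list bytes on a half-committed 512KB page cross the
  // threshold, while on a fully committed 1MB page they do not.
  std::printf("full page fragmented: %d\n", IsFragmented(sizes, 1 << 20, false));
  std::printf("half page fragmented: %d\n", IsFragmented(sizes, 1 << 19, false));
}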
- bool Expand(); + // Expands the space by allocating a page. Returns false if it cannot + // allocate a page from OS, or if the hard heap size limit has been hit. The + // new page will have at least enough committed space to satisfy the object + // size indicated by the allocation_size argument; + bool Expand(intptr_t allocation_size); // Generic fast case allocation function that tries linear allocation at the // address denoted by top in allocation_info_. @@ -1833,7 +1858,8 @@ class SemiSpace : public Space { anchor_(this), current_page_(NULL) { } - // Sets up the semispace using the given chunk. + // Sets up the semispace using the given chunk. After this, call Commit() + // to make the semispace usable. void SetUp(Address start, int initial_capacity, int maximum_capacity); // Tear down the space. Heap memory was not allocated by the space, so it @@ -2338,14 +2364,7 @@ class OldSpace : public PagedSpace { intptr_t max_capacity, AllocationSpace id, Executability executable) - : PagedSpace(heap, max_capacity, id, executable) { - page_extra_ = 0; - } - - // The limit of allocation for a page in this space. - virtual Address PageAllocationLimit(Page* page) { - return page->ObjectAreaEnd(); - } + : PagedSpace(heap, max_capacity, id, executable) { } public: TRACK_MEMORY("OldSpace") @@ -2372,17 +2391,12 @@ class FixedSpace : public PagedSpace { const char* name) : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), object_size_in_bytes_(object_size_in_bytes), - name_(name) { - page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; - } - - // The limit of allocation for a page in this space. - virtual Address PageAllocationLimit(Page* page) { - return page->ObjectAreaEnd() - page_extra_; - } + name_(name) { } int object_size_in_bytes() { return object_size_in_bytes_; } + virtual int ObjectAlignment() { return object_size_in_bytes_; } + // Prepares for a mark-compact GC. virtual void PrepareForMarkCompact(); diff --git a/src/store-buffer.cc b/src/store-buffer.cc index 9022b3be83..f85ec272de 100644 --- a/src/store-buffer.cc +++ b/src/store-buffer.cc @@ -496,7 +496,6 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion( Address map_aligned_end = MapEndAlign(end); ASSERT(map_aligned_start == start); - ASSERT(map_aligned_end == end); FindPointersToNewSpaceInMaps(map_aligned_start, map_aligned_end, @@ -524,52 +523,57 @@ void StoreBuffer::FindPointersToNewSpaceOnPage( RegionCallback region_callback, ObjectSlotCallback slot_callback) { Address visitable_start = page->ObjectAreaStart(); - Address end_of_page = page->ObjectAreaEnd(); Address visitable_end = visitable_start; Object* free_space_map = heap_->free_space_map(); Object* two_pointer_filler_map = heap_->two_pointer_filler_map(); - while (visitable_end < end_of_page) { - Object* o = *reinterpret_cast(visitable_end); - // Skip fillers but not things that look like fillers in the special - // garbage section which can contain anything. - if (o == free_space_map || - o == two_pointer_filler_map || - (visitable_end == space->top() && visitable_end != space->limit())) { - if (visitable_start != visitable_end) { - // After calling this the special garbage section may have moved. - (this->*region_callback)(visitable_start, - visitable_end, - slot_callback); - if (visitable_end >= space->top() && visitable_end < space->limit()) { - visitable_end = space->limit(); - visitable_start = visitable_end; - continue; + while (true) { // While the page grows (doesn't normally happen). 
+ Address end_of_page = page->ObjectAreaEnd(); + while (visitable_end < end_of_page) { + Object* o = *reinterpret_cast(visitable_end); + // Skip fillers but not things that look like fillers in the special + // garbage section which can contain anything. + if (o == free_space_map || + o == two_pointer_filler_map || + (visitable_end == space->top() && visitable_end != space->limit())) { + if (visitable_start != visitable_end) { + // After calling this the special garbage section may have moved. + (this->*region_callback)(visitable_start, + visitable_end, + slot_callback); + if (visitable_end >= space->top() && visitable_end < space->limit()) { + visitable_end = space->limit(); + visitable_start = visitable_end; + continue; + } + } + if (visitable_end == space->top() && visitable_end != space->limit()) { + visitable_start = visitable_end = space->limit(); + } else { + // At this point we are either at the start of a filler or we are at + // the point where the space->top() used to be before the + // visit_pointer_region call above. Either way we can skip the + // object at the current spot: We don't promise to visit objects + // allocated during heap traversal, and if space->top() moved then it + // must be because an object was allocated at this point. + visitable_start = + visitable_end + HeapObject::FromAddress(visitable_end)->Size(); + visitable_end = visitable_start; } - } - if (visitable_end == space->top() && visitable_end != space->limit()) { - visitable_start = visitable_end = space->limit(); } else { - // At this point we are either at the start of a filler or we are at - // the point where the space->top() used to be before the - // visit_pointer_region call above. Either way we can skip the - // object at the current spot: We don't promise to visit objects - // allocated during heap traversal, and if space->top() moved then it - // must be because an object was allocated at this point. - visitable_start = - visitable_end + HeapObject::FromAddress(visitable_end)->Size(); - visitable_end = visitable_start; + ASSERT(o != free_space_map); + ASSERT(o != two_pointer_filler_map); + ASSERT(visitable_end < space->top() || visitable_end >= space->limit()); + visitable_end += kPointerSize; } - } else { - ASSERT(o != free_space_map); - ASSERT(o != two_pointer_filler_map); - ASSERT(visitable_end < space->top() || visitable_end >= space->limit()); - visitable_end += kPointerSize; } + ASSERT(visitable_end >= end_of_page); + // If the page did not grow we are done. + if (end_of_page == page->ObjectAreaEnd()) break; } - ASSERT(visitable_end == end_of_page); + ASSERT(visitable_end == page->ObjectAreaEnd()); if (visitable_start != visitable_end) { (this->*region_callback)(visitable_start, visitable_end, diff --git a/src/utils.h b/src/utils.h index 1d40c98b9e..abcbefa404 100644 --- a/src/utils.h +++ b/src/utils.h @@ -153,11 +153,9 @@ int HandleObjectPointerCompare(const Handle* a, const Handle* b) { } -// Returns the smallest power of two which is >= x. If you pass in a -// number that is already a power of two, it is returned as is. -// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr., -// figure 3-3, page 48, where the function is called clp2. 
-inline uint32_t RoundUpToPowerOf2(uint32_t x) { +template <typename int_type> +inline int RoundUpToPowerOf2(int_type x_argument) { + uintptr_t x = static_cast<uintptr_t>(x_argument); ASSERT(x <= 0x80000000u); x = x - 1; x = x | (x >> 1); @@ -165,7 +163,7 @@ x = x | (x >> 4); x = x | (x >> 8); x = x | (x >> 16); - return x + 1; + return static_cast<int>(x + 1); } diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc index 6de509c02c..c0a298190e 100644 --- a/test/cctest/test-heap.cc +++ b/test/cctest/test-heap.cc @@ -1236,17 +1236,14 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) { obj = iterator.next()) { size_of_objects_2 += obj->Size(); } - // Delta must be within 5% of the larger result. - // TODO(gc): Tighten this up by distinguishing between byte - // arrays that are real and those that merely mark free space - // on the heap. + // Delta must be within 1% of the larger result. if (size_of_objects_1 > size_of_objects_2) { intptr_t delta = size_of_objects_1 - size_of_objects_2; PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, " "Iterator: %" V8_PTR_PREFIX "d, " "delta: %" V8_PTR_PREFIX "d\n", size_of_objects_1, size_of_objects_2, delta); - CHECK_GT(size_of_objects_1 / 20, delta); + CHECK_GT(size_of_objects_1 / 100, delta); } else { intptr_t delta = size_of_objects_2 - size_of_objects_1; PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, " diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc index 9de069d87f..f1b883f3d8 100644 --- a/test/cctest/test-mark-compact.cc +++ b/test/cctest/test-mark-compact.cc @@ -534,15 +534,15 @@ TEST(BootUpMemoryUse) { intptr_t booted_memory = MemoryInUse(); if (sizeof(initial_memory) == 8) { if (v8::internal::Snapshot::IsEnabled()) { - CHECK_LE(booted_memory - initial_memory, 6654 * 1024); // 6444. + CHECK_LE(booted_memory - initial_memory, 3050 * 1024); // 2984. } else { - CHECK_LE(booted_memory - initial_memory, 6777 * 1024); // 6596. + CHECK_LE(booted_memory - initial_memory, 3050 * 1024); // 3008. } } else { if (v8::internal::Snapshot::IsEnabled()) { - CHECK_LE(booted_memory - initial_memory, 6500 * 1024); // 6356. + CHECK_LE(booted_memory - initial_memory, 2000 * 1024); // 1940. } else { - CHECK_LE(booted_memory - initial_memory, 6654 * 1024); // 6424 + CHECK_LE(booted_memory - initial_memory, 2000 * 1024); // 1948 } } } diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc index 6e495bc169..6eb1dddbf4 100644 --- a/test/cctest/test-spaces.cc +++ b/test/cctest/test-spaces.cc @@ -140,8 +140,8 @@ TEST(MemoryAllocator) { heap->MaxReserved(), OLD_POINTER_SPACE, NOT_EXECUTABLE); - Page* first_page = - memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE); + Page* first_page = memory_allocator->AllocatePage( + Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE); first_page->InsertAfter(faked_space.anchor()->prev_page()); CHECK(first_page->is_valid()); @@ -154,7 +154,8 @@ TEST(MemoryAllocator) { // Again, we should get n or n - 1 pages. Page* other = - memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE); + memory_allocator->AllocatePage( + Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE); CHECK(other->is_valid()); total_pages++; other->InsertAfter(first_page);
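For reference, a standalone version of the templated RoundUpToPowerOf2 from the utils.h hunk above (the Hacker's Delight "clp2" bit-smear), with a small check in main; the exact signature here is a sketch and may differ in detail from the tree:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Round up to the next power of two ("clp2" from Hacker's Delight, fig. 3-3).
// A power of two is returned unchanged. Templated so callers can pass int or
// intptr_t sizes, as the patched PagedSpace::Expand does.
template <typename int_type>
inline int_type RoundUpToPowerOf2(int_type x_argument) {
  uintptr_t x = static_cast<uintptr_t>(x_argument);
  assert(x <= 0x80000000u);
  x = x - 1;
  x = x | (x >> 1);
  x = x | (x >> 2);
  x = x | (x >> 4);
  x = x | (x >> 8);
  x = x | (x >> 16);
  return static_cast<int_type>(x + 1);
}

int main() {
  std::printf("%d %d %d\n",
              RoundUpToPowerOf2(1000),   // 1024
              RoundUpToPowerOf2(1024),   // 1024
              RoundUpToPowerOf2(1025));  // 2048
}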