Remove dead Space::ReserveSpace implementations.
R=ishell@chromium.org

Review URL: https://codereview.chromium.org/67813009

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@17648 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent f27eca66b3
commit d8e85bf0e0
src/spaces.cc

@@ -2504,29 +2504,6 @@ intptr_t FreeList::SumFreeLists() {
 // -----------------------------------------------------------------------------
 // OldSpace implementation
 
-bool NewSpace::ReserveSpace(int bytes) {
-  // We can't reliably unpack a partial snapshot that needs more new space
-  // space than the minimum NewSpace size. The limit can be set lower than
-  // the end of new space either because there is more space on the next page
-  // or because we have lowered the limit in order to get periodic incremental
-  // marking. The most reliable way to ensure that there is linear space is
-  // to do the allocation, then rewind the limit.
-  ASSERT(bytes <= InitialCapacity());
-  MaybeObject* maybe = AllocateRaw(bytes);
-  Object* object = NULL;
-  if (!maybe->ToObject(&object)) return false;
-  HeapObject* allocation = HeapObject::cast(object);
-  Address top = allocation_info_.top();
-  if ((top - bytes) == allocation->address()) {
-    allocation_info_.set_top(allocation->address());
-    return true;
-  }
-  // There may be a borderline case here where the allocation succeeded, but
-  // the limit and top have moved on to a new page. In that case we try again.
-  return ReserveSpace(bytes);
-}
-
-
 void PagedSpace::PrepareForMarkCompact() {
   // We don't have a linear allocation area while sweeping. It will be restored
   // on the first allocation after the sweep.
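The removed NewSpace::ReserveSpace relies on an allocate-then-rewind trick: it proves that the requested bytes of contiguous linear space exist by actually bump-allocating them, then rewinds top so the space is merely reserved, not consumed. Below is a minimal standalone sketch of that idea over a toy bump allocator; every name in it is a hypothetical illustration, not V8 API, and where the real code retries on a fresh page this toy simply fails.

#include <cstdint>

// Toy bump allocator standing in for a semispace: top_ grows upward
// toward limit_, mirroring NewSpace's allocation_info_.
struct BumpSpace {
  uint8_t* top_;
  uint8_t* limit_;

  // Bump allocation: returns the old top and advances it, or nullptr
  // when the linear area is exhausted.
  uint8_t* AllocateRaw(int bytes) {
    if (top_ + bytes > limit_) return nullptr;
    uint8_t* result = top_;
    top_ += bytes;
    return result;
  }

  // Allocate-then-rewind: prove that `bytes` of contiguous space exist
  // by allocating them, then move top_ back so the space is merely
  // reserved rather than consumed.
  bool ReserveSpace(int bytes) {
    uint8_t* allocation = AllocateRaw(bytes);
    if (allocation == nullptr) return false;
    if (top_ - bytes == allocation) {
      top_ = allocation;  // rewind the bump pointer
      return true;
    }
    // The real NewSpace retries here, because top and limit may have
    // moved on to a new page; this single-region toy cannot hit that case.
    return false;
  }
};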
@@ -2561,28 +2538,6 @@ void PagedSpace::PrepareForMarkCompact() {
 }
 
 
-bool PagedSpace::ReserveSpace(int size_in_bytes) {
-  ASSERT(size_in_bytes <= AreaSize());
-  ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
-  Address current_top = allocation_info_.top();
-  Address new_top = current_top + size_in_bytes;
-  if (new_top <= allocation_info_.limit()) return true;
-
-  HeapObject* new_area = free_list_.Allocate(size_in_bytes);
-  if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
-  if (new_area == NULL) return false;
-
-  int old_linear_size = static_cast<int>(limit() - top());
-  // Mark the old linear allocation area with a free space so it can be
-  // skipped when scanning the heap. This also puts it back in the free list
-  // if it is big enough.
-  Free(top(), old_linear_size);
-
-  SetTop(new_area->address(), new_area->address() + size_in_bytes);
-  return true;
-}
-
-
 intptr_t PagedSpace::SizeOfObjects() {
   ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
   return Size() - unswept_free_bytes_ - (limit() - top());
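The removed PagedSpace::ReserveSpace is a two-tier reservation: a fast check against the current linear (bump) area, and a slow path that carves a fresh area out of the free list and retires the old linear area so the heap remains iterable. A rough self-contained sketch of that shape, assuming a hypothetical FreeList type; none of these names are the real V8 API, and the real slow path's fallback to SlowAllocateRaw is omitted here.

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical free list: hands out previously freed byte ranges.
// First-fit, and it simply drops any leftover tail of an oversized
// chunk, which a real free list would re-split and keep.
struct FreeList {
  std::vector<std::pair<uint8_t*, int> > chunks_;

  uint8_t* Allocate(int bytes) {
    for (std::size_t i = 0; i < chunks_.size(); ++i) {
      if (chunks_[i].second >= bytes) {
        uint8_t* area = chunks_[i].first;
        chunks_.erase(chunks_.begin() + i);
        return area;
      }
    }
    return nullptr;
  }

  void Free(uint8_t* start, int bytes) {
    if (bytes > 0) chunks_.push_back(std::make_pair(start, bytes));
  }
};

struct PagedLikeSpace {
  uint8_t* top_;
  uint8_t* limit_;
  FreeList free_list_;

  bool ReserveSpace(int bytes) {
    // Fast path: the current linear allocation area already has room.
    if (top_ + bytes <= limit_) return true;

    // Slow path: grab a fresh area from the free list.
    uint8_t* new_area = free_list_.Allocate(bytes);
    if (new_area == nullptr) return false;

    // Retire the old linear area back to the free list so a heap
    // scanner never encounters an unaccounted gap.
    free_list_.Free(top_, static_cast<int>(limit_ - top_));

    // Re-point the linear area at the reserved block.
    top_ = new_area;
    limit_ = new_area + bytes;
    return true;
  }
};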
@@ -2598,15 +2553,6 @@ void PagedSpace::RepairFreeListsAfterBoot() {
 }
 
 
-// You have to call this last, since the implementation from PagedSpace
-// doesn't know that memory was 'promised' to large object space.
-bool LargeObjectSpace::ReserveSpace(int bytes) {
-  return heap()->OldGenerationCapacityAvailable() >= bytes &&
-         (!heap()->incremental_marking()->IsStopped() ||
-          heap()->OldGenerationSpaceAvailable() >= bytes);
-}
-
-
 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
   if (IsLazySweepingComplete()) return true;
 
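Unlike the paged-space version, the removed LargeObjectSpace::ReserveSpace never allocates anything; it is a pure capacity predicate: the old generation must have capacity for the request, and unless incremental marking is already running (and therefore reclaiming memory), actual free space must cover it as well. As a boolean sketch with hypothetical parameter names standing in for the heap queries:

#include <cstdint>

// Pure predicate mirroring the removed check; all names hypothetical.
bool CanReserveLargeObjectSpace(int64_t capacity_available,
                                int64_t space_available,
                                bool incremental_marking_running,
                                int bytes) {
  return capacity_available >= bytes &&
         (incremental_marking_running || space_available >= bytes);
}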
src/spaces.h (14 changed lines)
@@ -1768,8 +1768,6 @@ class PagedSpace : public Space {
   // failure object if not.
   MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
 
-  virtual bool ReserveSpace(int bytes);
-
   // Give a block of memory to the space's free list. It might be added to
   // the free list or accounted as waste.
   // If add_to_freelist is false then just accounting stats are updated and
@@ -2167,11 +2165,6 @@ class SemiSpace : public Space {
     return 0;
   }
 
-  virtual bool ReserveSpace(int bytes) {
-    UNREACHABLE();
-    return false;
-  }
-
   bool is_committed() { return committed_; }
   bool Commit();
   bool Uncommit();
@@ -2535,8 +2528,6 @@ class NewSpace : public Space {
   // if successful.
   bool AddFreshPage();
 
-  virtual bool ReserveSpace(int bytes);
-
 #ifdef VERIFY_HEAP
   // Verify the active semispace.
   virtual void Verify();
@@ -2849,11 +2840,6 @@ class LargeObjectSpace : public Space {
   // Checks whether the space is empty.
   bool IsEmpty() { return first_page_ == NULL; }
 
-  // See the comments for ReserveSpace in the Space class. This has to be
-  // called after ReserveSpace has been called on the paged spaces, since they
-  // may use some memory, leaving less for large objects.
-  virtual bool ReserveSpace(int bytes);
-
   LargePage* first_page() { return first_page_; }
 
 #ifdef VERIFY_HEAP
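The header comment removed here encodes a calling protocol: ReserveSpace must run on the paged spaces first and on the large object space last, because paged reservations consume old-generation budget that the large-object predicate reads. A hedged sketch of that ordering, with hypothetical stand-in interfaces rather than the real space classes:

// Hypothetical stand-in for the spaces' common interface.
struct Space {
  virtual bool ReserveSpace(int bytes) = 0;
  virtual ~Space() {}
};

// Ordering contract from the removed comment: paged spaces reserve
// first, the large object space strictly last.
inline bool ReserveInOrder(Space** paged, int paged_count, int bytes,
                           Space* large_object_space) {
  for (int i = 0; i < paged_count; ++i) {
    if (!paged[i]->ReserveSpace(bytes)) return false;
  }
  return large_object_space->ReserveSpace(bytes);  // must run last
}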