Make the top and limit fields in AllocationInfo private, assert on non-aligned setting of these fields, and eliminate indirect access to the top pointer through the top address.

BUG=
R=mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/40083002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@17391 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
hpayer@chromium.org 2013-10-25 09:58:21 +00:00
parent 266dfe7819
commit f8a4bd7c4e
4 changed files with 123 additions and 66 deletions
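
The change is mechanical but easy to misread from the raw diff, so here is a minimal, self-contained sketch of the accessor pattern it introduces. This is not the V8 code itself: Address, kAlignmentMask, assert, arena, and main below are simplified stand-ins for V8's Address type, HeapObjectTagMask(), SLOW_ASSERT, and the call sites updated in the files that follow.

#include <cassert>
#include <cstdint>

using Address = std::uint8_t*;

// Stand-in for V8's HeapObjectTagMask(): the low bits that must be clear on an
// untagged, pointer-aligned allocation address.
constexpr std::uintptr_t kAlignmentMask = sizeof(void*) - 1;

class AllocationInfo {
 public:
  AllocationInfo() : top_(nullptr), limit_(nullptr) {}

  // Setters assert that the new value is aligned ("assert on non-aligned
  // setting of these fields").
  void set_top(Address top) {
    assert(top == nullptr ||
           (reinterpret_cast<std::uintptr_t>(top) & kAlignmentMask) == 0);
    top_ = top;
  }
  Address top() const { return top_; }

  void set_limit(Address limit) {
    assert(limit == nullptr ||
           (reinterpret_cast<std::uintptr_t>(limit) & kAlignmentMask) == 0);
    limit_ = limit;
  }
  Address limit() const { return limit_; }

  // The field addresses remain reachable for code that needs them directly.
  Address* top_address() { return &top_; }
  Address* limit_address() { return &limit_; }

 private:
  Address top_;    // Current allocation top.
  Address limit_;  // Current allocation limit.
};

int main() {
  alignas(sizeof(void*)) static std::uint8_t arena[1024];
  AllocationInfo info;
  info.set_top(arena);                    // Aligned: passes the assert.
  info.set_limit(arena + sizeof(arena));
  // A caller that previously wrote "*(space->allocation_top_address()) = x"
  // now goes through set_top(), so the alignment check always runs.
  info.set_top(info.top() + 2 * sizeof(void*));
  // info.set_top(info.top() + 1);        // Would trip the alignment assert.
  return 0;
}

Routing every write through a checked setter is what allows the fields to stay private and carry the alignment invariant, which the real change enforces with SLOW_ASSERT in debug builds.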


@@ -9156,7 +9156,7 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
   if (newspace->Contains(start_of_string) &&
       newspace->top() == start_of_string + old_size) {
     // Last allocated object in new space. Simply lower allocation top.
-    *(newspace->allocation_top_address()) = start_of_string + new_size;
+    newspace->set_top(start_of_string + new_size);
   } else {
     // Sizes are pointer size aligned, so that we can use filler objects
     // that are a multiple of pointer size.


@@ -264,11 +264,11 @@ void Page::set_prev_page(Page* page) {
 // allocation) so it can be used by all the allocation functions and for all
 // the paged spaces.
 HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
-  Address current_top = allocation_info_.top;
+  Address current_top = allocation_info_.top();
   Address new_top = current_top + size_in_bytes;
-  if (new_top > allocation_info_.limit) return NULL;
+  if (new_top > allocation_info_.limit()) return NULL;
-  allocation_info_.top = new_top;
+  allocation_info_.set_top(new_top);
   return HeapObject::FromAddress(current_top);
 }
@@ -324,29 +324,29 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes,
 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
-  Address old_top = allocation_info_.top;
+  Address old_top = allocation_info_.top();
 #ifdef DEBUG
   // If we are stressing compaction we waste some memory in new space
   // in order to get more frequent GCs.
   if (FLAG_stress_compaction && !heap()->linear_allocation()) {
-    if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
+    if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
       int filler_size = size_in_bytes * 4;
       for (int i = 0; i < filler_size; i += kPointerSize) {
         *(reinterpret_cast<Object**>(old_top + i)) =
             heap()->one_pointer_filler_map();
       }
       old_top += filler_size;
-      allocation_info_.top += filler_size;
+      allocation_info_.set_top(allocation_info_.top() + filler_size);
     }
   }
 #endif
-  if (allocation_info_.limit - old_top < size_in_bytes) {
+  if (allocation_info_.limit() - old_top < size_in_bytes) {
     return SlowAllocateRaw(size_in_bytes);
   }
   HeapObject* obj = HeapObject::FromAddress(old_top);
-  allocation_info_.top += size_in_bytes;
+  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
   HeapProfiler* profiler = heap()->isolate()->heap_profiler();


@@ -960,8 +960,8 @@ PagedSpace::PagedSpace(Heap* heap,
                   * AreaSize();
   accounting_stats_.Clear();
-  allocation_info_.top = NULL;
-  allocation_info_.limit = NULL;
+  allocation_info_.set_top(NULL);
+  allocation_info_.set_limit(NULL);
   anchor_.InitializeAsAnchor(this);
 }
@@ -990,7 +990,7 @@ void PagedSpace::TearDown() {
 size_t PagedSpace::CommittedPhysicalMemory() {
   if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = 0;
   PageIterator it(this);
   while (it.has_next()) {
@@ -1142,8 +1142,9 @@ void PagedSpace::ReleasePage(Page* page, bool unlink) {
     DecreaseUnsweptFreeBytes(page);
   }
-  if (Page::FromAllocationTop(allocation_info_.top) == page) {
-    allocation_info_.top = allocation_info_.limit = NULL;
+  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+    allocation_info_.set_top(NULL);
+    allocation_info_.set_limit(NULL);
   }
   if (unlink) {
@@ -1170,12 +1171,12 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
   if (was_swept_conservatively_) return;
   bool allocation_pointer_found_in_space =
-      (allocation_info_.top == allocation_info_.limit);
+      (allocation_info_.top() == allocation_info_.limit());
   PageIterator page_iterator(this);
   while (page_iterator.has_next()) {
     Page* page = page_iterator.next();
     CHECK(page->owner() == this);
-    if (page == Page::FromAllocationTop(allocation_info_.top)) {
+    if (page == Page::FromAllocationTop(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
     }
     CHECK(page->WasSweptPrecisely());
@@ -1286,8 +1287,8 @@ void NewSpace::TearDown() {
   }
   start_ = NULL;
-  allocation_info_.top = NULL;
-  allocation_info_.limit = NULL;
+  allocation_info_.set_top(NULL);
+  allocation_info_.set_limit(NULL);
   to_space_.TearDown();
   from_space_.TearDown();
@@ -1344,22 +1345,22 @@ void NewSpace::Shrink() {
       }
     }
   }
-  allocation_info_.limit = to_space_.page_high();
+  allocation_info_.set_limit(to_space_.page_high());
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 void NewSpace::UpdateAllocationInfo() {
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
-  allocation_info_.top = to_space_.page_low();
-  allocation_info_.limit = to_space_.page_high();
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  allocation_info_.set_top(to_space_.page_low());
+  allocation_info_.set_limit(to_space_.page_high());
   // Lower limit during incremental marking.
   if (heap()->incremental_marking()->IsMarking() &&
       inline_allocation_limit_step() != 0) {
     Address new_limit =
-        allocation_info_.top + inline_allocation_limit_step();
-    allocation_info_.limit = Min(new_limit, allocation_info_.limit);
+        allocation_info_.top() + inline_allocation_limit_step();
+    allocation_info_.set_limit(Min(new_limit, allocation_info_.limit()));
   }
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
@@ -1378,7 +1379,7 @@ void NewSpace::ResetAllocationInfo() {
 bool NewSpace::AddFreshPage() {
-  Address top = allocation_info_.top;
+  Address top = allocation_info_.top();
   if (NewSpacePage::IsAtStart(top)) {
     // The current page is already empty. Don't try to make another.
@@ -1410,15 +1411,16 @@ bool NewSpace::AddFreshPage() {
 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
-  Address old_top = allocation_info_.top;
+  Address old_top = allocation_info_.top();
   Address new_top = old_top + size_in_bytes;
   Address high = to_space_.page_high();
-  if (allocation_info_.limit < high) {
+  if (allocation_info_.limit() < high) {
     // Incremental marking has lowered the limit to get a
     // chance to do a step.
-    allocation_info_.limit = Min(
-        allocation_info_.limit + inline_allocation_limit_step_,
+    Address new_limit = Min(
+        allocation_info_.limit() + inline_allocation_limit_step_,
         high);
+    allocation_info_.set_limit(new_limit);
     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
     heap()->incremental_marking()->Step(
         bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
@@ -1973,7 +1975,7 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
 size_t NewSpace::CommittedPhysicalMemory() {
   if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = to_space_.CommittedPhysicalMemory();
   if (from_space_.is_committed()) {
     size += from_space_.CommittedPhysicalMemory();
@@ -2499,9 +2501,9 @@ bool NewSpace::ReserveSpace(int bytes) {
   Object* object = NULL;
   if (!maybe->ToObject(&object)) return false;
   HeapObject* allocation = HeapObject::cast(object);
-  Address top = allocation_info_.top;
+  Address top = allocation_info_.top();
   if ((top - bytes) == allocation->address()) {
-    allocation_info_.top = allocation->address();
+    allocation_info_.set_top(allocation->address());
     return true;
   }
   // There may be a borderline case here where the allocation succeeded, but
@@ -2547,9 +2549,9 @@ void PagedSpace::PrepareForMarkCompact() {
 bool PagedSpace::ReserveSpace(int size_in_bytes) {
   ASSERT(size_in_bytes <= AreaSize());
   ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
-  Address current_top = allocation_info_.top;
+  Address current_top = allocation_info_.top();
   Address new_top = current_top + size_in_bytes;
-  if (new_top <= allocation_info_.limit) return true;
+  if (new_top <= allocation_info_.limit()) return true;
   HeapObject* new_area = free_list_.Allocate(size_in_bytes);
   if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
@@ -2624,16 +2626,17 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
-  if (allocation_info_.top >= allocation_info_.limit) return;
+  if (allocation_info_.top() >= allocation_info_.limit()) return;
-  if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
+  if (Page::FromAllocationTop(allocation_info_.top())->
+      IsEvacuationCandidate()) {
     // Create filler object to keep page iterable if it was iterable.
     int remaining =
-        static_cast<int>(allocation_info_.limit - allocation_info_.top);
-    heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
+        static_cast<int>(allocation_info_.limit() - allocation_info_.top());
+    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
-    allocation_info_.top = NULL;
-    allocation_info_.limit = NULL;
+    allocation_info_.set_top(NULL);
+    allocation_info_.set_limit(NULL);
   }
 }


@@ -1317,18 +1317,53 @@ class PageIterator BASE_EMBEDDED {
 // space.
 class AllocationInfo {
  public:
-  AllocationInfo() : top(NULL), limit(NULL) {
+  AllocationInfo() : top_(NULL), limit_(NULL) {
   }
-  Address top;  // Current allocation top.
-  Address limit;  // Current allocation limit.
+  INLINE(void set_top(Address top)) {
+    SLOW_ASSERT(top == NULL ||
+        (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
+    top_ = top;
+  }
+  INLINE(Address top()) const {
+    SLOW_ASSERT(top_ == NULL ||
+        (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
+    return top_;
+  }
+  Address* top_address() {
+    return &top_;
+  }
+  INLINE(void set_limit(Address limit)) {
+    SLOW_ASSERT(limit == NULL ||
+        (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
+    limit_ = limit;
+  }
+  INLINE(Address limit()) const {
+    SLOW_ASSERT(limit_ == NULL ||
+        (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
+    return limit_;
+  }
+  Address* limit_address() {
+    return &limit_;
+  }
 #ifdef DEBUG
   bool VerifyPagedAllocation() {
-    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
-           && (top <= limit);
+    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
+           && (top_ <= limit_);
   }
 #endif
+ private:
+  // Current allocation top.
+  Address top_;
+  // Current allocation limit.
+  Address limit_;
 };
@@ -1707,12 +1742,18 @@ class PagedSpace : public Space {
   virtual intptr_t Waste() { return accounting_stats_.Waste(); }
   // Returns the allocation pointer in this space.
-  Address top() { return allocation_info_.top; }
-  Address limit() { return allocation_info_.limit; }
+  Address top() { return allocation_info_.top(); }
+  Address limit() { return allocation_info_.limit(); }
-  // The allocation top and limit addresses.
-  Address* allocation_top_address() { return &allocation_info_.top; }
-  Address* allocation_limit_address() { return &allocation_info_.limit; }
+  // The allocation top address.
+  Address* allocation_top_address() {
+    return allocation_info_.top_address();
+  }
+  // The allocation limit address.
+  Address* allocation_limit_address() {
+    return allocation_info_.limit_address();
+  }
   enum AllocationType {
     NEW_OBJECT,
@@ -1745,9 +1786,9 @@ class PagedSpace : public Space {
   void SetTop(Address top, Address limit) {
     ASSERT(top == limit ||
            Page::FromAddress(top) == Page::FromAddress(limit - 1));
-    MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
-    allocation_info_.top = top;
-    allocation_info_.limit = limit;
+    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+    allocation_info_.set_top(top);
+    allocation_info_.set_limit(limit);
   }
   void Allocate(int bytes) {
@@ -2388,9 +2429,15 @@ class NewSpace : public Space {
   // Return the address of the allocation pointer in the active semispace.
   Address top() {
-    ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
-    return allocation_info_.top;
+    ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
+    return allocation_info_.top();
   }
+  void set_top(Address top) {
+    ASSERT(to_space_.current_page()->ContainsLimit(top));
+    allocation_info_.set_top(top);
+  }
   // Return the address of the first object in the active semispace.
   Address bottom() { return to_space_.space_start(); }
@@ -2415,9 +2462,15 @@ class NewSpace : public Space {
     return reinterpret_cast<Address>(index << kPointerSizeLog2);
   }
-  // The allocation top and limit addresses.
-  Address* allocation_top_address() { return &allocation_info_.top; }
-  Address* allocation_limit_address() { return &allocation_info_.limit; }
+  // The allocation top and limit address.
+  Address* allocation_top_address() {
+    return allocation_info_.top_address();
+  }
+  // The allocation limit address.
+  Address* allocation_limit_address() {
+    return allocation_info_.limit_address();
+  }
   MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
@@ -2427,13 +2480,14 @@ class NewSpace : public Space {
   void LowerInlineAllocationLimit(intptr_t step) {
     inline_allocation_limit_step_ = step;
     if (step == 0) {
-      allocation_info_.limit = to_space_.page_high();
+      allocation_info_.set_limit(to_space_.page_high());
     } else {
-      allocation_info_.limit = Min(
-          allocation_info_.top + inline_allocation_limit_step_,
-          allocation_info_.limit);
+      Address new_limit = Min(
+          allocation_info_.top() + inline_allocation_limit_step_,
+          allocation_info_.limit());
+      allocation_info_.set_limit(new_limit);
     }
-    top_on_previous_step_ = allocation_info_.top;
+    top_on_previous_step_ = allocation_info_.top();
   }
   // Get the extent of the inactive semispace (for use as a marking stack,
@@ -2580,9 +2634,9 @@ class OldSpace : public PagedSpace {
 // For contiguous spaces, top should be in the space (or at the end) and limit
 // should be the end of the space.
 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
-  SLOW_ASSERT((space).page_low() <= (info).top \
-              && (info).top <= (space).page_high() \
-              && (info).limit <= (space).page_high())
+  SLOW_ASSERT((space).page_low() <= (info).top() \
+              && (info).top() <= (space).page_high() \
+              && (info).limit() <= (space).page_high())
 // -----------------------------------------------------------------------------