Remove some asserts to speed up debug mode.
Review URL: http://codereview.chromium.org/8256012

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9606 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent fae807b3bb
commit 1cca5468aa
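Most of the churn below converts assertions on scavenging, marking, and allocation fast paths from ASSERT to SLOW_ASSERT, and deletes a handful of especially hot checks outright, so that a routine debug build no longer pays for them. The following standalone sketch shows the intended split; the macro bodies and the --enable-slow-asserts gating are assumptions for illustration, not V8's exact checks.h definitions.

// Sketch only: ASSERT fires on every debug run, while a SLOW_ASSERT-style
// macro evaluates its (possibly expensive) condition only when a runtime
// flag asks for it. The flag name below is a stand-in.
#include <cassert>
#include <cstdlib>

static bool enable_slow_asserts = false;  // stand-in for --enable-slow-asserts

#ifdef DEBUG
#define ASSERT(condition) assert(condition)
// Short-circuits: when the flag is off, (condition) is never evaluated.
#define SLOW_ASSERT(condition) assert(!enable_slow_asserts || (condition))
#else
#define ASSERT(condition) ((void) 0)
#define SLOW_ASSERT(condition) ((void) 0)
#endif

static bool ExpensiveInvariantHolds() {
  // Imagine a full heap walk here; the point is that it only runs when the
  // slow-assert flag is turned on explicitly.
  return true;
}

int main() {
  enable_slow_asserts = (std::getenv("SLOW_ASSERTS") != NULL);
  SLOW_ASSERT(ExpensiveInvariantHolds());
  return 0;
}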
@@ -359,7 +359,6 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
 
 
 void Heap::CopyBlock(Address dst, Address src, int byte_size) {
-  ASSERT(IsAligned(byte_size, kPointerSize));
   CopyWords(reinterpret_cast<Object**>(dst),
             reinterpret_cast<Object**>(src),
             byte_size / kPointerSize);
src/heap.cc (23 changed lines)
@@ -1443,9 +1443,9 @@ class ScavengingVisitor : public StaticVisitorBase {
                                     HeapObject** slot,
                                     HeapObject* object,
                                     int object_size) {
-    ASSERT((size_restriction != SMALL) ||
+    SLOW_ASSERT((size_restriction != SMALL) ||
                 (object_size <= Page::kMaxHeapObjectSize));
-    ASSERT(object->Size() == object_size);
+    SLOW_ASSERT(object->Size() == object_size);
 
     Heap* heap = map->GetHeap();
     if (heap->ShouldBePromoted(object->address(), object_size)) {
@@ -1678,9 +1678,9 @@ void Heap::SelectScavengingVisitorsTable() {
 
 
 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
-  ASSERT(HEAP->InFromSpace(object));
+  SLOW_ASSERT(HEAP->InFromSpace(object));
   MapWord first_word = object->map_word();
-  ASSERT(!first_word.IsForwardingAddress());
+  SLOW_ASSERT(!first_word.IsForwardingAddress());
   Map* map = first_word.ToMap();
   map->GetHeap()->DoScavengeObject(map, p, object);
 }
@@ -3688,7 +3688,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
 MaybeObject* Heap::CopyJSObject(JSObject* source) {
   // Never used to copy functions. If functions need to be copied we
   // have to be careful to clear the literals array.
-  ASSERT(!source->IsJSFunction());
+  SLOW_ASSERT(!source->IsJSFunction());
 
   // Make the clone.
   Map* map = source->map();
@@ -3714,7 +3714,7 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
-    ASSERT(InNewSpace(clone));
+    SLOW_ASSERT(InNewSpace(clone));
     // Since we know the clone is allocated in new space, we can copy
     // the contents without worrying about updating the write barrier.
     CopyBlock(HeapObject::cast(clone)->address(),
@@ -3722,7 +3722,8 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
               object_size);
   }
 
-  ASSERT(JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
+  SLOW_ASSERT(
+      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
@@ -4802,12 +4803,12 @@ void Heap::IterateAndMarkPointersToFromSpace(Address start,
                    HeapObject::cast(object));
         Object* new_object = *slot;
         if (InNewSpace(new_object)) {
-          ASSERT(Heap::InToSpace(new_object));
-          ASSERT(new_object->IsHeapObject());
+          SLOW_ASSERT(Heap::InToSpace(new_object));
+          SLOW_ASSERT(new_object->IsHeapObject());
           store_buffer_.EnterDirectlyIntoStoreBuffer(
               reinterpret_cast<Address>(slot));
         }
-        ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
+        SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
       } else if (record_slots &&
                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
         mark_compact_collector()->RecordSlot(slot, slot, object);
@@ -143,9 +143,6 @@ void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
 
 
 void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
-  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
-  ASSERT(obj->Size() >= 2*kPointerSize);
-  ASSERT(IsMarking());
   Marking::WhiteToGrey(mark_bit);
 }
 
@@ -739,8 +739,8 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) {
     }
 
     MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
-    ASSERT(Marking::IsGrey(obj_mark_bit) ||
+    SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
                 (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
     Marking::MarkBlack(obj_mark_bit);
     MemoryChunk::IncrementLiveBytes(obj->address(), size);
   }
@@ -38,7 +38,7 @@ namespace internal {
 
 
 MarkBit Marking::MarkBitFrom(Address addr) {
-  MemoryChunk *p = MemoryChunk::FromAddress(addr);
+  MemoryChunk* p = MemoryChunk::FromAddress(addr);
   return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
                                          p->ContainsOnlyData());
 }
@@ -61,68 +61,52 @@ class Marking {
   // Impossible markbits: 01
   static const char* kImpossibleBitPattern;
   static inline bool IsImpossible(MarkBit mark_bit) {
-    ASSERT(strcmp(kImpossibleBitPattern, "01") == 0);
     return !mark_bit.Get() && mark_bit.Next().Get();
   }
 
   // Black markbits: 10 - this is required by the sweeper.
   static const char* kBlackBitPattern;
   static inline bool IsBlack(MarkBit mark_bit) {
-    ASSERT(strcmp(kBlackBitPattern, "10") == 0);
-    ASSERT(!IsImpossible(mark_bit));
     return mark_bit.Get() && !mark_bit.Next().Get();
   }
 
   // White markbits: 00 - this is required by the mark bit clearer.
   static const char* kWhiteBitPattern;
   static inline bool IsWhite(MarkBit mark_bit) {
-    ASSERT(strcmp(kWhiteBitPattern, "00") == 0);
-    ASSERT(!IsImpossible(mark_bit));
     return !mark_bit.Get();
   }
 
   // Grey markbits: 11
   static const char* kGreyBitPattern;
   static inline bool IsGrey(MarkBit mark_bit) {
-    ASSERT(strcmp(kGreyBitPattern, "11") == 0);
-    ASSERT(!IsImpossible(mark_bit));
     return mark_bit.Get() && mark_bit.Next().Get();
   }
 
   static inline void MarkBlack(MarkBit mark_bit) {
     mark_bit.Set();
     mark_bit.Next().Clear();
-    ASSERT(Marking::IsBlack(mark_bit));
   }
 
   static inline void BlackToGrey(MarkBit markbit) {
-    ASSERT(IsBlack(markbit));
     markbit.Next().Set();
-    ASSERT(IsGrey(markbit));
   }
 
   static inline void WhiteToGrey(MarkBit markbit) {
-    ASSERT(IsWhite(markbit));
     markbit.Set();
     markbit.Next().Set();
-    ASSERT(IsGrey(markbit));
   }
 
   static inline void GreyToBlack(MarkBit markbit) {
-    ASSERT(IsGrey(markbit));
     markbit.Next().Clear();
-    ASSERT(IsBlack(markbit));
   }
 
   static inline void BlackToGrey(HeapObject* obj) {
-    ASSERT(obj->Size() >= 2 * kPointerSize);
     BlackToGrey(MarkBitFrom(obj));
   }
 
   static inline void AnyToGrey(MarkBit markbit) {
     markbit.Set();
     markbit.Next().Set();
-    ASSERT(IsGrey(markbit));
   }
 
   // Returns true if the the object whose mark is transferred is marked black.
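The pattern asserts deleted from the Marking helpers above only re-derived the two-bit encoding that the surrounding comments already document: white 00, impossible 01, black 10, grey 11, where the second bit is the one MarkBit::Next() exposes. A self-contained sketch of that encoding follows; MarkBitSketch and its helpers are made-up stand-ins, not V8's MarkBit.

// Two-bit mark encoding, mirroring the predicates kept in the hunk above.
#include <cassert>

struct MarkBitSketch {
  bool bit;   // what MarkBit::Get() would return
  bool next;  // what MarkBit::Next().Get() would return
};

bool IsWhite(MarkBitSketch m)      { return !m.bit; }            // 00
bool IsImpossible(MarkBitSketch m) { return !m.bit && m.next; }  // 01
bool IsBlack(MarkBitSketch m)      { return m.bit && !m.next; }  // 10
bool IsGrey(MarkBitSketch m)       { return m.bit && m.next; }   // 11

int main() {
  MarkBitSketch m = { false, false };  // freshly cleared bits: white
  assert(IsWhite(m));
  m.bit = true;  m.next = true;        // WhiteToGrey sets both bits
  assert(IsGrey(m));
  m.next = false;                      // GreyToBlack clears the second bit
  assert(IsBlack(m) && !IsImpossible(m));
  return 0;
}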
@@ -173,8 +157,6 @@ class Marking {
       to_mark_bit.Next().Set();
       is_black = false;  // Was actually gray.
     }
-    ASSERT(Color(from) == Color(to));
-    ASSERT(is_black == (Color(to) == BLACK_OBJECT));
     return is_black;
   }
 
@@ -227,7 +209,6 @@ class MarkingDeque {
   inline void PushGrey(HeapObject* object) {
     ASSERT(object->IsHeapObject());
     if (IsFull()) {
-      ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
       SetOverflowed();
     } else {
       array_[top_] = object;
@@ -246,7 +227,6 @@ class MarkingDeque {
   inline void UnshiftGrey(HeapObject* object) {
     ASSERT(object->IsHeapObject());
     if (IsFull()) {
-      ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
       SetOverflowed();
     } else {
       bottom_ = ((bottom_ - 1) & mask_);
@@ -1300,7 +1300,6 @@ ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
 
 FixedArrayBase* JSObject::elements() {
   Object* array = READ_FIELD(this, kElementsOffset);
-  ASSERT(array->HasValidElements());
   return static_cast<FixedArrayBase*>(array);
 }
 
@@ -257,16 +257,12 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
   if (new_top > allocation_info_.limit) return NULL;
 
   allocation_info_.top = new_top;
-  ASSERT(allocation_info_.VerifyPagedAllocation());
-  ASSERT(current_top != NULL);
   return HeapObject::FromAddress(current_top);
 }
 
 
 // Raw allocation.
 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
-  ASSERT(HasBeenSetup());
-  ASSERT_OBJECT_SIZE(size_in_bytes);
   HeapObject* object = AllocateLinearly(size_in_bytes);
   if (object != NULL) {
     if (identity() == CODE_SPACE) {
src/spaces.h (12 changed lines)
@@ -642,7 +642,6 @@ class Page : public MemoryChunk {
   // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
   INLINE(static Page* FromAllocationTop(Address top)) {
     Page* p = FromAddress(top - kPointerSize);
-    ASSERT_PAGE_OFFSET(p->Offset(top));
     return p;
   }
 
@@ -666,7 +665,6 @@ class Page : public MemoryChunk {
   // Returns the offset of a given address to this page.
   INLINE(int Offset(Address a)) {
     int offset = static_cast<int>(a - address());
-    ASSERT_PAGE_OFFSET(offset);
     return offset;
   }
 
@@ -1741,7 +1739,6 @@ class NewSpacePage : public MemoryChunk {
         reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
                                   ~Page::kPageAlignmentMask);
     NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
-    ASSERT(page->InNewSpace());
     return page;
   }
 
@@ -1818,7 +1815,6 @@ class SemiSpace : public Space {
 
   // Returns the start address of the current page of the space.
   Address page_low() {
-    ASSERT(anchor_.next_page() != &anchor_);
     return current_page_->body();
   }
 
@@ -2084,7 +2080,7 @@ class NewSpace : public Space {
 
   // Return the current capacity of a semispace.
   intptr_t EffectiveCapacity() {
-    ASSERT(to_space_.Capacity() == from_space_.Capacity());
+    SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
     return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize;
   }
 
@@ -2317,9 +2313,9 @@ class OldSpace : public PagedSpace {
 // For contiguous spaces, top should be in the space (or at the end) and limit
 // should be the end of the space.
 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
-  ASSERT((space).page_low() <= (info).top \
+  SLOW_ASSERT((space).page_low() <= (info).top \
               && (info).top <= (space).page_high() \
              && (info).limit <= (space).page_high())
 
 
 // -----------------------------------------------------------------------------
@@ -55,10 +55,10 @@ void StoreBuffer::Mark(Address addr) {
 
 void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
   if (store_buffer_rebuilding_enabled_) {
-    ASSERT(!heap_->cell_space()->Contains(addr));
-    ASSERT(!heap_->code_space()->Contains(addr));
-    ASSERT(!heap_->old_data_space()->Contains(addr));
-    ASSERT(!heap_->new_space()->Contains(addr));
+    SLOW_ASSERT(!heap_->cell_space()->Contains(addr) &&
+                !heap_->code_space()->Contains(addr) &&
+                !heap_->old_data_space()->Contains(addr) &&
+                !heap_->new_space()->Contains(addr));
     Address* top = old_top_;
     *top++ = addr;
     old_top_ = top;
@@ -168,7 +168,6 @@ static inline uint32_t RoundDownToPowerOf2(uint32_t x) {
 
 template <typename T, typename U>
 static inline bool IsAligned(T value, U alignment) {
-  ASSERT(IsPowerOf2(alignment));
   return (value & (alignment - 1)) == 0;
 }
 
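The final hunk drops the power-of-two precondition from IsAligned. The mask test that remains only means "value is a multiple of alignment" when alignment is a power of two, which is exactly what the deleted assert documented. A standalone sketch (not V8 code) of the behaviour that precondition rules out:

#include <cstdint>
#include <cstdio>

// Same power-of-two test the deleted ASSERT(IsPowerOf2(alignment)) relied on.
static inline bool IsPowerOf2(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

// Body identical to the IsAligned template in the hunk above.
static inline bool IsAligned(uint32_t value, uint32_t alignment) {
  return (value & (alignment - 1)) == 0;
}

int main() {
  std::printf("%d\n", IsAligned(24, 8));  // 1: 24 is a multiple of 8
  std::printf("%d\n", IsAligned(12, 6));  // 0: 12 is a multiple of 6, but 6 is
                                          // not a power of two, so the mask
                                          // trick gives the wrong answer
  std::printf("%d\n", IsPowerOf2(6));     // 0: this is the case the removed
                                          // assert would have caught
  return 0;
}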