* Split up code_space into old_data_space and code_space.
* Make old_data_space non-executable on OSs and hardware that support it.
* Rename old_space to old_pointer_space (can contain pointers, esp. to new space).
* Ensure that individual pages allocated for old_space are only executable when they are for code objects.
* Ensure Space::Setup can cope with non-aligned memory.
* Make some methods on Spaces virtual. Make a way to iterate over all spaces.
* Replace executability flag with Executability enum in order to make intent at call site clearer.
* Fix serialization/deserialization to allocate write barrier memory for large arrays.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@165 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
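In outline, the patch replaces a bare boolean executability flag with a two-valued enum and adds iterators that visit every heap space. A minimal sketch of the two idioms, reduced from the hunks below (not a complete interface):

    enum Executability { NOT_EXECUTABLE, EXECUTABLE };

    // Before: new OldSpace(old_generation_size_, CODE_SPACE, true);
    // After: the call site states its intent.
    code_space_ = new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);

    // Summing sizes by iterating all spaces instead of naming each one:
    int total = 0;
    AllSpaces spaces;
    while (Space* space = spaces.next()) total += space->Size();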
Parent: 1472ee5970
Commit: 388c1094b7
@@ -656,7 +656,7 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
 
 v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
   // All allocation spaces other than NEW_SPACE have the same effect.
-  Heap::CollectGarbage(0, OLD_SPACE);
+  Heap::CollectGarbage(0, OLD_DATA_SPACE);
   return v8::Undefined();
 }
@@ -49,7 +49,7 @@ Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
 }
 
 
-// Symbols are created in the old generation (code space).
+// Symbols are created in the old generation (data space).
 Handle<String> Factory::LookupSymbol(Vector<const char> string) {
   CALL_HEAP_FUNCTION(Heap::LookupSymbol(string), String);
 }
@@ -214,17 +214,19 @@ typedef bool (*WeakSlotCallback)(Object** pointer);
 // NOTE: SpaceIterator depends on AllocationSpace enumeration values being
 // consecutive.
 enum AllocationSpace {
-  NEW_SPACE,
-  OLD_SPACE,
-  CODE_SPACE,
-  MAP_SPACE,
-  LO_SPACE,
+  NEW_SPACE,          // Semispaces collected with copying collector.
+  OLD_POINTER_SPACE,  // Must be first of the paged spaces - see PagedSpaces.
+  OLD_DATA_SPACE,     // May not have pointers to new space.
+  CODE_SPACE,         // Also one of the old spaces.  Marked executable.
+  MAP_SPACE,          // Only map objects.
+  LO_SPACE,           // Large objects.
   FIRST_SPACE = NEW_SPACE,
-  LAST_SPACE = LO_SPACE
+  LAST_SPACE = LO_SPACE  // <= 5 (see kSpaceBits and kLOSpacePointer)
 };
 const int kSpaceTagSize = 3;
 const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
 
 
 // A flag that indicates whether objects should be pretenured when
 // allocated (allocated directly into the old generation) or not
 // (allocated in the young generation if the object size and type

@@ -233,6 +235,8 @@ enum PretenureFlag { NOT_TENURED, TENURED };
 
 enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
 
+enum Executability { NOT_EXECUTABLE, EXECUTABLE };
+
 
 // A CodeDesc describes a buffer holding instructions and relocation
 // information. The instructions start at the beginning of the buffer
@@ -44,7 +44,8 @@ int Heap::MaxHeapObjectSize() {
 }
 
 
-Object* Heap::AllocateRaw(int size_in_bytes, AllocationSpace space) {
+Object* Heap::AllocateRaw(int size_in_bytes,
+                          AllocationSpace space) {
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
 #ifdef DEBUG
   if (FLAG_gc_interval >= 0 &&

@@ -60,8 +61,10 @@ Object* Heap::AllocateRaw(int size_in_bytes, AllocationSpace space) {
   }
 
   Object* result;
-  if (OLD_SPACE == space) {
-    result = old_space_->AllocateRaw(size_in_bytes);
+  if (OLD_POINTER_SPACE == space) {
+    result = old_pointer_space_->AllocateRaw(size_in_bytes);
+  } else if (OLD_DATA_SPACE == space) {
+    result = old_data_space_->AllocateRaw(size_in_bytes);
   } else if (CODE_SPACE == space) {
     result = code_space_->AllocateRaw(size_in_bytes);
   } else if (LO_SPACE == space) {

@@ -75,32 +78,6 @@ Object* Heap::AllocateRaw(int size_in_bytes, AllocationSpace space) {
 }
 
 
-Object* Heap::AllocateForDeserialization(int size_in_bytes,
-                                         AllocationSpace space) {
-  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
-  PagedSpace* where;
-
-  switch (space) {
-    case NEW_SPACE:
-      return new_space_->AllocateRaw(size_in_bytes);
-    case LO_SPACE:
-      return lo_space_->AllocateRaw(size_in_bytes);
-    case OLD_SPACE:
-      where = old_space_;
-      break;
-    case CODE_SPACE:
-      where = code_space_;
-      break;
-    case MAP_SPACE:
-      where = map_space_;
-      break;
-  }
-
-  // Only paged spaces fall through.
-  return where->AllocateForDeserialization(size_in_bytes);
-}
-
-
 Object* Heap::NumberFromInt32(int32_t value) {
   if (Smi::IsValid(value)) return Smi::FromInt(value);
   // Bypass NumberFromDouble to avoid various redundant checks.

@@ -160,9 +137,9 @@ void Heap::RecordWrite(Address address, int offset) {
 }
 
 
-AllocationSpace Heap::TargetSpace(HeapObject* object) {
-  // Heap numbers and sequential strings are promoted to code space, all
-  // other object types are promoted to old space.  We do not use
+OldSpace* Heap::TargetSpace(HeapObject* object) {
+  // Heap numbers and sequential strings are promoted to old data space, all
+  // other object types are promoted to old pointer space.  We do not use
   // object->IsHeapNumber() and object->IsSeqString() because we already
   // know that object has the heap object tag.
   InstanceType type = object->map()->instance_type();

@@ -171,7 +148,7 @@ AllocationSpace Heap::TargetSpace(HeapObject* object) {
       type != HEAP_NUMBER_TYPE &&
       (type >= FIRST_NONSTRING_TYPE ||
        String::cast(object)->representation_tag() != kSeqStringTag);
-  return has_pointers ? OLD_SPACE : CODE_SPACE;
+  return has_pointers ? old_pointer_space_ : old_data_space_;
 }
src/heap.cc (282 changed lines)
@@ -83,7 +83,8 @@ DECLARE_bool(log_gc);
 
 
 NewSpace* Heap::new_space_ = NULL;
-OldSpace* Heap::old_space_ = NULL;
+OldSpace* Heap::old_pointer_space_ = NULL;
+OldSpace* Heap::old_data_space_ = NULL;
 OldSpace* Heap::code_space_ = NULL;
 MapSpace* Heap::map_space_ = NULL;
 LargeObjectSpace* Heap::lo_space_ = NULL;

@@ -127,7 +128,8 @@ int Heap::Capacity() {
   if (!HasBeenSetup()) return 0;
 
   return new_space_->Capacity() +
-      old_space_->Capacity() +
+      old_pointer_space_->Capacity() +
+      old_data_space_->Capacity() +
       code_space_->Capacity() +
       map_space_->Capacity();
 }

@@ -137,7 +139,8 @@ int Heap::Available() {
   if (!HasBeenSetup()) return 0;
 
   return new_space_->Available() +
-      old_space_->Available() +
+      old_pointer_space_->Available() +
+      old_data_space_->Available() +
       code_space_->Available() +
       map_space_->Available();
 }

@@ -145,10 +148,11 @@ int Heap::Available() {
 
 bool Heap::HasBeenSetup() {
   return new_space_ != NULL &&
-         old_space_ != NULL &&
-         code_space_ != NULL &&
-         map_space_ != NULL &&
-         lo_space_ != NULL;
+         old_pointer_space_ != NULL &&
+         old_data_space_ != NULL &&
+         code_space_ != NULL &&
+         map_space_ != NULL &&
+         lo_space_ != NULL;
 }
@@ -175,13 +179,13 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
   // Is there enough space left in OLD to guarantee that a scavenge can
   // succeed?
   //
-  // Note that old_space_->MaxAvailable() undercounts the memory available
+  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
   // for object promotion.  It counts only the bytes that the memory
   // allocator has not yet allocated from the OS and assigned to any space,
   // and does not count available bytes already in the old space or code
   // space.  Undercounting is safe---we may get an unrequested full GC when
   // a scavenge would have succeeded.
-  if (old_space_->MaxAvailable() <= new_space_->Size()) {
+  if (MemoryAllocator::MaxAvailable() <= new_space_->Size()) {
     Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
     return MARK_COMPACTOR;
   }

@@ -256,9 +260,8 @@ void Heap::GarbageCollectionPrologue() {
   if (FLAG_gc_verbose) Print();
 
   if (FLAG_print_rset) {
-    // By definition, code space does not have remembered set bits that we
-    // care about.
-    old_space_->PrintRSet();
+    // Not all spaces have remembered set bits that we care about.
+    old_pointer_space_->PrintRSet();
     map_space_->PrintRSet();
     lo_space_->PrintRSet();
   }

@@ -270,11 +273,10 @@ void Heap::GarbageCollectionPrologue() {
 }
 
 int Heap::SizeOfObjects() {
-  return new_space_->Size() +
-      old_space_->Size() +
-      code_space_->Size() +
-      map_space_->Size() +
-      lo_space_->Size();
+  int total = 0;
+  AllSpaces spaces;
+  while (Space* space = spaces.next()) total += space->Size();
+  return total;
 }
 
 void Heap::GarbageCollectionEpilogue() {
@@ -303,6 +305,14 @@ void Heap::GarbageCollectionEpilogue() {
 }
 
 
+void Heap::CollectAllGarbage() {
+  // Since we are ignoring the return value, the exact choice of space does
+  // not matter, so long as we do not specify NEW_SPACE, which would not
+  // cause a full GC.
+  CollectGarbage(0, OLD_POINTER_SPACE);
+}
+
+
 bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
   // The VM is in the GC state until exiting this function.
   VMState state(GC);
@@ -344,8 +354,10 @@ bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
   switch (space) {
     case NEW_SPACE:
       return new_space_->Available() >= requested_size;
-    case OLD_SPACE:
-      return old_space_->Available() >= requested_size;
+    case OLD_POINTER_SPACE:
+      return old_pointer_space_->Available() >= requested_size;
+    case OLD_DATA_SPACE:
+      return old_data_space_->Available() >= requested_size;
     case CODE_SPACE:
       return code_space_->Available() >= requested_size;
     case MAP_SPACE:
@@ -381,7 +393,7 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
 
   // If we have used the mark-compact collector to collect the new
   // space, and it has not compacted the new space, we force a
-  // separate scavenge collection.  THIS IS A HACK.  It covers the
+  // separate scavenge collection.  This is a hack.  It covers the
   // case where (1) a new space collection was requested, (2) the
   // collector selection policy selected the mark-compact collector,
   // and (3) the mark-compact collector policy selected not to
@@ -483,9 +495,9 @@ static Address promoted_top = NULL;
 
 
 #ifdef DEBUG
-// Visitor class to verify pointers in code space do not point into
+// Visitor class to verify pointers in code or data space do not point into
 // new space.
-class VerifyCodeSpacePointersVisitor: public ObjectVisitor {
+class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
  public:
   void VisitPointers(Object** start, Object**end) {
     for (Object** current = start; current < end; current++) {
|
||||
void Heap::Scavenge() {
|
||||
#ifdef DEBUG
|
||||
if (FLAG_enable_slow_asserts) {
|
||||
VerifyCodeSpacePointersVisitor v;
|
||||
VerifyNonPointerSpacePointersVisitor v;
|
||||
HeapObjectIterator it(code_space_);
|
||||
while (it.has_next()) {
|
||||
HeapObject* object = it.next();
|
||||
@@ -560,8 +572,8 @@ void Heap::Scavenge() {
   IterateRoots(&copy_visitor);
 
   // Copy objects reachable from the old generation.  By definition, there
-  // are no intergenerational pointers in code space.
-  IterateRSet(old_space_, &CopyObject);
+  // are no intergenerational pointers in code or data spaces.
+  IterateRSet(old_pointer_space_, &CopyObject);
   IterateRSet(map_space_, &CopyObject);
   lo_space_->IterateRSet(&CopyObject);
@@ -694,12 +706,13 @@ int Heap::UpdateRSet(HeapObject* obj) {
 
 
 void Heap::RebuildRSets() {
-  // By definition, we do not care about remembered set bits in code space.
+  // By definition, we do not care about remembered set bits in code or data
+  // spaces.
   map_space_->ClearRSet();
   RebuildRSets(map_space_);
 
-  old_space_->ClearRSet();
-  RebuildRSets(old_space_);
+  old_pointer_space_->ClearRSet();
+  RebuildRSets(old_pointer_space_);
 
   Heap::lo_space_->ClearRSet();
   RebuildRSets(lo_space_);
@@ -767,7 +780,7 @@ void Heap::CopyObject(HeapObject** p) {
 
   // We use the first word (where the map pointer usually is) of a heap
   // object to record the forwarding pointer.  A forwarding pointer can
-  // point to the old space, the code space, or the to space of the new
+  // point to an old space, the code space, or the to space of the new
   // generation.
   MapWord first_word = object->map_word();
@@ -802,26 +815,23 @@ void Heap::CopyObject(HeapObject** p) {
   Object* result;
   // If the object should be promoted, we try to copy it to old space.
   if (ShouldBePromoted(object->address(), object_size)) {
-    AllocationSpace target_space = Heap::TargetSpace(object);
-    if (target_space == OLD_SPACE) {
-      result = old_space_->AllocateRaw(object_size);
-    } else {
-      ASSERT(target_space == CODE_SPACE);
-      result = code_space_->AllocateRaw(object_size);
-    }
+    OldSpace* target_space = Heap::TargetSpace(object);
+    ASSERT(target_space == Heap::old_pointer_space_ ||
+           target_space == Heap::old_data_space_);
+    result = target_space->AllocateRaw(object_size);
 
     if (!result->IsFailure()) {
       *p = MigrateObject(p, HeapObject::cast(result), object_size);
-      if (target_space == OLD_SPACE) {
+      if (target_space == Heap::old_pointer_space_) {
         // Record the object's address at the top of the to space, to allow
         // it to be swept by the scavenger.
         promoted_top -= kPointerSize;
         Memory::Object_at(promoted_top) = *p;
       } else {
 #ifdef DEBUG
-        // Objects promoted to the code space should not have pointers to
+        // Objects promoted to the data space should not have pointers to
         // new space.
-        VerifyCodeSpacePointersVisitor v;
+        VerifyNonPointerSpacePointersVisitor v;
         (*p)->Iterate(&v);
 #endif
       }

@@ -890,7 +900,7 @@ bool Heap::CreateInitialMaps() {
   if (obj->IsFailure()) return false;
   empty_fixed_array_ = FixedArray::cast(obj);
 
-  obj = Allocate(oddball_map(), CODE_SPACE);
+  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
   if (obj->IsFailure()) return false;
   null_value_ = obj;
@ -890,7 +900,7 @@ bool Heap::CreateInitialMaps() {
|
||||
if (obj->IsFailure()) return false;
|
||||
empty_fixed_array_ = FixedArray::cast(obj);
|
||||
|
||||
obj = Allocate(oddball_map(), CODE_SPACE);
|
||||
obj = Allocate(oddball_map(), OLD_DATA_SPACE);
|
||||
if (obj->IsFailure()) return false;
|
||||
null_value_ = obj;
|
||||
|
||||
@@ -1016,7 +1026,7 @@ Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate heap numbers in paged
   // spaces.
   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
-  AllocationSpace space = (pretenure == TENURED) ? CODE_SPACE : NEW_SPACE;
+  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   Object* result = AllocateRaw(HeapNumber::kSize, space);
   if (result->IsFailure()) return result;

@@ -1042,7 +1052,7 @@ Object* Heap::AllocateHeapNumber(double value) {
 Object* Heap::CreateOddball(Map* map,
                             const char* to_string,
                             Object* to_number) {
-  Object* result = Allocate(map, CODE_SPACE);
+  Object* result = Allocate(map, OLD_DATA_SPACE);
   if (result->IsFailure()) return result;
   return Oddball::cast(result)->Initialize(to_string, to_number);
 }

@@ -1112,7 +1122,7 @@ bool Heap::CreateInitialObjects() {
   if (obj->IsFailure()) return false;
   nan_value_ = obj;
 
-  obj = Allocate(oddball_map(), CODE_SPACE);
+  obj = Allocate(oddball_map(), OLD_DATA_SPACE);
   if (obj->IsFailure()) return false;
   undefined_value_ = obj;
   ASSERT(!InNewSpace(undefined_value()));

@@ -1295,7 +1305,8 @@ Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
 Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate proxies in paged spaces.
   STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
-  AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
+  AllocationSpace space =
+      (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   Object* result = Allocate(proxy_map(), space);
   if (result->IsFailure()) return result;

@@ -1491,9 +1502,11 @@ Object* Heap:: LookupSingleCharacterStringFromCode(uint16_t code) {
 
 Object* Heap::AllocateByteArray(int length) {
   int size = ByteArray::SizeFor(length);
-  AllocationSpace space = size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE;
+  AllocationSpace space =
+      size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE;
+
   Object* result = AllocateRaw(size, space);
+
   if (result->IsFailure()) return result;
 
   reinterpret_cast<Array*>(result)->set_map(byte_array_map());
@@ -1510,10 +1523,13 @@ Object* Heap::CreateCode(const CodeDesc& desc,
   int sinfo_size = 0;
   if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL);
   int obj_size = Code::SizeFor(body_size, sinfo_size);
-  AllocationSpace space =
-      (obj_size > MaxHeapObjectSize()) ? LO_SPACE : CODE_SPACE;
+  Object* result;
+  if (obj_size > MaxHeapObjectSize()) {
+    result = lo_space_->AllocateRawCode(obj_size);
+  } else {
+    result = code_space_->AllocateRaw(obj_size);
+  }
 
-  Object* result = AllocateRaw(obj_size, space);
   if (result->IsFailure()) return result;
 
   // Initialize the object

@@ -1537,9 +1553,13 @@ Object* Heap::CreateCode(const CodeDesc& desc,
 Object* Heap::CopyCode(Code* code) {
   // Allocate an object the same size as the code object.
   int obj_size = code->Size();
-  AllocationSpace space =
-      (obj_size > MaxHeapObjectSize()) ? LO_SPACE : CODE_SPACE;
-  Object* result = AllocateRaw(obj_size, space);
+  Object* result;
+  if (obj_size > MaxHeapObjectSize()) {
+    result = lo_space_->AllocateRawCode(obj_size);
+  } else {
+    result = code_space_->AllocateRaw(obj_size);
+  }
 
   if (result->IsFailure()) return result;
 
   // Copy code object.

@@ -1597,7 +1617,7 @@ Object* Heap::AllocateFunctionPrototype(JSFunction* function) {
 Object* Heap::AllocateFunction(Map* function_map,
                                SharedFunctionInfo* shared,
                                Object* prototype) {
-  Object* result = Allocate(function_map, OLD_SPACE);
+  Object* result = Allocate(function_map, OLD_POINTER_SPACE);
   if (result->IsFailure()) return result;
   return InitializeFunction(JSFunction::cast(result), shared, prototype);
 }

@@ -1681,7 +1701,8 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
   if (properties->IsFailure()) return properties;
 
   // Allocate the JSObject.
-  AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
+  AllocationSpace space =
+      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
   if (map->instance_size() > MaxHeapObjectSize()) space = LO_SPACE;
   Object* obj = Allocate(map, space);
   if (obj->IsFailure()) return obj;
@@ -1906,7 +1927,8 @@ Object* Heap::AllocateSymbol(unibrow::CharacterStream* buffer,
   }
 
   // Allocate string.
-  AllocationSpace space = (size > MaxHeapObjectSize()) ? LO_SPACE : CODE_SPACE;
+  AllocationSpace space =
+      (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_DATA_SPACE;
   Object* result = AllocateRaw(size, space);
   if (result->IsFailure()) return result;

@@ -1925,7 +1947,7 @@ Object* Heap::AllocateSymbol(unibrow::CharacterStream* buffer,
 
 
 Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
-  AllocationSpace space = (pretenure == TENURED) ? CODE_SPACE : NEW_SPACE;
+  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   int size = AsciiString::SizeFor(length);
   if (size > MaxHeapObjectSize()) {
     space = LO_SPACE;

@@ -1955,7 +1977,7 @@ Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
 
 
 Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
-  AllocationSpace space = (pretenure == TENURED) ? CODE_SPACE : NEW_SPACE;
+  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   int size = TwoByteString::SizeFor(length);
   if (size > MaxHeapObjectSize()) {
     space = LO_SPACE;

@@ -1986,7 +2008,7 @@ Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
 
 Object* Heap::AllocateEmptyFixedArray() {
   int size = FixedArray::SizeFor(0);
-  Object* result = AllocateRaw(size, CODE_SPACE);
+  Object* result = AllocateRaw(size, OLD_DATA_SPACE);
   if (result->IsFailure()) return result;
   // Initialize the object.
   reinterpret_cast<Array*>(result)->set_map(fixed_array_map());

@@ -2004,7 +2026,8 @@ Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
   if (size > MaxHeapObjectSize()) {
     result = lo_space_->AllocateRawFixedArray(size);
   } else {
-    AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
+    AllocationSpace space =
+        (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
    result = AllocateRaw(size, space);
   }
   if (result->IsFailure()) return result;
@@ -2102,7 +2125,7 @@ STRUCT_LIST(MAKE_CASE)
   }
   int size = map->instance_size();
   AllocationSpace space =
-      (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_SPACE;
+      (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_POINTER_SPACE;
   Object* result = Heap::Allocate(map, space);
   if (result->IsFailure()) return result;
   Struct::cast(result)->InitializeBody(size);

@@ -2115,11 +2138,8 @@ STRUCT_LIST(MAKE_CASE)
 void Heap::Print() {
   if (!HasBeenSetup()) return;
   Top::PrintStack();
-  new_space_->Print();
-  old_space_->Print();
-  code_space_->Print();
-  map_space_->Print();
-  lo_space_->Print();
+  AllSpaces spaces;
+  while (Space* space = spaces.next()) space->Print();
 }

@@ -2153,8 +2173,10 @@ void Heap::ReportHeapStatistics(const char* title) {
   MemoryAllocator::ReportStatistics();
   PrintF("To space : ");
   new_space_->ReportStatistics();
-  PrintF("Old space : ");
-  old_space_->ReportStatistics();
+  PrintF("Old pointer space : ");
+  old_pointer_space_->ReportStatistics();
+  PrintF("Old data space : ");
+  old_data_space_->ReportStatistics();
   PrintF("Code space : ");
   code_space_->ReportStatistics();
   PrintF("Map space : ");

@@ -2175,7 +2197,8 @@ bool Heap::Contains(Address addr) {
   if (OS::IsOutsideAllocatedSpace(addr)) return false;
   return HasBeenSetup() &&
     (new_space_->ToSpaceContains(addr) ||
-     old_space_->Contains(addr) ||
+     old_pointer_space_->Contains(addr) ||
+     old_data_space_->Contains(addr) ||
      code_space_->Contains(addr) ||
      map_space_->Contains(addr) ||
      lo_space_->SlowContains(addr));
@@ -2194,8 +2217,10 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
   switch (space) {
     case NEW_SPACE:
       return new_space_->ToSpaceContains(addr);
-    case OLD_SPACE:
-      return old_space_->Contains(addr);
+    case OLD_POINTER_SPACE:
+      return old_pointer_space_->Contains(addr);
+    case OLD_DATA_SPACE:
+      return old_data_space_->Contains(addr);
     case CODE_SPACE:
       return code_space_->Contains(addr);
     case MAP_SPACE:

@@ -2215,11 +2240,10 @@ void Heap::Verify() {
   VerifyPointersVisitor visitor;
   Heap::IterateRoots(&visitor);
 
-  Heap::new_space_->Verify();
-  Heap::old_space_->Verify();
-  Heap::code_space_->Verify();
-  Heap::map_space_->Verify();
-  Heap::lo_space_->Verify();
+  AllSpaces spaces;
+  while (Space* space = spaces.next()) {
+    space->Verify();
+  }
 }
 #endif  // DEBUG

@@ -2308,7 +2332,7 @@ void Heap::IterateRSetRange(Address object_start,
 
 void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
   ASSERT(Page::is_rset_in_use());
-  ASSERT(space == old_space_ || space == map_space_);
+  ASSERT(space == old_pointer_space_ || space == map_space_);
 
   PageIterator it(space, PageIterator::PAGES_IN_USE);
   while (it.has_next()) {

@@ -2413,7 +2437,8 @@ bool Heap::ConfigureHeapDefault() {
 
 
 int Heap::PromotedSpaceSize() {
-  return old_space_->Size()
+  return old_pointer_space_->Size()
+      + old_data_space_->Size()
       + code_space_->Size()
       + map_space_->Size()
       + lo_space_->Size();
@@ -2460,23 +2485,33 @@ bool Heap::Setup(bool create_heap_objects) {
   int old_space_size = new_space_start - old_space_start;
   int code_space_size = young_generation_size_ - old_space_size;
 
-  // Initialize new space.  It will not contain code.
+  // Initialize new space.
   new_space_ = new NewSpace(initial_semispace_size_,
                             semispace_size_,
-                            NEW_SPACE,
-                            false);
+                            NEW_SPACE);
   if (new_space_ == NULL) return false;
   if (!new_space_->Setup(new_space_start, young_generation_size_)) return false;
 
   // Initialize old space, set the maximum capacity to the old generation
   // size.  It will not contain code.
-  old_space_ = new OldSpace(old_generation_size_, OLD_SPACE, false);
-  if (old_space_ == NULL) return false;
-  if (!old_space_->Setup(old_space_start, old_space_size)) return false;
+  old_pointer_space_ =
+      new OldSpace(old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
+  if (old_pointer_space_ == NULL) return false;
+  if (!old_pointer_space_->Setup(old_space_start, old_space_size >> 1)) {
+    return false;
+  }
+  old_data_space_ =
+      new OldSpace(old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
+  if (old_data_space_ == NULL) return false;
+  if (!old_data_space_->Setup(old_space_start + (old_space_size >> 1),
+                              old_space_size >> 1)) {
+    return false;
+  }
 
   // Initialize the code space, set its maximum capacity to the old
   // generation size.  It needs executable memory.
-  code_space_ = new OldSpace(old_generation_size_, CODE_SPACE, true);
+  code_space_ =
+      new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
   if (code_space_ == NULL) return false;
   if (!code_space_->Setup(code_space_start, code_space_size)) return false;

@@ -2487,8 +2522,10 @@ bool Heap::Setup(bool create_heap_objects) {
   // enough to hold at least a page will cause it to allocate.
   if (!map_space_->Setup(NULL, 0)) return false;
 
-  // The large object space may contain code, so it needs executable memory.
-  lo_space_ = new LargeObjectSpace(LO_SPACE, true);
+  // The large object code space may contain code or data.  We set the memory
+  // to be non-executable here for safety, but this means we need to enable it
+  // explicitly when allocating large code objects.
+  lo_space_ = new LargeObjectSpace(LO_SPACE);
   if (lo_space_ == NULL) return false;
   if (!lo_space_->Setup()) return false;
@@ -2517,10 +2554,16 @@ void Heap::TearDown() {
     new_space_ = NULL;
   }
 
-  if (old_space_ != NULL) {
-    old_space_->TearDown();
-    delete old_space_;
-    old_space_ = NULL;
+  if (old_pointer_space_ != NULL) {
+    old_pointer_space_->TearDown();
+    delete old_pointer_space_;
+    old_pointer_space_ = NULL;
+  }
+
+  if (old_data_space_ != NULL) {
+    old_data_space_->TearDown();
+    delete old_data_space_;
+    old_data_space_ = NULL;
   }
 
   if (code_space_ != NULL) {

@@ -2548,7 +2591,8 @@ void Heap::TearDown() {
 void Heap::Shrink() {
   // Try to shrink map, old, and code spaces.
   map_space_->Shrink();
-  old_space_->Shrink();
+  old_pointer_space_->Shrink();
+  old_data_space_->Shrink();
   code_space_->Shrink();
 }
@@ -2572,6 +2616,57 @@ void Heap::PrintHandles() {
 #endif
 
 
+Space* AllSpaces::next() {
+  switch (counter_++) {
+    case NEW_SPACE:
+      return Heap::new_space();
+    case OLD_POINTER_SPACE:
+      return Heap::old_pointer_space();
+    case OLD_DATA_SPACE:
+      return Heap::old_data_space();
+    case CODE_SPACE:
+      return Heap::code_space();
+    case MAP_SPACE:
+      return Heap::map_space();
+    case LO_SPACE:
+      return Heap::lo_space();
+    default:
+      return NULL;
+  }
+}
+
+
+PagedSpace* PagedSpaces::next() {
+  switch (counter_++) {
+    case OLD_POINTER_SPACE:
+      return Heap::old_pointer_space();
+    case OLD_DATA_SPACE:
+      return Heap::old_data_space();
+    case CODE_SPACE:
+      return Heap::code_space();
+    case MAP_SPACE:
+      return Heap::map_space();
+    default:
+      return NULL;
+  }
+}
+
+
+OldSpace* OldSpaces::next() {
+  switch (counter_++) {
+    case OLD_POINTER_SPACE:
+      return Heap::old_pointer_space();
+    case OLD_DATA_SPACE:
+      return Heap::old_data_space();
+    case CODE_SPACE:
+      return Heap::code_space();
+    default:
+      return NULL;
+  }
+}
+
+
 SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
 }
@@ -2612,8 +2707,11 @@ ObjectIterator* SpaceIterator::CreateIterator() {
     case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(Heap::new_space());
      break;
-    case OLD_SPACE:
-      iterator_ = new HeapObjectIterator(Heap::old_space());
+    case OLD_POINTER_SPACE:
+      iterator_ = new HeapObjectIterator(Heap::old_pointer_space());
+      break;
+    case OLD_DATA_SPACE:
+      iterator_ = new HeapObjectIterator(Heap::old_data_space());
       break;
     case CODE_SPACE:
       iterator_ = new HeapObjectIterator(Heap::code_space());
src/heap.h (69 changed lines)
@@ -247,7 +247,8 @@ class Heap : public AllStatic {
   static Address NewSpaceTop() { return new_space_->top(); }
 
   static NewSpace* new_space() { return new_space_; }
-  static OldSpace* old_space() { return old_space_; }
+  static OldSpace* old_pointer_space() { return old_pointer_space_; }
+  static OldSpace* old_data_space() { return old_data_space_; }
   static OldSpace* code_space() { return code_space_; }
   static MapSpace* map_space() { return map_space_; }
   static LargeObjectSpace* lo_space() { return lo_space_; }

@@ -500,18 +501,13 @@ class Heap : public AllStatic {
   static Object* AllocateExternalStringFromTwoByte(
       ExternalTwoByteString::Resource* resource);
 
-  // Allocates an uninitialized object.
+  // Allocates an uninitialized object.  The memory is non-executable if the
+  // hardware and OS allow.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this function does not perform a garbage collection.
-  static inline Object* AllocateRaw(int size_in_bytes, AllocationSpace space);
-
-  // Allocate an unitialized object during deserialization.  Performs linear
-  // allocation (ie, guaranteed no free list allocation) and assumes the
-  // spaces are all preexpanded so allocation should not fail.
-  static inline Object* AllocateForDeserialization(int size_in_bytes,
-                                                   AllocationSpace space);
+  static inline Object* AllocateRaw(int size_in_bytes,
+                                    AllocationSpace space);
 
   // Makes a new native code object
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -551,6 +547,9 @@ class Heap : public AllStatic {
   // Returns whether required_space bytes are available after the collection.
   static bool CollectGarbage(int required_space, AllocationSpace space);
 
+  // Performs a full garbage collection.
+  static void CollectAllGarbage();
+
   // Utility to invoke the scavenger.  This is needed in test code to
   // ensure correct callback for weak global handles.
   static void PerformScavenge();

@@ -609,7 +608,7 @@ class Heap : public AllStatic {
   static bool InSpace(HeapObject* value, AllocationSpace space);
 
   // Finds out which space an object should get promoted to based on its type.
-  static inline AllocationSpace TargetSpace(HeapObject* object);
+  static inline OldSpace* TargetSpace(HeapObject* object);
 
   // Sets the stub_cache_ (only used when expanding the dictionary).
   static void set_code_stubs(Dictionary* value) { code_stubs_ = value; }

@@ -726,7 +725,8 @@ class Heap : public AllStatic {
   static const int kMaxMapSpaceSize = 8*MB;
 
   static NewSpace* new_space_;
-  static OldSpace* old_space_;
+  static OldSpace* old_pointer_space_;
+  static OldSpace* old_data_space_;
   static OldSpace* code_space_;
   static MapSpace* map_space_;
   static LargeObjectSpace* lo_space_;
@@ -801,11 +801,10 @@ class Heap : public AllStatic {
                             bool new_object,
                             PretenureFlag pretenure = NOT_TENURED);
 
-  // Allocate an uninitialized object in map space.  The behavior is
-  // identical to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that
-  // (a) it doesn't have to test the allocation space argument and (b) can
-  // reduce code size (since both AllocateRaw and AllocateRawMap are
-  // inlined).
+  // Allocate an uninitialized object in map space.  The behavior is identical
+  // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
+  // have to test the allocation space argument and (b) can reduce code size
+  // (since both AllocateRaw and AllocateRawMap are inlined).
   static inline Object* AllocateRawMap(int size_in_bytes);
@@ -912,10 +911,44 @@ class VerifyPointersAndRSetVisitor: public ObjectVisitor {
 #endif
 
 
+// Space iterator for iterating over all spaces of the heap.
+// Returns each space in turn, and null when it is done.
+class AllSpaces BASE_EMBEDDED {
+ public:
+  Space* next();
+  AllSpaces() { counter_ = FIRST_SPACE; }
+ private:
+  int counter_;
+};
+
+
+// Space iterator for iterating over all old spaces of the heap: Old pointer
+// space, old data space and code space.
+// Returns each space in turn, and null when it is done.
+class OldSpaces BASE_EMBEDDED {
+ public:
+  OldSpace* next();
+  OldSpaces() { counter_ = OLD_POINTER_SPACE; }
+ private:
+  int counter_;
+};
+
+
+// Space iterator for iterating over all the paged spaces of the heap:
+// Map space, old pointer space, old data space and code space.
+// Returns each space in turn, and null when it is done.
+class PagedSpaces BASE_EMBEDDED {
+ public:
+  PagedSpace* next();
+  PagedSpaces() { counter_ = OLD_POINTER_SPACE; }
+ private:
+  int counter_;
+};
+
+
 // Space iterator for iterating over all spaces of the heap.
 // For each space an object iterator is provided. The deallocation of the
 // returned object iterators is handled by the space iterator.
 
 class SpaceIterator : public Malloced {
  public:
   SpaceIterator();
@@ -73,8 +73,9 @@ MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;
 // collection.
 int MarkCompactCollector::live_bytes_ = 0;
 int MarkCompactCollector::live_young_objects_ = 0;
-int MarkCompactCollector::live_old_objects_ = 0;
-int MarkCompactCollector::live_immutable_objects_ = 0;
+int MarkCompactCollector::live_old_data_objects_ = 0;
+int MarkCompactCollector::live_old_pointer_objects_ = 0;
+int MarkCompactCollector::live_code_objects_ = 0;
 int MarkCompactCollector::live_map_objects_ = 0;
 int MarkCompactCollector::live_lo_objects_ = 0;
 #endif

@@ -131,14 +132,16 @@ void MarkCompactCollector::Prepare() {
   // because objects do not get promoted out of new space on non-compacting
   // GCs.
   if (!compacting_collection_) {
-    int old_gen_recoverable = Heap::old_space()->Waste()
-        + Heap::old_space()->AvailableFree()
-        + Heap::code_space()->Waste()
-        + Heap::code_space()->AvailableFree();
-    int old_gen_used = old_gen_recoverable
-        + Heap::old_space()->Size()
-        + Heap::code_space()->Size();
-    int old_gen_fragmentation = (old_gen_recoverable * 100) / old_gen_used;
+    int old_gen_recoverable = 0;
+    int old_gen_used = 0;
+
+    OldSpaces spaces;
+    while (OldSpace* space = spaces.next()) {
+      old_gen_recoverable += space->Waste() + space->AvailableFree();
+      old_gen_used += space->Size();
+    }
+    int old_gen_fragmentation =
+        static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
     if (old_gen_fragmentation > kFragmentationLimit) {
       compacting_collection_ = true;
     }
@@ -154,17 +157,19 @@ void MarkCompactCollector::Prepare() {
   }
 #endif
 
-  Heap::map_space()->PrepareForMarkCompact(compacting_collection_);
-  Heap::old_space()->PrepareForMarkCompact(compacting_collection_);
-  Heap::code_space()->PrepareForMarkCompact(compacting_collection_);
+  PagedSpaces spaces;
+  while (PagedSpace* space = spaces.next()) {
+    space->PrepareForMarkCompact(compacting_collection_);
+  }
 
   Counters::global_objects.Set(0);
 
 #ifdef DEBUG
   live_bytes_ = 0;
   live_young_objects_ = 0;
-  live_old_objects_ = 0;
-  live_immutable_objects_ = 0;
+  live_old_pointer_objects_ = 0;
+  live_old_data_objects_ = 0;
+  live_code_objects_ = 0;
   live_map_objects_ = 0;
   live_lo_objects_ = 0;
 #endif

@@ -575,8 +580,13 @@ void MarkCompactCollector::RefillMarkingStack() {
   ScanOverflowedObjects(&new_it);
   if (marking_stack.is_full()) return;
 
-  HeapObjectIterator old_it(Heap::old_space(), &OverflowObjectSize);
-  ScanOverflowedObjects(&old_it);
+  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
+                                    &OverflowObjectSize);
+  ScanOverflowedObjects(&old_pointer_it);
+  if (marking_stack.is_full()) return;
+
+  HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
+  ScanOverflowedObjects(&old_data_it);
   if (marking_stack.is_full()) return;
 
   HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
@@ -683,10 +693,12 @@ void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
   } else if (Heap::map_space()->Contains(obj)) {
     ASSERT(obj->IsMap());
     live_map_objects_++;
-  } else if (Heap::old_space()->Contains(obj)) {
-    live_old_objects_++;
+  } else if (Heap::old_pointer_space()->Contains(obj)) {
+    live_old_pointer_objects_++;
+  } else if (Heap::old_data_space()->Contains(obj)) {
+    live_old_data_objects_++;
   } else if (Heap::code_space()->Contains(obj)) {
-    live_immutable_objects_++;
+    live_code_objects_++;
   } else if (Heap::lo_space()->Contains(obj)) {
     live_lo_objects_++;
   } else {

@@ -704,7 +716,8 @@ static int CountMarkedCallback(HeapObject* obj) {
 
 void MarkCompactCollector::VerifyHeapAfterMarkingPhase() {
   Heap::new_space()->Verify();
-  Heap::old_space()->Verify();
+  Heap::old_pointer_space()->Verify();
+  Heap::old_data_space()->Verify();
   Heap::code_space()->Verify();
   Heap::map_space()->Verify();

@@ -721,11 +734,15 @@ void MarkCompactCollector::VerifyHeapAfterMarkingPhase() {
   SemiSpaceIterator new_it(Heap::new_space(), &CountMarkedCallback);
   CHECK_LIVE_OBJECTS(new_it, live_young_objects_);
 
-  HeapObjectIterator old_it(Heap::old_space(), &CountMarkedCallback);
-  CHECK_LIVE_OBJECTS(old_it, live_old_objects_);
+  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
+                                    &CountMarkedCallback);
+  CHECK_LIVE_OBJECTS(old_pointer_it, live_old_pointer_objects_);
+
+  HeapObjectIterator old_data_it(Heap::old_data_space(), &CountMarkedCallback);
+  CHECK_LIVE_OBJECTS(old_data_it, live_old_data_objects_);
 
   HeapObjectIterator code_it(Heap::code_space(), &CountMarkedCallback);
-  CHECK_LIVE_OBJECTS(code_it, live_immutable_objects_);
+  CHECK_LIVE_OBJECTS(code_it, live_code_objects_);
 
   HeapObjectIterator map_it(Heap::map_space(), &CountMarkedCallback);
   CHECK_LIVE_OBJECTS(map_it, live_map_objects_);
@@ -807,14 +824,10 @@ void EncodeFreeRegion(Address free_start, int free_size) {
 // Try to promote all objects in new space.  Heap numbers and sequential
 // strings are promoted to the code space, all others to the old space.
 inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
-  AllocationSpace target_space = Heap::TargetSpace(object);
-  Object* forwarded;
-  if (target_space == OLD_SPACE) {
-    forwarded = Heap::old_space()->MCAllocateRaw(object_size);
-  } else {
-    ASSERT(target_space == CODE_SPACE);
-    forwarded = Heap::code_space()->MCAllocateRaw(object_size);
-  }
+  OldSpace* target_space = Heap::TargetSpace(object);
+  ASSERT(target_space == Heap::old_pointer_space() ||
+         target_space == Heap::old_data_space());
+  Object* forwarded = target_space->MCAllocateRaw(object_size);
 
   if (forwarded->IsFailure()) {
     forwarded = Heap::new_space()->MCAllocateRaw(object_size);

@@ -824,8 +837,14 @@ inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
 
 
 // Allocation functions for the paged spaces call the space's MCAllocateRaw.
-inline Object* MCAllocateFromOldSpace(HeapObject* object, int object_size) {
-  return Heap::old_space()->MCAllocateRaw(object_size);
+inline Object* MCAllocateFromOldPointerSpace(HeapObject* object,
+                                             int object_size) {
+  return Heap::old_pointer_space()->MCAllocateRaw(object_size);
+}
+
+
+inline Object* MCAllocateFromOldDataSpace(HeapObject* object, int object_size) {
+  return Heap::old_data_space()->MCAllocateRaw(object_size);
 }

@@ -1058,10 +1077,16 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
 }
 
 
-void MarkCompactCollector::DeallocateOldBlock(Address start,
-                                              int size_in_bytes) {
+void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
+                                                     int size_in_bytes) {
   Heap::ClearRSetRange(start, size_in_bytes);
-  Heap::old_space()->Free(start, size_in_bytes);
+  Heap::old_pointer_space()->Free(start, size_in_bytes);
+}
+
+
+void MarkCompactCollector::DeallocateOldDataBlock(Address start,
+                                                  int size_in_bytes) {
+  Heap::old_data_space()->Free(start, size_in_bytes);
 }
@@ -1093,9 +1118,13 @@ void MarkCompactCollector::EncodeForwardingAddresses() {
   Heap::new_space()->MCResetRelocationInfo();
 
   // Compute the forwarding pointers in each space.
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldSpace,
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
                                         IgnoreNonLiveObject>(
-      Heap::old_space());
+      Heap::old_pointer_space());
+
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
+                                        IgnoreNonLiveObject>(
+      Heap::old_data_space());
 
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
                                         LogNonLiveCodeObject>(

@@ -1115,7 +1144,8 @@ void MarkCompactCollector::EncodeForwardingAddresses() {
   // Write relocation info to the top page, so we can use it later.  This is
   // done after promoting objects from the new space so we get the correct
   // allocation top.
-  Heap::old_space()->MCWriteRelocationInfoToPage();
+  Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
+  Heap::old_data_space()->MCWriteRelocationInfoToPage();
   Heap::code_space()->MCWriteRelocationInfoToPage();
   Heap::map_space()->MCWriteRelocationInfoToPage();
 }

@@ -1129,7 +1159,8 @@ void MarkCompactCollector::SweepSpaces() {
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
-  SweepSpace(Heap::old_space(), &DeallocateOldBlock);
+  SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
+  SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
   SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
   SweepSpace(Heap::new_space());
   SweepSpace(Heap::map_space(), &DeallocateMapBlock);
@@ -1193,19 +1224,16 @@ static int VerifyMapObject(HeapObject* obj) {
 
 
 void MarkCompactCollector::VerifyHeapAfterEncodingForwardingAddresses() {
-  Heap::new_space()->Verify();
-  Heap::old_space()->Verify();
-  Heap::code_space()->Verify();
-  Heap::map_space()->Verify();
+  AllSpaces spaces;
+  while (Space* space = spaces.next()) space->Verify();
 
   ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
   int live_maps = IterateLiveObjects(Heap::map_space(), &VerifyMapObject);
   ASSERT(live_maps == live_map_objects_);
 
   // Verify page headers in paged spaces.
-  VerifyPageHeaders(Heap::old_space());
-  VerifyPageHeaders(Heap::code_space());
-  VerifyPageHeaders(Heap::map_space());
+  PagedSpaces paged_spaces;
+  while (PagedSpace* space = paged_spaces.next()) VerifyPageHeaders(space);
 }

@@ -1264,7 +1292,8 @@ class UpdatingVisitor: public ObjectVisitor {
       new_addr = Memory::Address_at(f_addr);
 
 #ifdef DEBUG
-      ASSERT(Heap::old_space()->Contains(new_addr) ||
+      ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
+             Heap::old_data_space()->Contains(new_addr) ||
              Heap::code_space()->Contains(new_addr) ||
             Heap::new_space()->FromSpaceContains(new_addr));
@@ -1279,19 +1308,24 @@ class UpdatingVisitor: public ObjectVisitor {
       return;
 
     } else {
-      ASSERT(Heap::old_space()->Contains(obj) ||
+      ASSERT(Heap::old_pointer_space()->Contains(obj) ||
+             Heap::old_data_space()->Contains(obj) ||
              Heap::code_space()->Contains(obj) ||
              Heap::map_space()->Contains(obj));
 
       new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
-      ASSERT(Heap::old_space()->Contains(new_addr) ||
+      ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
+             Heap::old_data_space()->Contains(new_addr) ||
              Heap::code_space()->Contains(new_addr) ||
             Heap::map_space()->Contains(new_addr));
 
 #ifdef DEBUG
-      if (Heap::old_space()->Contains(obj)) {
-        ASSERT(Heap::old_space()->MCSpaceOffsetForAddress(new_addr) <=
-               Heap::old_space()->MCSpaceOffsetForAddress(old_addr));
+      if (Heap::old_pointer_space()->Contains(obj)) {
+        ASSERT(Heap::old_pointer_space()->MCSpaceOffsetForAddress(new_addr) <=
+               Heap::old_pointer_space()->MCSpaceOffsetForAddress(old_addr));
+      } else if (Heap::old_data_space()->Contains(obj)) {
+        ASSERT(Heap::old_data_space()->MCSpaceOffsetForAddress(new_addr) <=
+               Heap::old_data_space()->MCSpaceOffsetForAddress(old_addr));
       } else if (Heap::code_space()->Contains(obj)) {
         ASSERT(Heap::code_space()->MCSpaceOffsetForAddress(new_addr) <=
                Heap::code_space()->MCSpaceOffsetForAddress(old_addr));

@@ -1325,10 +1359,12 @@ void MarkCompactCollector::UpdatePointers() {
 
   int live_maps = IterateLiveObjects(Heap::map_space(),
                                      &UpdatePointersInOldObject);
-  int live_olds = IterateLiveObjects(Heap::old_space(),
-                                     &UpdatePointersInOldObject);
-  int live_immutables = IterateLiveObjects(Heap::code_space(),
-                                           &UpdatePointersInOldObject);
+  int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
+                                             &UpdatePointersInOldObject);
+  int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
+                                          &UpdatePointersInOldObject);
+  int live_codes = IterateLiveObjects(Heap::code_space(),
+                                      &UpdatePointersInOldObject);
   int live_news = IterateLiveObjects(Heap::new_space(),
                                      &UpdatePointersInNewObject);
@@ -1337,14 +1373,16 @@ void MarkCompactCollector::UpdatePointers() {
   while (it.has_next()) UpdatePointersInNewObject(it.next());
 
   USE(live_maps);
-  USE(live_olds);
-  USE(live_immutables);
+  USE(live_pointer_olds);
+  USE(live_data_olds);
+  USE(live_codes);
   USE(live_news);
 
 #ifdef DEBUG
   ASSERT(live_maps == live_map_objects_);
-  ASSERT(live_olds == live_old_objects_);
-  ASSERT(live_immutables == live_immutable_objects_);
+  ASSERT(live_data_olds == live_old_data_objects_);
+  ASSERT(live_pointer_olds == live_old_pointer_objects_);
+  ASSERT(live_codes == live_code_objects_);
   ASSERT(live_news == live_young_objects_);
 
   if (FLAG_verify_global_gc) VerifyHeapAfterUpdatingPointers();

@@ -1457,16 +1495,10 @@ Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
 void MarkCompactCollector::VerifyHeapAfterUpdatingPointers() {
   ASSERT(state_ == UPDATE_POINTERS);
 
-  Heap::new_space()->Verify();
-  Heap::old_space()->Verify();
-  Heap::code_space()->Verify();
-  Heap::map_space()->Verify();
+  AllSpaces spaces;
+  while (Space* space = spaces.next()) space->Verify();
 
   // We don't have object size info after updating pointers, not much we can
   // do here.
-  VerifyPageHeaders(Heap::old_space());
-  VerifyPageHeaders(Heap::code_space());
-  VerifyPageHeaders(Heap::map_space());
+  PagedSpaces paged_spaces;
+  while (PagedSpace* space = paged_spaces.next()) VerifyPageHeaders(space);
 }
 #endif
|
||||
@ -1482,19 +1514,23 @@ void MarkCompactCollector::RelocateObjects() {
|
||||
// Relocates objects, always relocate map objects first. Relocating
|
||||
// objects in other space relies on map objects to get object size.
|
||||
int live_maps = IterateLiveObjects(Heap::map_space(), &RelocateMapObject);
|
||||
int live_olds = IterateLiveObjects(Heap::old_space(), &RelocateOldObject);
|
||||
int live_immutables =
|
||||
IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
|
||||
int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
|
||||
&RelocateOldPointerObject);
|
||||
int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
|
||||
&RelocateOldDataObject);
|
||||
int live_codes = IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
|
||||
int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject);
|
||||
|
||||
USE(live_maps);
|
||||
USE(live_olds);
|
||||
USE(live_immutables);
|
||||
USE(live_data_olds);
|
||||
USE(live_pointer_olds);
|
||||
USE(live_codes);
|
||||
USE(live_news);
|
||||
#ifdef DEBUG
|
||||
ASSERT(live_maps == live_map_objects_);
|
||||
ASSERT(live_olds == live_old_objects_);
|
||||
ASSERT(live_immutables == live_immutable_objects_);
|
||||
ASSERT(live_data_olds == live_old_data_objects_);
|
||||
ASSERT(live_pointer_olds == live_old_pointer_objects_);
|
||||
ASSERT(live_codes == live_code_objects_);
|
||||
ASSERT(live_news == live_young_objects_);
|
||||
#endif
|
||||
|
||||
@ -1516,9 +1552,8 @@ void MarkCompactCollector::RelocateObjects() {
|
||||
// page-by-page basis after committing the m-c forwarding pointer.
|
||||
Page::set_rset_state(Page::IN_USE);
|
||||
#endif
|
||||
Heap::map_space()->MCCommitRelocationInfo();
|
||||
Heap::old_space()->MCCommitRelocationInfo();
|
||||
Heap::code_space()->MCCommitRelocationInfo();
|
||||
PagedSpaces spaces;
|
||||
while (PagedSpace* space = spaces.next()) space->MCCommitRelocationInfo();
|
||||
|
||||
#ifdef DEBUG
|
||||
if (FLAG_verify_global_gc) VerifyHeapAfterRelocatingObjects();
|
||||
@ -1563,15 +1598,10 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
|
||||
}
|
||||
|
||||
|
||||
int MarkCompactCollector::RelocateOldObject(HeapObject* obj) {
|
||||
// decode map pointer (forwarded address)
|
||||
MapWord encoding = obj->map_word();
|
||||
Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
|
||||
ASSERT(Heap::map_space()->Contains(map_addr));
|
||||
|
||||
// Get forwarding address before resetting map pointer
|
||||
Address new_addr = GetForwardingAddressInOldSpace(obj);
|
||||
|
||||
static inline int RelocateOldObject(HeapObject* obj,
|
||||
OldSpace* space,
|
||||
Address new_addr,
|
||||
Address map_addr) {
|
||||
// recover map pointer
|
||||
obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
|
||||
|
||||
@ -1580,35 +1610,55 @@ int MarkCompactCollector::RelocateOldObject(HeapObject* obj) {
|
||||
int obj_size = obj->Size();
|
||||
ASSERT_OBJECT_SIZE(obj_size);
|
||||
|
||||
ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
|
||||
space->MCSpaceOffsetForAddress(obj->address()));
|
||||
|
||||
space->MCAdjustRelocationEnd(new_addr, obj_size);
|
||||
|
||||
#ifdef DEBUG
|
||||
if (FLAG_gc_verbose) {
|
||||
PrintF("relocate %p -> %p\n", obj->address(), new_addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
return obj_size;
|
||||
}


int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
OldSpace* space) {
// decode map pointer (forwarded address)
MapWord encoding = obj->map_word();
Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
ASSERT(Heap::map_space()->Contains(map_addr));

// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);

int obj_size = RelocateOldObject(obj, space, new_addr, map_addr);

Address old_addr = obj->address();

ASSERT(Heap::old_space()->MCSpaceOffsetForAddress(new_addr) <=
Heap::old_space()->MCSpaceOffsetForAddress(old_addr));

Heap::old_space()->MCAdjustRelocationEnd(new_addr, obj_size);

if (new_addr != old_addr) {
memmove(new_addr, old_addr, obj_size); // copy contents
}

HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsCode()) {
// may also update inline cache target.
Code::cast(copied_to)->Relocate(new_addr - old_addr);
// Notify the logger that compile code has moved.
LOG(CodeMoveEvent(old_addr, new_addr));
}

#ifdef DEBUG
if (FLAG_gc_verbose) {
PrintF("relocate %p -> %p\n", old_addr, new_addr);
}
#endif
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());

return obj_size;
}


int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
}


int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
return RelocateOldNonCodeObject(obj, Heap::old_data_space());
}


int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// decode map pointer (forwarded address)
MapWord encoding = obj->map_word();
@ -1618,20 +1668,7 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);

// recover map pointer
obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

// This is a non-map object, it relies on the assumption that the Map space
// is compacted before the other spaces (see RelocateObjects).
int obj_size = obj->Size();
ASSERT_OBJECT_SIZE(obj_size);

Address old_addr = obj->address();

ASSERT(Heap::code_space()->MCSpaceOffsetForAddress(new_addr) <=
Heap::code_space()->MCSpaceOffsetForAddress(old_addr));

Heap::code_space()->MCAdjustRelocationEnd(new_addr, obj_size);
int obj_size = RelocateOldObject(obj, Heap::code_space(), new_addr, map_addr);

// convert inline cache target to address using old address
if (obj->IsCode()) {
@ -1639,6 +1676,8 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
}

Address old_addr = obj->address();

if (new_addr != old_addr) {
memmove(new_addr, old_addr, obj_size); // copy contents
}
@ -1651,12 +1690,6 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
LOG(CodeMoveEvent(old_addr, new_addr));
}

#ifdef DEBUG
if (FLAG_gc_verbose) {
PrintF("relocate %p -> %p\n", old_addr, new_addr);
}
#endif

return obj_size;
}

@ -1687,13 +1720,10 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
} else {
AllocationSpace target_space = Heap::TargetSpace(obj);
if (target_space == OLD_SPACE) {
Heap::old_space()->MCAdjustRelocationEnd(new_addr, obj_size);
} else {
ASSERT(target_space == CODE_SPACE);
Heap::code_space()->MCAdjustRelocationEnd(new_addr, obj_size);
}
OldSpace* target_space = Heap::TargetSpace(obj);
ASSERT(target_space == Heap::old_pointer_space() ||
target_space == Heap::old_data_space());
target_space->MCAdjustRelocationEnd(new_addr, obj_size);
}

// New and old addresses cannot overlap.
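The hunk above leans on Heap::TargetSpace now returning an OldSpace*: a scavenge survivor is promoted either to old_pointer_space or to old_data_space. A minimal sketch of that decision, assuming a hypothetical MayContainNewSpacePointers predicate (TargetSpace's real body is not part of this diff):

// Hedged sketch, not the V8 source; the predicate is hypothetical.
static bool MayContainNewSpacePointers(HeapObject* obj);  // hypothetical

static OldSpace* TargetSpaceSketch(HeapObject* obj) {
  if (MayContainNewSpacePointers(obj)) {
    return Heap::old_pointer_space();  // must be covered by remembered sets
  }
  return Heap::old_data_space();       // pointer-free, write barrier skipped
}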

@ -1721,26 +1751,14 @@ void MarkCompactCollector::VerifyHeapAfterRelocatingObjects() {
ASSERT(state_ == RELOCATE_OBJECTS);

Heap::new_space()->Verify();
Heap::old_space()->Verify();
Heap::code_space()->Verify();
Heap::map_space()->Verify();

PageIterator old_it(Heap::old_space(), PageIterator::PAGES_IN_USE);
while (old_it.has_next()) {
Page* p = old_it.next();
ASSERT_PAGE_OFFSET(p->Offset(p->AllocationTop()));
}

PageIterator code_it(Heap::code_space(), PageIterator::PAGES_IN_USE);
while (code_it.has_next()) {
Page* p = code_it.next();
ASSERT_PAGE_OFFSET(p->Offset(p->AllocationTop()));
}

PageIterator map_it(Heap::map_space(), PageIterator::PAGES_IN_USE);
while (map_it.has_next()) {
Page* p = map_it.next();
ASSERT_PAGE_OFFSET(p->Offset(p->AllocationTop()));
PagedSpaces spaces;
while (PagedSpace* space = spaces.next()) {
space->Verify();
PageIterator it(space, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* p = it.next();
ASSERT_PAGE_OFFSET(p->Offset(p->AllocationTop()));
}
}
}
#endif
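The loop above is the new "iterate over all spaces" facility from the commit message. The PagedSpaces definition itself is not shown in this diff; a hedged sketch of the shape such an iterator can take, relying on the paged spaces being consecutive AllocationSpace values (see the enum comment "Must be first of the paged spaces"):

// Hedged sketch only -- not the actual PagedSpaces implementation.
class PagedSpacesSketch {
 public:
  PagedSpacesSketch() : counter_(OLD_POINTER_SPACE) {}
  PagedSpace* next() {
    switch (counter_++) {
      case OLD_POINTER_SPACE: return Heap::old_pointer_space();
      case OLD_DATA_SPACE: return Heap::old_data_space();
      case CODE_SPACE: return Heap::code_space();
      case MAP_SPACE: return Heap::map_space();
      default: return NULL;  // past the last paged space
    }
  }
 private:
  int counter_;
};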

@ -241,7 +241,8 @@ class MarkCompactCollector : public AllStatic {

// Callback functions for deallocating non-live blocks in the old
// generation.
static void DeallocateOldBlock(Address start, int size_in_bytes);
static void DeallocateOldPointerBlock(Address start, int size_in_bytes);
static void DeallocateOldDataBlock(Address start, int size_in_bytes);
static void DeallocateCodeBlock(Address start, int size_in_bytes);
static void DeallocateMapBlock(Address start, int size_in_bytes);

@ -295,9 +296,13 @@ class MarkCompactCollector : public AllStatic {
static int RelocateMapObject(HeapObject* obj);

// Relocates an old object.
static int RelocateOldObject(HeapObject* obj);
static int RelocateOldPointerObject(HeapObject* obj);
static int RelocateOldDataObject(HeapObject* obj);

// Relocates an immutable object in the code space.
// Helper function.
static inline int RelocateOldNonCodeObject(HeapObject* obj, OldSpace* space);

// Relocates an object in the code space.
static int RelocateCodeObject(HeapObject* obj);

// Copy a new object.
@ -322,11 +327,14 @@ class MarkCompactCollector : public AllStatic {
// Number of live objects in Heap::to_space_.
static int live_young_objects_;

// Number of live objects in Heap::old_space_.
static int live_old_objects_;
// Number of live objects in Heap::old_pointer_space_.
static int live_old_pointer_objects_;

// Number of live objects in Heap::old_data_space_.
static int live_old_data_objects_;

// Number of live objects in Heap::code_space_.
static int live_immutable_objects_;
static int live_code_objects_;

// Number of live objects in Heap::map_space_.
static int live_map_objects_;

@ -182,8 +182,8 @@ int main(int argc, char** argv) {
i::Bootstrapper::NativesSourceLookup(i);
}
}
// Get rid of unreferenced scripts.
i::Heap::CollectGarbage(0, i::OLD_SPACE);
// Get rid of unreferenced scripts with a global GC.
i::Heap::CollectAllGarbage();
i::Serializer ser;
ser.Serialize();
char* str;

@ -931,7 +931,7 @@ Object* JSObject::Copy(PretenureFlag pretenure) {
// Make the clone.
Object* clone = (pretenure == NOT_TENURED) ?
Heap::Allocate(map(), NEW_SPACE) :
Heap::Allocate(map(), OLD_SPACE);
Heap::Allocate(map(), OLD_POINTER_SPACE);
if (clone->IsFailure()) return clone;
JSObject::cast(clone)->CopyBody(this);

@ -3327,6 +3327,8 @@ class Oddball: public HeapObject {


// Proxy describes objects pointing from JavaScript to C structures.
// Since they cannot contain references to JS HeapObjects they can be
// placed in old_data_space.
class Proxy: public HeapObject {
public:
// [proxy]: field containing the address.

@ -353,8 +353,8 @@ static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;


VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
address_ = mmap(address_hint, size, PROT_NONE,
VirtualMemory::VirtualMemory(size_t size) {
address_ = mmap(NULL, size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
size_ = size;

@ -312,8 +312,8 @@ static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;


VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
address_ = mmap(address_hint, size, PROT_NONE,
VirtualMemory::VirtualMemory(size_t size) {
address_ = mmap(NULL, size, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
size_ = size;
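Both POSIX constructors only reserve address space: PROT_NONE plus MAP_NORESERVE costs nothing until ranges are actually committed. A hedged, self-contained sketch of the reserve-then-commit pattern this sets up (function names are illustrative, not the V8 API; V8's own commit path goes through the Commit/CommitPages code further down):

// Hedged sketch of reserve-then-commit on POSIX (illustrative names).
#include <sys/mman.h>

void* ReserveSketch(size_t size) {
  // Reserve address space without backing store.
  return mmap(NULL, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
}

bool CommitSketch(void* address, size_t size, bool executable) {
  // Commit a sub-range; executable pages additionally get PROT_EXEC, which
  // is the hook that lets code pages differ from data pages per allocation.
  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
  return mprotect(address, size, prot) == 0;
}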

@ -1171,9 +1171,8 @@ bool VirtualMemory::IsReserved() {
}


VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
address_ =
VirtualAlloc(address_hint, size, MEM_RESERVE, PAGE_NOACCESS);
VirtualMemory::VirtualMemory(size_t size) {
address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
size_ = size;
}

@ -222,7 +222,7 @@ class OS {
class VirtualMemory {
public:
// Reserves virtual memory with size.
VirtualMemory(size_t size, void* address_hint = 0);
explicit VirtualMemory(size_t size);
~VirtualMemory();

// Returns whether the memory has been reserved.

@ -4536,8 +4536,8 @@ static Object* Runtime_DebugGetLoadedScripts(Arguments args) {
// Perform two GCs to get rid of all unreferenced scripts. The first GC gets
// rid of all the cached script wrappers and the second gets rid of the
// scripts which are no longer referenced.
Heap::CollectGarbage(0, OLD_SPACE);
Heap::CollectGarbage(0, OLD_SPACE);
Heap::CollectAllGarbage();
Heap::CollectAllGarbage();

// Get the number of scripts.
int count;
@ -4641,7 +4641,7 @@ static Object* Runtime_DebugReferencedBy(Arguments args) {
ASSERT(args.length() == 3);

// First perform a full GC in order to avoid references from dead objects.
Heap::CollectGarbage(0, OLD_SPACE);
Heap::CollectAllGarbage();

// Check parameters.
CONVERT_CHECKED(JSObject, target, args[0]);
@ -4721,7 +4721,7 @@ static Object* Runtime_DebugConstructedBy(Arguments args) {
ASSERT(args.length() == 2);

// First perform a full GC in order to avoid dead objects.
Heap::CollectGarbage(0, OLD_SPACE);
Heap::CollectAllGarbage();

// Check parameters.
CONVERT_CHECKED(JSFunction, constructor, args[0]);

src/serialize.cc
@ -53,13 +53,23 @@ DEFINE_bool(debug_serialization, false,
// - MAP and OLD spaces: 16 bits of page number, 11 bits of word offset in page
// - NEW space: 27 bits of word offset
// - LO space: 27 bits of page number
// 3 bits to encode the AllocationSpace
// 3 bits to encode the AllocationSpace (special values for code in LO space)
// 2 bits identifying this as a HeapObject

const int kSpaceShift = kHeapObjectTagSize;
const int kSpaceBits = kSpaceTagSize;
const int kSpaceMask = kSpaceTagMask;

// These values are used instead of space numbers when serializing/
// deserializing. They indicate an object that is in large object space, but
// should be treated specially.
// Make the pages executable on platforms that support it:
const int kLOSpaceExecutable = LAST_SPACE + 1;
// Reserve space for write barrier bits (for objects that can contain
// references to new space):
const int kLOSpacePointer = LAST_SPACE + 2;


const int kOffsetShift = kSpaceShift + kSpaceBits;
const int kOffsetBits = 11;
const int kOffsetMask = (1 << kOffsetBits) - 1;
@ -73,9 +83,28 @@ const int kPageAndOffsetBits = kPageBits + kOffsetBits;
const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1;


static inline AllocationSpace Space(Address addr) {
static inline AllocationSpace GetSpace(Address addr) {
const int encoded = reinterpret_cast<int>(addr);
return static_cast<AllocationSpace>((encoded >> kSpaceShift) & kSpaceMask);
int space_number = ((encoded >> kSpaceShift) & kSpaceMask);
if (space_number == kLOSpaceExecutable) space_number = LO_SPACE;
else if (space_number == kLOSpacePointer) space_number = LO_SPACE;
return static_cast<AllocationSpace>(space_number);
}


static inline bool IsLargeExecutableObject(Address addr) {
const int encoded = reinterpret_cast<int>(addr);
const int space_number = ((encoded >> kSpaceShift) & kSpaceMask);
if (space_number == kLOSpaceExecutable) return true;
return false;
}


static inline bool IsLargeFixedArray(Address addr) {
const int encoded = reinterpret_cast<int>(addr);
const int space_number = ((encoded >> kSpaceShift) & kSpaceMask);
if (space_number == kLOSpacePointer) return true;
return false;
}
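A worked reading of the encoding, using the constants above (illustrative; kHeapObjectTagSize is 2 in this tree):

// The space tag sits in bits 2..4, so GetSpace computes (encoded >> 2) & 7.
// Values 0..5 name real AllocationSpaces; 6 (kLOSpaceExecutable) and
// 7 (kLOSpacePointer) are the two spare bit patterns in the 3-bit field --
// which is why LAST_SPACE must not exceed 5. Both spares decode to LO_SPACE,
// while IsLargeExecutableObject / IsLargeFixedArray recover the special
// treatment (executable pages, or reserved write-barrier bits) on the side.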

@ -117,18 +146,29 @@ static inline int LargeObjectIndex(Address addr) {

class RelativeAddress {
public:
RelativeAddress(AllocationSpace space, int page_index, int page_offset)
: space_(space), page_index_(page_index), page_offset_(page_offset) {}
RelativeAddress(AllocationSpace space,
int page_index,
int page_offset)
: space_(space), page_index_(page_index), page_offset_(page_offset) {
ASSERT(space <= LAST_SPACE && space >= 0);
}

// Return the encoding of 'this' as an Address. Decode with constructor.
Address Encode() const;

AllocationSpace space() const { return space_; }
AllocationSpace space() const {
if (space_ == kLOSpaceExecutable) return LO_SPACE;
if (space_ == kLOSpacePointer) return LO_SPACE;
return static_cast<AllocationSpace>(space_);
}
int page_index() const { return page_index_; }
int page_offset() const { return page_offset_; }

bool in_paged_space() const {
return space_ == CODE_SPACE || space_ == OLD_SPACE || space_ == MAP_SPACE;
return space_ == CODE_SPACE ||
space_ == OLD_POINTER_SPACE ||
space_ == OLD_DATA_SPACE ||
space_ == MAP_SPACE;
}

void next_address(int offset) { page_offset_ += offset; }
@ -141,8 +181,18 @@ class RelativeAddress {
void Verify();
#endif

void set_to_large_code_object() {
ASSERT(space_ == LO_SPACE);
space_ = kLOSpaceExecutable;
}
void set_to_large_fixed_array() {
ASSERT(space_ == LO_SPACE);
space_ = kLOSpacePointer;
}


private:
AllocationSpace space_;
int space_;
int page_index_;
int page_offset_;
};
@ -154,7 +204,8 @@ Address RelativeAddress::Encode() const {
int result = 0;
switch (space_) {
case MAP_SPACE:
case OLD_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE:
ASSERT_EQ(0, page_index_ & ~kPageMask);
word_offset = page_offset_ >> kObjectAlignmentBits;
@ -168,6 +219,8 @@ Address RelativeAddress::Encode() const {
result = word_offset << kPageAndOffsetShift;
break;
case LO_SPACE:
case kLOSpaceExecutable:
case kLOSpacePointer:
ASSERT_EQ(0, page_offset_);
ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask);
result = page_index_ << kPageAndOffsetShift;
@ -185,7 +238,8 @@ void RelativeAddress::Verify() {
ASSERT(page_offset_ >= 0 && page_index_ >= 0);
switch (space_) {
case MAP_SPACE:
case OLD_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE:
ASSERT(Page::kObjectStartOffset <= page_offset_ &&
page_offset_ <= Page::kPageSize);
@ -194,12 +248,20 @@ void RelativeAddress::Verify() {
ASSERT(page_index_ == 0);
break;
case LO_SPACE:
case kLOSpaceExecutable:
case kLOSpacePointer:
ASSERT(page_offset_ == 0);
break;
}
}
#endif

enum GCTreatment {
DataObject, // Object that cannot contain a reference to new space.
PointerObject, // Object that can contain a reference to new space.
CodeObject // Object that contains executable code.
};

// A SimulatedHeapSpace simulates the allocation of objects in a page in
// the heap. It uses linear allocation - that is, it doesn't simulate the
// use of a free list. This simulated
@ -222,7 +284,7 @@ class SimulatedHeapSpace {
// Returns the RelativeAddress where the next
// object of 'size' bytes will be allocated, and updates 'this' to
// point to the next free address beyond that object.
RelativeAddress Allocate(int size);
RelativeAddress Allocate(int size, GCTreatment special_gc_treatment);

private:
RelativeAddress current_;
@ -232,7 +294,8 @@ void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
switch (space) {
case MAP_SPACE:
case OLD_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE:
current_ = RelativeAddress(space, 0, Page::kObjectStartOffset);
break;
@ -247,13 +310,16 @@ void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
switch (space) {
case MAP_SPACE:
case OLD_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE: {
PagedSpace* ps;
if (space == MAP_SPACE) {
ps = Heap::map_space();
} else if (space == OLD_SPACE) {
ps = Heap::old_space();
} else if (space == OLD_POINTER_SPACE) {
ps = Heap::old_pointer_space();
} else if (space == OLD_DATA_SPACE) {
ps = Heap::old_data_space();
} else {
ASSERT(space == CODE_SPACE);
ps = Heap::code_space();
@ -266,12 +332,15 @@ void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
if (it.next() == top_page) break;
page_index++;
}
current_ = RelativeAddress(space, page_index, top_page->Offset(top));
current_ = RelativeAddress(space,
page_index,
top_page->Offset(top));
break;
}
case NEW_SPACE:
current_ =
RelativeAddress(space, 0, Heap::NewSpaceTop() - Heap::NewSpaceStart());
current_ = RelativeAddress(space,
0,
Heap::NewSpaceTop() - Heap::NewSpaceStart());
break;
case LO_SPACE:
int page_index = 0;
@ -284,7 +353,8 @@ void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
}


RelativeAddress SimulatedHeapSpace::Allocate(int size) {
RelativeAddress SimulatedHeapSpace::Allocate(int size,
GCTreatment special_gc_treatment) {
#ifdef DEBUG
current_.Verify();
#endif
@ -297,6 +367,11 @@ RelativeAddress SimulatedHeapSpace::Allocate(int size) {
RelativeAddress result = current_;
if (current_.space() == LO_SPACE) {
current_.next_page();
if (special_gc_treatment == CodeObject) {
result.set_to_large_code_object();
} else if (special_gc_treatment == PointerObject) {
result.set_to_large_fixed_array();
}
} else {
current_.next_address(alloc_size);
}
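The simulator mirrors the real allocator's bump-pointer behavior in paged spaces. A hedged, stand-alone sketch of that linear allocation with page rollover (the constants are V8's; the helper itself is illustrative and does none of the RelativeAddress bookkeeping):

// Hedged sketch of bump-pointer allocation, assuming V8's kObjectAlignment,
// Page::kPageSize and Page::kObjectStartOffset constants.
static int SimulateLinearAllocSketch(int* page_offset, int size) {
  int alloc_size = (size + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
  if (*page_offset + alloc_size > Page::kPageSize) {
    *page_offset = Page::kObjectStartOffset;  // roll to a fresh page
  }
  int result = *page_offset;
  *page_offset += alloc_size;                 // bump past the new object
  return result;
}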
@ -924,7 +999,10 @@ void Serializer::PutHeader() {
// and code spaces, because objects in new space will be promoted to them.
writer_->PutC('S');
writer_->PutC('[');
writer_->PutInt(Heap::old_space()->Size() + Heap::new_space()->Size());
writer_->PutInt(Heap::old_pointer_space()->Size() +
Heap::new_space()->Size());
writer_->PutC('|');
writer_->PutInt(Heap::old_data_space()->Size() + Heap::new_space()->Size());
writer_->PutC('|');
writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size());
writer_->PutC('|');
@ -1094,16 +1172,24 @@ RelativeAddress Serializer::Allocate(HeapObject* obj) {
// Find out which AllocationSpace 'obj' is in.
AllocationSpace s;
bool found = false;
for (int i = 0; !found && i <= LAST_SPACE; i++) {
for (int i = FIRST_SPACE; !found && i <= LAST_SPACE; i++) {
s = static_cast<AllocationSpace>(i);
found = Heap::InSpace(obj, s);
}
CHECK(found);
if (s == NEW_SPACE) {
s = Heap::TargetSpace(obj);
Space* space = Heap::TargetSpace(obj);
ASSERT(space == Heap::old_pointer_space() ||
space == Heap::old_data_space());
s = (space == Heap::old_pointer_space()) ?
OLD_POINTER_SPACE :
OLD_DATA_SPACE;
}
int size = obj->Size();
return allocator_[s]->Allocate(size);
GCTreatment gc_treatment = DataObject;
if (obj->IsFixedArray()) gc_treatment = PointerObject;
else if (obj->IsCode()) gc_treatment = CodeObject;
return allocator_[s]->Allocate(size, gc_treatment);
}


@ -1116,8 +1202,11 @@ static const int kInitArraySize = 32;

Deserializer::Deserializer(const char* str, int len)
: reader_(str, len),
map_pages_(kInitArraySize), old_pages_(kInitArraySize),
code_pages_(kInitArraySize), large_objects_(kInitArraySize),
map_pages_(kInitArraySize),
old_pointer_pages_(kInitArraySize),
old_data_pages_(kInitArraySize),
code_pages_(kInitArraySize),
large_objects_(kInitArraySize),
global_handles_(4) {
root_ = true;
roots_ = 0;
@ -1281,7 +1370,11 @@ void Deserializer::GetHeader() {
// during deserialization.
reader_.ExpectC('S');
reader_.ExpectC('[');
InitPagedSpace(Heap::old_space(), reader_.GetInt(), &old_pages_);
InitPagedSpace(Heap::old_pointer_space(),
reader_.GetInt(),
&old_pointer_pages_);
reader_.ExpectC('|');
InitPagedSpace(Heap::old_data_space(), reader_.GetInt(), &old_data_pages_);
reader_.ExpectC('|');
InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_);
reader_.ExpectC('|');
@ -1340,7 +1433,15 @@ Object* Deserializer::GetObject() {
Address a = GetEncodedAddress();

// Get a raw object of the right size in the right space.
Object* o = Heap::AllocateRaw(size, Space(a));
AllocationSpace space = GetSpace(a);
Object *o;
if (IsLargeExecutableObject(a)) {
o = Heap::lo_space()->AllocateRawCode(size);
} else if (IsLargeFixedArray(a)) {
o = Heap::lo_space()->AllocateRawFixedArray(size);
} else {
o = Heap::AllocateRaw(size, space);
}
ASSERT(!o->IsFailure());
// Check that the simulation of heap allocation was correct.
ASSERT(o == Resolve(a));
@ -1405,18 +1506,20 @@ Object* Deserializer::Resolve(Address encoded) {
// Encoded addresses of HeapObjects always have 'HeapObject' tags.
ASSERT(o->IsHeapObject());

switch (Space(encoded)) {
// For Map space and Old space, we cache the known Pages in
// map_pages and old_pages respectively. Even though MapSpace
// keeps a list of page addresses, we don't rely on it since
// GetObject uses AllocateRaw, and that appears not to update
// the page list.
switch (GetSpace(encoded)) {
// For Map space and Old space, we cache the known Pages in map_pages,
// old_pointer_pages and old_data_pages. Even though MapSpace keeps a list
// of page addresses, we don't rely on it since GetObject uses AllocateRaw,
// and that appears not to update the page list.
case MAP_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::map_space(), &map_pages_);
case OLD_SPACE:
case OLD_POINTER_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::old_space(), &old_pages_);
Heap::old_pointer_space(), &old_pointer_pages_);
case OLD_DATA_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::old_data_space(), &old_data_pages_);
case CODE_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::code_space(), &code_pages_);

@ -312,10 +312,11 @@ class Deserializer: public ObjectVisitor {
bool has_log_; // The file has log information.

// Resolve caches the following:
List<Page*> map_pages_; // All pages in the map space.
List<Page*> old_pages_; // All pages in the old space.
List<Page*> map_pages_; // All pages in the map space.
List<Page*> old_pointer_pages_; // All pages in the old pointer space.
List<Page*> old_data_pages_; // All pages in the old data space.
List<Page*> code_pages_;
List<Object*> large_objects_; // All known large objects.
List<Object*> large_objects_; // All known large objects.
// A list of global handles at deserialization time.
List<Object**> global_handles_;

@ -86,14 +86,7 @@ Page* Page::next_page() {

Address Page::AllocationTop() {
PagedSpace* owner = MemoryAllocator::PageOwner(this);
if (Heap::old_space() == owner) {
return Heap::old_space()->PageAllocationTop(this);
} else if (Heap::code_space() == owner) {
return Heap::code_space()->PageAllocationTop(this);
} else {
ASSERT(Heap::map_space() == owner);
return Heap::map_space()->PageAllocationTop(this);
}
return owner->PageAllocationTop(this);
}
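The if-chain collapses to a single virtual call because PageAllocationTop is now declared on PagedSpace itself and overridden per space -- one of the "make some methods on Spaces virtual" changes from the commit message. In miniature, from the spaces.h hunks later in this diff:

// virtual Address PageAllocationTop(Page* page) = 0;   // on PagedSpace
// ...each subclass supplies its own top, e.g. OldSpace:
// virtual Address PageAllocationTop(Page* page) {
//   return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
// }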


@ -282,24 +275,6 @@ Object* PagedSpace::MCAllocateRaw(int size_in_bytes) {
}


// Allocating during deserialization. Always roll to the next page in the
// space, which should be suitably expanded.
Object* PagedSpace::AllocateForDeserialization(int size_in_bytes) {
ASSERT(HasBeenSetup());
ASSERT_OBJECT_SIZE(size_in_bytes);
HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
if (object != NULL) return object;

// The space should be pre-expanded.
Page* current_page = Page::FromAllocationTop(allocation_info_.top);
ASSERT(current_page->next_page()->is_valid());
object = AllocateInNextPage(current_page, size_in_bytes);

ASSERT(object != NULL);
return object;
}


// -----------------------------------------------------------------------------
// LargeObjectChunk

@ -227,10 +227,10 @@ void MemoryAllocator::TearDown() {

void* MemoryAllocator::AllocateRawMemory(const size_t requested,
size_t* allocated,
bool executable) {
Executability executable) {
if (size_ + static_cast<int>(requested) > capacity_) return NULL;

void* mem = OS::Allocate(requested, allocated, executable);
void* mem = OS::Allocate(requested, allocated, executable == EXECUTABLE);
int alloced = *allocated;
size_ += alloced;
Counters::memory_allocated.Increment(alloced);
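The Executability enum replaces a bare bool precisely so call sites read as intent -- a before/after sketch (illustrative):

//   MemoryAllocator::AllocateRawMemory(size, &allocated, true);        // before
//   MemoryAllocator::AllocateRawMemory(size, &allocated, EXECUTABLE);  // after
//
// The bool only reappears once, at the OS boundary, as in the hunk above:
//   OS::Allocate(requested, allocated, executable == EXECUTABLE);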
@ -316,7 +316,7 @@ Page* MemoryAllocator::CommitPages(Address start, size_t size,
ASSERT(initial_chunk_->address() <= start);
ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
+ initial_chunk_->size());
if (!initial_chunk_->Commit(start, size, owner->executable())) {
if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
return Page::FromAddress(NULL);
}
Counters::memory_allocated.Increment(size);
@ -332,7 +332,7 @@ Page* MemoryAllocator::CommitPages(Address start, size_t size,

bool MemoryAllocator::CommitBlock(Address start,
size_t size,
bool executable) {
Executability executable) {
ASSERT(start != NULL);
ASSERT(size > 0);
ASSERT(initial_chunk_ != NULL);
@ -474,7 +474,9 @@ void MemoryAllocator::ReportStatistics() {
// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(int max_capacity, AllocationSpace id, bool executable)
PagedSpace::PagedSpace(int max_capacity,
AllocationSpace id,
Executability executable)
: Space(id, executable) {
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
* Page::kObjectAreaSize;
@ -494,8 +496,11 @@ bool PagedSpace::Setup(Address start, size_t size) {
int num_pages = 0;
// Try to use the virtual memory range passed to us. If it is too small to
// contain at least one page, ignore it and allocate instead.
if (PagesInChunk(start, size) > 0) {
first_page_ = MemoryAllocator::CommitPages(start, size, this, &num_pages);
int pages_in_chunk = PagesInChunk(start, size);
if (pages_in_chunk > 0) {
first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
Page::kPageSize * pages_in_chunk,
this, &num_pages);
} else {
int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
max_capacity_ / Page::kObjectAreaSize);
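This hunk is the "Space::Setup can cope with non-aligned memory" fix: the start address is rounded up to a page boundary instead of being assumed aligned. A worked example with the 8K pages defined below (numbers illustrative):

// With start = 0x20001000 and size = 0x5000, the range holds 2 full 8K
// pages in its aligned interior, so Setup commits
//   RoundUp(0x20001000, 0x2000) = 0x20002000
// for 2 * 0x2000 bytes, instead of committing from the unaligned 'start'.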
@ -768,15 +773,14 @@ void PagedSpace::Print() { }

NewSpace::NewSpace(int initial_semispace_capacity,
int maximum_semispace_capacity,
AllocationSpace id,
bool executable)
: Space(id, executable) {
AllocationSpace id)
: Space(id, NOT_EXECUTABLE) {
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
ASSERT(IsPowerOf2(maximum_semispace_capacity));
maximum_capacity_ = maximum_semispace_capacity;
capacity_ = initial_semispace_capacity;
to_space_ = new SemiSpace(capacity_, maximum_capacity_, id, executable);
from_space_ = new SemiSpace(capacity_, maximum_capacity_, id, executable);
to_space_ = new SemiSpace(capacity_, maximum_capacity_, id);
from_space_ = new SemiSpace(capacity_, maximum_capacity_, id);

// Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@ -940,9 +944,8 @@ void NewSpace::Verify() {

SemiSpace::SemiSpace(int initial_capacity,
int maximum_capacity,
AllocationSpace id,
bool executable)
: Space(id, executable), capacity_(initial_capacity),
AllocationSpace id)
: Space(id, NOT_EXECUTABLE), capacity_(initial_capacity),
maximum_capacity_(maximum_capacity), start_(NULL), age_mark_(NULL) {
}

@ -980,6 +983,9 @@ bool SemiSpace::Double() {

#ifdef DEBUG
void SemiSpace::Print() { }


void SemiSpace::Verify() { }
#endif


@ -2190,7 +2196,7 @@ HeapObject* LargeObjectIterator::next() {

LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
size_t* chunk_size,
bool executable) {
Executability executable) {
size_t requested = ChunkSizeFor(size_in_bytes);
void* mem = MemoryAllocator::AllocateRawMemory(requested,
chunk_size,
@ -2216,8 +2222,8 @@ int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
// -----------------------------------------------------------------------------
// LargeObjectSpace

LargeObjectSpace::LargeObjectSpace(AllocationSpace id, bool executable)
: Space(id, executable),
LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
: Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis
first_chunk_(NULL),
size_(0),
page_count_(0) {}
@ -2245,11 +2251,12 @@ void LargeObjectSpace::TearDown() {


Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
int object_size) {
int object_size,
Executability executable) {
ASSERT(0 < object_size && object_size <= requested_size);
size_t chunk_size;
LargeObjectChunk* chunk =
LargeObjectChunk::New(requested_size, &chunk_size, executable());
LargeObjectChunk::New(requested_size, &chunk_size, executable);
if (chunk == NULL) {
return Failure::RetryAfterGC(requested_size, identity());
}
@ -2280,15 +2287,28 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
}


Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
ASSERT(0 < size_in_bytes);
return AllocateRawInternal(size_in_bytes, size_in_bytes);
return AllocateRawInternal(size_in_bytes,
size_in_bytes,
EXECUTABLE);
}


Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
ASSERT(0 < size_in_bytes);
int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
return AllocateRawInternal(size_in_bytes + extra_rset_bytes, size_in_bytes);
return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
size_in_bytes,
NOT_EXECUTABLE);
}


Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
ASSERT(0 < size_in_bytes);
return AllocateRawInternal(size_in_bytes,
size_in_bytes,
NOT_EXECUTABLE);
}
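Large FixedArrays are the one case that gets extra remembered-set (write barrier) bytes appended, matching the serialization fix in the commit message. ExtraRSetBytesFor's body is not in this diff; per the spaces.h comment below ("rounded up to the nearest full word"), its shape is plausibly one rset bit per pointer slot -- a hedged sketch, not the V8 source:

// Hedged sketch only; assumes one remembered-set bit per pointer slot,
// rounded up to whole words, using V8's kPointerSize and kBitsPerByte.
static int ExtraRSetBytesForSketch(int size_in_bytes) {
  int pointer_slots = size_in_bytes / kPointerSize;      // candidate slots
  int rset_bytes = (pointer_slots + kBitsPerByte - 1) / kBitsPerByte;
  // Round up to a whole word so the trailing bitmap stays aligned.
  return ((rset_bytes + kPointerSize - 1) / kPointerSize) * kPointerSize;
}
// e.g. a 64 KB array on a 32-bit heap has 16384 slots -> 2048 rset bytes.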

src/spaces.h
@ -209,7 +209,7 @@ class Page {
// 8K bytes per page.
static const int kPageSizeBits = 13;

// Page size in bytes.
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;

// Page size mask.
@ -234,7 +234,7 @@ class Page {
//---------------------------------------------------------------------------
// Page header description.
//
// If a page is not in a large object space, the first word,
// If a page is not in the large object space, the first word,
// opaque_header, encodes the next page address (aligned to kPageSize 8K)
// and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
// opaque_header. The value range of the opaque_header is [0..kPageSize[,
@ -275,15 +275,21 @@ class Page {
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
public:
Space(AllocationSpace id, bool executable)
Space(AllocationSpace id, Executability executable)
: id_(id), executable_(executable) {}
virtual ~Space() {}
// Does the space need executable memory?
bool executable() { return executable_; }
Executability executable() { return executable_; }
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
virtual int Size() = 0;
#ifdef DEBUG
virtual void Verify() = 0;
virtual void Print() = 0;
#endif
private:
AllocationSpace id_;
bool executable_;
Executability executable_;
};


@ -338,7 +344,7 @@ class MemoryAllocator : public AllStatic {
// the address is not NULL, the size is greater than zero, and that the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
static bool CommitBlock(Address start, size_t size, bool executable);
static bool CommitBlock(Address start, size_t size, Executability executable);

// Attempts to allocate the requested (non-zero) number of pages from the
// OS. Fewer pages might be allocated than requested. If it fails to
@ -363,12 +369,17 @@ class MemoryAllocator : public AllStatic {
// but keep track of allocated bytes as part of heap.
static void* AllocateRawMemory(const size_t requested,
size_t* allocated,
bool executable);
Executability executable);
static void FreeRawMemory(void* buf, size_t length);

// Returns the maximum available bytes of heaps.
static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

// Returns maximum available bytes that the old space can have.
static int MaxAvailable() {
return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
}

// Links two pages.
static inline void SetNextPage(Page* prev, Page* next);

@ -661,7 +672,7 @@ class PagedSpace : public Space {
friend class PageIterator;
public:
// Creates a space with a maximum capacity, and an id.
PagedSpace(int max_capacity, AllocationSpace id, bool executable);
PagedSpace(int max_capacity, AllocationSpace id, Executability executable);

virtual ~PagedSpace() {}

@ -695,6 +706,11 @@ class PagedSpace : public Space {
// Clears remembered sets of pages in this space.
void ClearRSet();

// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact) = 0;

virtual Address PageAllocationTop(Page* page) = 0;

// Current capacity without growing (Size() + Available() + Waste()).
int Capacity() { return accounting_stats_.Capacity(); }

@ -702,7 +718,7 @@ class PagedSpace : public Space {
int Available() { return accounting_stats_.Available(); }

// Allocated bytes in this space.
int Size() { return accounting_stats_.Size(); }
virtual int Size() { return accounting_stats_.Size(); }

// Wasted bytes due to fragmentation and not recoverable until the
// next GC of this space.
@ -723,9 +739,6 @@ class PagedSpace : public Space {
inline Object* MCAllocateRaw(int size_in_bytes);


// Allocate the requested number of bytes during deserialization.
inline Object* AllocateForDeserialization(int size_in_bytes);

// ---------------------------------------------------------------------------
// Mark-compact collection support functions

@ -741,6 +754,10 @@ class PagedSpace : public Space {
// of the space.
int MCSpaceOffsetForAddress(Address addr);

// Updates the allocation pointer to the relocation top after a mark-compact
// collection.
virtual void MCCommitRelocationInfo() = 0;

// Releases half of unused pages.
void Shrink();

@ -749,7 +766,7 @@ class PagedSpace : public Space {

#ifdef DEBUG
// Print meta info and objects in this space.
void Print();
virtual void Print();

// Report code object related statistics
void CollectCodeStatistics();
@ -869,8 +886,8 @@ class SemiSpace : public Space {
// addresses.
SemiSpace(int initial_capacity,
int maximum_capacity,
AllocationSpace id,
bool executable);
AllocationSpace id);
virtual ~SemiSpace() {}

// Sets up the semispace using the given chunk.
bool Setup(Address start, int size);
@ -913,8 +930,16 @@ class SemiSpace : public Space {
// The offset of an address from the beginning of the space.
int SpaceOffsetForAddress(Address addr) { return addr - low(); }

// If we don't have this here then SemiSpace will be abstract. However
// it should never be called.
virtual int Size() {
UNREACHABLE();
return 0;
}

#ifdef DEBUG
void Print();
virtual void Print();
virtual void Verify();
#endif

private:
@ -999,8 +1024,8 @@ class NewSpace : public Space {
// and it must be aligned to its size.
NewSpace(int initial_semispace_capacity,
int maximum_semispace_capacity,
AllocationSpace id,
bool executable);
AllocationSpace id);
virtual ~NewSpace() {}

// Sets up the new space using the given chunk.
bool Setup(Address start, int size);
@ -1032,7 +1057,7 @@ class NewSpace : public Space {
}

// Return the allocated bytes in the active semispace.
int Size() { return top() - bottom(); }
virtual int Size() { return top() - bottom(); }
// Return the current capacity of a semispace.
int Capacity() { return capacity_; }
// Return the available bytes without growing in the active semispace.
@ -1107,9 +1132,9 @@ class NewSpace : public Space {

#ifdef DEBUG
// Verify the active semispace.
void Verify();
virtual void Verify();
// Print the active semispace.
void Print() { to_space_->Print(); }
virtual void Print() { to_space_->Print(); }
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)

@ -1341,22 +1366,18 @@ class OldSpace : public PagedSpace {
public:
// Creates an old space object with a given maximum capacity.
// The constructor does not allocate pages from OS.
explicit OldSpace(int max_capacity, AllocationSpace id, bool executable)
explicit OldSpace(int max_capacity,
AllocationSpace id,
Executability executable)
: PagedSpace(max_capacity, id, executable), free_list_(id) {
}

// Returns maximum available bytes that the old space can have.
int MaxAvailable() {
return (MemoryAllocator::Available() / Page::kPageSize)
* Page::kObjectAreaSize;
}

// The bytes available on the free list (ie, not above the linear allocation
// pointer).
int AvailableFree() { return free_list_.available(); }

// The top of allocation in a page in this space. Undefined if page is unused.
Address PageAllocationTop(Page* page) {
virtual Address PageAllocationTop(Page* page) {
return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
}

@ -1370,7 +1391,7 @@ class OldSpace : public PagedSpace {

// Prepare for full garbage collection. Resets the relocation pointer and
// clears the free list.
void PrepareForMarkCompact(bool will_compact);
virtual void PrepareForMarkCompact(bool will_compact);

// Adjust the top of relocation pointer to point to the end of the object
// given by 'address' and 'size_in_bytes'. Move it to the next page if
@ -1380,11 +1401,11 @@ class OldSpace : public PagedSpace {

// Updates the allocation pointer to the relocation top after a mark-compact
// collection.
void MCCommitRelocationInfo();
virtual void MCCommitRelocationInfo();

#ifdef DEBUG
// Verify integrity of this space.
void Verify();
virtual void Verify();

// Reports statistics for the space
void ReportStatistics();
@ -1420,14 +1441,10 @@ class MapSpace : public PagedSpace {
public:
// Creates a map space object with a maximum capacity.
explicit MapSpace(int max_capacity, AllocationSpace id)
: PagedSpace(max_capacity, id, false), free_list_(id) { }

// The bytes available on the free list (ie, not above the linear allocation
// pointer).
int AvailableFree() { return free_list_.available(); }
: PagedSpace(max_capacity, id, NOT_EXECUTABLE), free_list_(id) { }

// The top of allocation in a page in this space. Undefined if page is unused.
Address PageAllocationTop(Page* page) {
virtual Address PageAllocationTop(Page* page) {
return page == TopPageOf(allocation_info_) ? top()
: page->ObjectAreaEnd() - kPageExtra;
}
@ -1442,15 +1459,15 @@ class MapSpace : public PagedSpace {
Address PageAddress(int page_index) { return page_addresses_[page_index]; }

// Prepares for a mark-compact GC.
void PrepareForMarkCompact(bool will_compact);
virtual void PrepareForMarkCompact(bool will_compact);

// Updates the allocation pointer to the relocation top after a mark-compact
// collection.
void MCCommitRelocationInfo();
virtual void MCCommitRelocationInfo();

#ifdef DEBUG
// Verify integrity of this space.
void Verify();
virtual void Verify();

// Reports statistic info of the space
void ReportStatistics();
@ -1490,7 +1507,6 @@ class MapSpace : public PagedSpace {
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset to a page.
// Large objects do not move during garbage collections.
//

// A LargeObjectChunk holds exactly one large object page with exactly one
// large object.
@ -1503,7 +1519,7 @@ class LargeObjectChunk {
// parameter chunk_size.
static LargeObjectChunk* New(int size_in_bytes,
size_t* chunk_size,
bool executable);
Executability executable);

// Interpret a raw address as a large object chunk.
static LargeObjectChunk* FromAddress(Address address) {
@ -1553,7 +1569,8 @@ class LargeObjectChunk {
class LargeObjectSpace : public Space {
friend class LargeObjectIterator;
public:
explicit LargeObjectSpace(AllocationSpace id, bool executable);
explicit LargeObjectSpace(AllocationSpace id);
virtual ~LargeObjectSpace() {}

// Initializes internal data structures.
bool Setup();
@ -1561,8 +1578,10 @@ class LargeObjectSpace : public Space {
// Releases internal resources, frees objects in this space.
void TearDown();

// Allocates a (non-FixedArray) large object.
// Allocates a (non-FixedArray, non-Code) large object.
Object* AllocateRaw(int size_in_bytes);
// Allocates a large Code object.
Object* AllocateRawCode(int size_in_bytes);
// Allocates a large FixedArray.
Object* AllocateRawFixedArray(int size_in_bytes);

@ -1572,7 +1591,7 @@ class LargeObjectSpace : public Space {
return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
}

int Size() {
virtual int Size() {
return size_;
}

@ -1601,8 +1620,8 @@ class LargeObjectSpace : public Space {
bool IsEmpty() { return first_chunk_ == NULL; }

#ifdef DEBUG
void Verify();
void Print();
virtual void Verify();
virtual void Print();
void ReportStatistics();
void CollectCodeStatistics();
// Dump the remembered sets in the space to stdout.
@ -1619,8 +1638,11 @@ class LargeObjectSpace : public Space {
int page_count_; // number of chunks


// Shared implementation of AllocateRaw and AllocateRawFixedArray.
Object* AllocateRawInternal(int requested_size, int object_size);
// Shared implementation of AllocateRaw, AllocateRawCode and
// AllocateRawFixedArray.
Object* AllocateRawInternal(int requested_size,
int object_size,
Executability executable);

// Returns the number of extra bytes (rounded up to the nearest full word)
// required for extra_object_bytes of extra pointers (in bytes).

@ -461,10 +461,10 @@ THREADED_TEST(ScriptUsingStringResource) {
CHECK(source->IsExternal());
CHECK_EQ(resource,
static_cast<TestResource*>(source->GetExternalStringResource()));
v8::internal::Heap::CollectGarbage(0, v8::internal::OLD_SPACE);
v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(0, TestResource::dispose_count);
}
v8::internal::Heap::CollectGarbage(0, v8::internal::OLD_SPACE);
v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(1, TestResource::dispose_count);
}

@ -481,10 +481,10 @@ THREADED_TEST(ScriptUsingAsciiStringResource) {
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
v8::internal::Heap::CollectGarbage(0, v8::internal::OLD_SPACE);
v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
v8::internal::Heap::CollectGarbage(0, v8::internal::OLD_SPACE);
v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(1, TestAsciiResource::dispose_count);
}

@ -2455,7 +2455,7 @@ static v8::Handle<Value> ArgumentsTestCallback(const v8::Arguments& args) {
CHECK_EQ(v8::Integer::New(3), args[2]);
CHECK_EQ(v8::Undefined(), args[3]);
v8::HandleScope scope;
i::Heap::CollectGarbage(0, i::OLD_SPACE);
i::Heap::CollectAllGarbage();
return v8::Undefined();
}

@ -4694,7 +4694,7 @@ THREADED_TEST(LockUnlockLock) {

static void EnsureNoSurvivingGlobalObjects() {
int count = 0;
v8::internal::Heap::CollectGarbage(0, v8::internal::OLD_SPACE);
v8::internal::Heap::CollectAllGarbage();
v8::internal::HeapIterator it;
while (it.has_next()) {
v8::internal::HeapObject* object = it.next();

@ -618,7 +618,7 @@ static void DebugEventBreakPointCollectGarbage(
Heap::CollectGarbage(0, v8::internal::NEW_SPACE);
} else {
// Mark sweep (and perhaps compact).
Heap::CollectGarbage(0, v8::internal::OLD_SPACE);
Heap::CollectAllGarbage();
}
}
}
@ -960,7 +960,7 @@ static void CallAndGC(v8::Local<v8::Object> recv, v8::Local<v8::Function> f) {
CHECK_EQ(2 + i * 3, break_point_hit_count);

// Mark sweep (and perhaps compact) and call function.
Heap::CollectGarbage(0, v8::internal::OLD_SPACE);
Heap::CollectAllGarbage();
f->Call(recv, 0, NULL);
CHECK_EQ(3 + i * 3, break_point_hit_count);
}

@ -176,7 +176,8 @@ TEST(Tagging) {
CHECK(Failure::RetryAfterGC(12, NEW_SPACE)->IsFailure());
CHECK_EQ(12, Failure::RetryAfterGC(12, NEW_SPACE)->requested());
CHECK_EQ(NEW_SPACE, Failure::RetryAfterGC(12, NEW_SPACE)->allocation_space());
CHECK_EQ(OLD_SPACE, Failure::RetryAfterGC(12, OLD_SPACE)->allocation_space());
CHECK_EQ(OLD_POINTER_SPACE,
Failure::RetryAfterGC(12, OLD_POINTER_SPACE)->allocation_space());
CHECK(Failure::Exception()->IsFailure());
CHECK(Smi::FromInt(Smi::kMinValue)->IsSmi());
CHECK(Smi::FromInt(Smi::kMaxValue)->IsSmi());
@ -353,7 +354,7 @@ TEST(WeakGlobalHandlesMark) {
Handle<Object> h1 = GlobalHandles::Create(i);
Handle<Object> h2 = GlobalHandles::Create(u);

CHECK(Heap::CollectGarbage(0, OLD_SPACE));
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
CHECK(Heap::CollectGarbage(0, NEW_SPACE));
// Make sure the object is promoted.

@ -363,7 +364,7 @@ TEST(WeakGlobalHandlesMark) {
CHECK(!GlobalHandles::IsNearDeath(h1.location()));
CHECK(!GlobalHandles::IsNearDeath(h2.location()));

CHECK(Heap::CollectGarbage(0, OLD_SPACE));
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));

CHECK((*h1)->IsString());

@ -400,7 +401,7 @@ TEST(DeleteWeakGlobalHandle) {
CHECK(!WeakPointerCleared);

// Mark-compact treats weak reference properly.
CHECK(Heap::CollectGarbage(0, OLD_SPACE));
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));

CHECK(WeakPointerCleared);
}
@ -751,11 +752,11 @@ TEST(Iteration) {
Handle<Object> objs[objs_count];
int next_objs_index = 0;

// Allocate a JS array to OLD_SPACE and NEW_SPACE
// Allocate a JS array to OLD_POINTER_SPACE and NEW_SPACE
objs[next_objs_index++] = Factory::NewJSArray(10);
objs[next_objs_index++] = Factory::NewJSArray(10, TENURED);

// Allocate a small string to CODE_SPACE and NEW_SPACE
// Allocate a small string to OLD_DATA_SPACE and NEW_SPACE
objs[next_objs_index++] =
Factory::NewStringFromAscii(CStrVector("abcdefghij"));
objs[next_objs_index++] =

@ -102,10 +102,10 @@ TEST(Promotion) {
CHECK(Heap::InSpace(*array, NEW_SPACE));

// Call the m-c collector, so array becomes an old object.
CHECK(Heap::CollectGarbage(0, OLD_SPACE));
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));

// Array now sits in the old space
CHECK(Heap::InSpace(*array, OLD_SPACE));
CHECK(Heap::InSpace(*array, OLD_POINTER_SPACE));
}


@ -120,7 +120,7 @@ TEST(NoPromotion) {
v8::HandleScope sc;

// Do a mark compact GC to shrink the heap.
CHECK(Heap::CollectGarbage(0, OLD_SPACE));
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));

// Allocate a big Fixed array in the new space.
int size = (Heap::MaxHeapObjectSize() - Array::kHeaderSize) / kPointerSize;
@ -142,7 +142,7 @@ TEST(NoPromotion) {
}

// Call mark compact GC, and it should pass.
CHECK(Heap::CollectGarbage(0, OLD_SPACE));
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));

// array should not be promoted because the old space is full.
CHECK(Heap::InSpace(*array, NEW_SPACE));
@ -154,7 +154,7 @@ TEST(MarkCompactCollector) {

v8::HandleScope sc;
// call mark-compact when heap is empty
CHECK(Heap::CollectGarbage(0, OLD_SPACE));
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));

// keep allocating garbage in new space until it fails
const int ARRAY_SIZE = 100;
@ -190,7 +190,7 @@ TEST(MarkCompactCollector) {
Top::context()->global()->SetProperty(func_name, function, NONE);

JSObject* obj = JSObject::cast(Heap::AllocateJSObject(function));
CHECK(Heap::CollectGarbage(0, OLD_SPACE));
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));

func_name = String::cast(Heap::LookupAsciiSymbol("theFunction"));
CHECK(Top::context()->global()->HasLocalProperty(func_name));
@ -204,7 +204,7 @@ TEST(MarkCompactCollector) {
String* prop_name = String::cast(Heap::LookupAsciiSymbol("theSlot"));
obj->SetProperty(prop_name, Smi::FromInt(23), NONE);

CHECK(Heap::CollectGarbage(0, OLD_SPACE));
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));

obj_name = String::cast(Heap::LookupAsciiSymbol("theObject"));
CHECK(Top::context()->global()->HasLocalProperty(obj_name));
@ -242,7 +242,7 @@ TEST(GCCallback) {
CHECK_EQ(0, gc_starts);
CHECK_EQ(gc_ends, gc_starts);

CHECK(Heap::CollectGarbage(0, OLD_SPACE));
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
CHECK_EQ(1, gc_starts);
CHECK_EQ(gc_ends, gc_starts);
}
@ -292,7 +292,7 @@ TEST(ObjectGroups) {
GlobalHandles::AddToGroup(reinterpret_cast<void*>(2), g2s1.location());
GlobalHandles::AddToGroup(reinterpret_cast<void*>(2), g2s2.location());
// Do a full GC
CHECK(Heap::CollectGarbage(0, OLD_SPACE));
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));

// All objects should be alive.
CHECK_EQ(0, NumberOfWeakCalls);
@ -308,7 +308,7 @@ TEST(ObjectGroups) {
GlobalHandles::AddToGroup(reinterpret_cast<void*>(2), g2s1.location());
GlobalHandles::AddToGroup(reinterpret_cast<void*>(2), g2s2.location());

CHECK(Heap::CollectGarbage(0, OLD_SPACE));
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));

// All objects should be gone. 5 global handles in total.
CHECK_EQ(5, NumberOfWeakCalls);

@ -101,7 +101,7 @@ TEST(MemoryAllocator) {
CHECK(Heap::ConfigureHeapDefault());
CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));

OldSpace faked_space(Heap::MaxCapacity(), OLD_SPACE, false);
OldSpace faked_space(Heap::MaxCapacity(), OLD_POINTER_SPACE, NOT_EXECUTABLE);
int total_pages = 0;
int requested = 2;
int allocated;
@ -159,8 +159,7 @@ TEST(NewSpace) {

NewSpace* s = new NewSpace(Heap::InitialSemiSpaceSize(),
Heap::SemiSpaceSize(),
NEW_SPACE,
false);
NEW_SPACE);
CHECK(s != NULL);

void* chunk =
@ -187,7 +186,9 @@ TEST(OldSpace) {
CHECK(Heap::ConfigureHeapDefault());
CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));

OldSpace* s = new OldSpace(Heap::OldGenerationSize(), OLD_SPACE, false);
OldSpace* s = new OldSpace(Heap::OldGenerationSize(),
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
CHECK(s != NULL);

void* chunk =
@ -213,7 +214,7 @@ TEST(LargeObjectSpace) {
CHECK(Heap::ConfigureHeapDefault());
MemoryAllocator::Setup(Heap::MaxCapacity());

LargeObjectSpace* lo = new LargeObjectSpace(LO_SPACE, false);
LargeObjectSpace* lo = new LargeObjectSpace(LO_SPACE);
CHECK(lo != NULL);

CHECK(lo->Setup());