Revert 10413-10416 initial memory use reduction due to test failures.

Review URL: http://codereview.chromium.org/9178014

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10417 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
commit a02dbe4258
parent 07b46f47d2
@@ -42,11 +42,10 @@ namespace internal {
static const int kEventsBufferSize = 256*KB;
static const int kTickSamplesBufferChunkSize = 64*KB;
static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 32 * KB;


ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
: Thread("v8:ProfEvntProc"),
generator_(generator),
running_(true),
ticks_buffer_(sizeof(TickSampleEventRecord),
src/d8.cc | 10
@@ -126,9 +126,6 @@ ShellOptions Shell::options;
const char* Shell::kPrompt = "d8> ";


const int MB = 1024 * 1024;


#ifndef V8_SHARED
bool CounterMap::Match(void* key1, void* key2) {
const char* name1 = reinterpret_cast<const char*>(key1);
@@ -1194,11 +1191,14 @@ Handle<String> SourceGroup::ReadFile(const char* name) {

#ifndef V8_SHARED
i::Thread::Options SourceGroup::GetThreadOptions() {
i::Thread::Options options;
options.name = "IsolateThread";
// On some systems (OSX 10.6) the stack size default is 0.5Mb or less
// which is not enough to parse the big literal expressions used in tests.
// The stack size should be at least StackGuard::kLimitSize + some
// OS-specific padding for thread startup code. 2Mbytes seems to be enough.
return i::Thread::Options("IsolateThread", 2 * MB);
// OS-specific padding for thread startup code.
options.stack_size = 2 << 20; // 2 Mb seems to be enough
return options;
}


@@ -1086,7 +1086,6 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {

MemoryChunk* chunk =
Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
desc.instr_size,
EXECUTABLE,
NULL);
if (chunk == NULL) {

@@ -505,6 +505,7 @@ Isolate* Heap::isolate() {
#define GC_GREEDY_CHECK() { }
#endif


// Calls the FUNCTION_CALL function and retries it up to three times
// to guarantee that any allocations performed during the call will
// succeed if there's enough memory.
src/heap.cc | 17
@@ -582,11 +582,8 @@ void Heap::ReserveSpace(
PagedSpace* map_space = Heap::map_space();
PagedSpace* cell_space = Heap::cell_space();
LargeObjectSpace* lo_space = Heap::lo_space();
bool one_old_space_gc_has_been_performed = false;
bool gc_performed = true;
bool old_space_gc_performed;
while (gc_performed) {
old_space_gc_performed = false;
gc_performed = false;
if (!new_space->ReserveSpace(new_space_size)) {
Heap::CollectGarbage(NEW_SPACE);
@@ -595,27 +592,22 @@ void Heap::ReserveSpace(
if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
Heap::CollectGarbage(OLD_POINTER_SPACE);
gc_performed = true;
old_space_gc_performed = true;
}
if (!(old_data_space->ReserveSpace(data_space_size))) {
Heap::CollectGarbage(OLD_DATA_SPACE);
gc_performed = true;
old_space_gc_performed = true;
}
if (!(code_space->ReserveSpace(code_space_size))) {
Heap::CollectGarbage(CODE_SPACE);
gc_performed = true;
old_space_gc_performed = true;
}
if (!(map_space->ReserveSpace(map_space_size))) {
Heap::CollectGarbage(MAP_SPACE);
gc_performed = true;
old_space_gc_performed = true;
}
if (!(cell_space->ReserveSpace(cell_space_size))) {
Heap::CollectGarbage(CELL_SPACE);
gc_performed = true;
old_space_gc_performed = true;
}
// We add a slack-factor of 2 in order to have space for a series of
// large-object allocations that are only just larger than the page size.
@@ -625,17 +617,10 @@ void Heap::ReserveSpace(
// allocation in the other spaces.
large_object_size += cell_space_size + map_space_size + code_space_size +
data_space_size + pointer_space_size;

// If we already did one GC in order to make space in old space, there is
// no sense in doing another one. We will attempt to force through the
// large object space allocation, which comes directly from the OS,
// regardless of any soft limit.
if (!one_old_space_gc_has_been_performed &&
!(lo_space->ReserveSpace(large_object_size))) {
if (!(lo_space->ReserveSpace(large_object_size))) {
Heap::CollectGarbage(LO_SPACE);
gc_performed = true;
}
if (old_space_gc_performed) one_old_space_gc_has_been_performed = true;
}
}
@@ -287,7 +287,7 @@ void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,

// It's difficult to filter out slots recorded for large objects.
if (chunk->owner()->identity() == LO_SPACE &&
chunk->size() > Page::kPageSize &&
chunk->size() > static_cast<size_t>(Page::kPageSize) &&
is_compacting) {
chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
}
@@ -2887,8 +2887,7 @@ static void SweepPrecisely(PagedSpace* space,
for ( ; live_objects != 0; live_objects--) {
Address free_end = object_address + offsets[live_index++] * kPointerSize;
if (free_end != free_start) {
space->AddToFreeLists(free_start,
static_cast<int>(free_end - free_start));
space->Free(free_start, static_cast<int>(free_end - free_start));
}
HeapObject* live_object = HeapObject::FromAddress(free_end);
ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
@@ -2914,8 +2913,7 @@ static void SweepPrecisely(PagedSpace* space,
cells[cell_index] = 0;
}
if (free_start != p->ObjectAreaEnd()) {
space->AddToFreeLists(free_start,
static_cast<int>(p->ObjectAreaEnd() - free_start));
space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
}
p->ResetLiveBytes();
}
@@ -3208,8 +3206,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
Page* p = evacuation_candidates_[i];
if (!p->IsEvacuationCandidate()) continue;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->AddToFreeLists(p->ObjectAreaStart(),
p->ObjectAreaEnd() - p->ObjectAreaStart());
space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
p->set_scan_on_scavenge(false);
slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
p->ClearEvacuationCandidate();
@@ -3526,8 +3523,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
}
size_t size = block_address - p->ObjectAreaStart();
if (cell_index == last_cell_index) {
freed_bytes += static_cast<int>(space->AddToFreeLists(
p->ObjectAreaStart(), static_cast<int>(size)));
freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
static_cast<int>(size)));
ASSERT_EQ(0, p->LiveBytes());
return freed_bytes;
}
@@ -3536,8 +3533,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
// Free the first free space.
size = free_end - p->ObjectAreaStart();
freed_bytes += space->AddToFreeLists(p->ObjectAreaStart(),
static_cast<int>(size));
freed_bytes += space->Free(p->ObjectAreaStart(),
static_cast<int>(size));
// The start of the current free area is represented in undigested form by
// the address of the last 32-word section that contained a live object and
// the marking bitmap for that cell, which describes where the live object
@@ -3566,8 +3563,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
// so now we need to find the start of the first live object at the
// end of the free space.
free_end = StartOfLiveObject(block_address, cell);
freed_bytes += space->AddToFreeLists(
free_start, static_cast<int>(free_end - free_start));
freed_bytes += space->Free(free_start,
static_cast<int>(free_end - free_start));
}
}
// Update our undigested record of where the current free area started.
@@ -3581,8 +3578,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
// Handle the free space at the end of the page.
if (block_address - free_start > 32 * kPointerSize) {
free_start = DigestFreeStart(free_start, free_start_cell);
freed_bytes += space->AddToFreeLists(
free_start, static_cast<int>(block_address - free_start));
freed_bytes += space->Free(free_start,
static_cast<int>(block_address - free_start));
}

p->ResetLiveBytes();
@@ -464,8 +464,15 @@ class Thread::PlatformData : public Malloced {

Thread::Thread(const Options& options)
: data_(new PlatformData),
stack_size_(options.stack_size()) {
set_name(options.name());
stack_size_(options.stack_size) {
set_name(options.name);
}


Thread::Thread(const char* name)
: data_(new PlatformData),
stack_size_(0) {
set_name(name);
}


@@ -710,10 +717,8 @@ class SignalSender : public Thread {
FULL_INTERVAL
};

static const int kSignalSenderStackSize = 32 * KB;

explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
: Thread("SignalSender"),
interval_(interval) {}

static void AddActiveSampler(Sampler* sampler) {
@@ -720,8 +720,15 @@ class Thread::PlatformData : public Malloced {

Thread::Thread(const Options& options)
: data_(new PlatformData()),
stack_size_(options.stack_size()) {
set_name(options.name());
stack_size_(options.stack_size) {
set_name(options.name);
}


Thread::Thread(const char* name)
: data_(new PlatformData()),
stack_size_(0) {
set_name(name);
}


@@ -1028,10 +1035,8 @@ class SignalSender : public Thread {
FULL_INTERVAL
};

static const int kSignalSenderStackSize = 32 * KB;

explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
: Thread("SignalSender"),
vm_tgid_(getpid()),
interval_(interval) {}
@@ -473,11 +473,17 @@ class Thread::PlatformData : public Malloced {
pthread_t thread_; // Thread handle for pthread.
};


Thread::Thread(const Options& options)
: data_(new PlatformData),
stack_size_(options.stack_size()) {
set_name(options.name());
stack_size_(options.stack_size) {
set_name(options.name);
}


Thread::Thread(const char* name)
: data_(new PlatformData),
stack_size_(0) {
set_name(name);
}


@@ -730,13 +736,10 @@ class Sampler::PlatformData : public Malloced {
thread_act_t profiled_thread_;
};


class SamplerThread : public Thread {
public:
static const int kSamplerThreadStackSize = 32 * KB;

explicit SamplerThread(int interval)
: Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
: Thread("SamplerThread"),
interval_(interval) {}

static void AddActiveSampler(Sampler* sampler) {
@@ -512,8 +512,15 @@ class Thread::PlatformData : public Malloced {

Thread::Thread(const Options& options)
: data_(new PlatformData()),
stack_size_(options.stack_size()) {
set_name(options.name());
stack_size_(options.stack_size) {
set_name(options.name);
}


Thread::Thread(const char* name)
: data_(new PlatformData()),
stack_size_(0) {
set_name(name);
}


@@ -782,10 +789,8 @@ class SignalSender : public Thread {
FULL_INTERVAL
};

static const int kSignalSenderStackSize = 32 * KB;

explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
: Thread("SignalSender"),
vm_tgid_(getpid()),
interval_(interval) {}
@@ -369,11 +369,17 @@ class Thread::PlatformData : public Malloced {
pthread_t thread_; // Thread handle for pthread.
};


Thread::Thread(const Options& options)
: data_(new PlatformData()),
stack_size_(options.stack_size()) {
set_name(options.name());
stack_size_(options.stack_size) {
set_name(options.name);
}


Thread::Thread(const char* name)
: data_(new PlatformData()),
stack_size_(0) {
set_name(name);
}


@@ -620,10 +626,8 @@ class SignalSender : public Thread {
FULL_INTERVAL
};

static const int kSignalSenderStackSize = 32 * KB;

explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
: Thread("SignalSender"),
interval_(interval) {}

static void InstallSignalHandler() {
@@ -1526,9 +1526,16 @@ class Thread::PlatformData : public Malloced {
// handle until it is started.

Thread::Thread(const Options& options)
: stack_size_(options.stack_size()) {
: stack_size_(options.stack_size) {
data_ = new PlatformData(kNoThread);
set_name(options.name());
set_name(options.name);
}


Thread::Thread(const char* name)
: stack_size_(0) {
data_ = new PlatformData(kNoThread);
set_name(name);
}


@@ -1894,10 +1901,8 @@ class Sampler::PlatformData : public Malloced {

class SamplerThread : public Thread {
public:
static const int kSamplerThreadStackSize = 32 * KB;

explicit SamplerThread(int interval)
: Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
: Thread("SamplerThread"),
interval_(interval) {}

static void AddActiveSampler(Sampler* sampler) {
@@ -412,22 +412,16 @@ class Thread {
LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
};

class Options {
public:
Options() : name_("v8:<unknown>"), stack_size_(0) {}
Options(const char* name, int stack_size = 0)
: name_(name), stack_size_(stack_size) {}
struct Options {
Options() : name("v8:<unknown>"), stack_size(0) {}

const char* name() const { return name_; }
int stack_size() const { return stack_size_; }

private:
const char* name_;
int stack_size_;
const char* name;
int stack_size;
};

// Create new thread.
explicit Thread(const Options& options);
explicit Thread(const char* name);
virtual ~Thread();

// Start new thread by calling the Run() method in the new thread.
@@ -612,7 +612,6 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) {
pages_[LO_SPACE].Add(address);
}
last_object_address_ = address;
ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
return address;
}

@@ -623,12 +622,7 @@ HeapObject* Deserializer::GetAddressFromEnd(int space) {
int offset = source_->GetInt();
ASSERT(!SpaceIsLarge(space));
offset <<= kObjectAlignmentBits;
Address address = high_water_[space] - offset;
// This assert will fail if kMinimumSpaceSizes is too small for a space,
// because we rely on the fact that all allocation is linear when the VM
// is very young.
ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
return HeapObject::FromAddress(address);
return HeapObject::FromAddress(high_water_[space] - offset);
}
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "isolate.h"
#include "spaces.h"

#ifndef V8_SNAPSHOT_H_
#define V8_SNAPSHOT_H_
@@ -87,21 +86,6 @@ class Snapshot {
DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
};


// These are the sizes of the spaces that are needed in order to unpack the
// VM boot snapshot.
const intptr_t kMinimumSpaceSizes[LAST_SPACE + 1] = {
0, // New space.
512 * 1024, // Old pointer space.
128 * 1024, // Old data space.
256 * 1024, // Code space.
64 * 1024, // Map space.
64 * 1024, // Cell space.
0 // Large object space.
};


} } // namespace v8::internal

#endif // V8_SNAPSHOT_H_
@@ -164,12 +164,12 @@ Page* Page::Initialize(Heap* heap,
Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
ASSERT(chunk->size() <= kPageSize);
ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
ASSERT(chunk->owner() == owner);
int object_bytes =
static_cast<int>(page->ObjectAreaEnd() - page->ObjectAreaStart());
owner->IncreaseCapacity(object_bytes);
owner->AddToFreeLists(page->ObjectAreaStart(), object_bytes);
owner->IncreaseCapacity(Page::kObjectAreaSize);
owner->Free(page->ObjectAreaStart(),
static_cast<int>(page->ObjectAreaEnd() -
page->ObjectAreaStart()));

heap->incremental_marking()->SetOldSpacePageFlags(chunk);

@@ -257,7 +257,6 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
if (new_top > allocation_info_.limit) return NULL;

allocation_info_.top = new_top;
ASSERT(new_top >= Page::FromAllocationTop(new_top)->ObjectAreaStart());
return HeapObject::FromAddress(current_top);
}
src/spaces.cc | 367
@@ -31,7 +31,6 @@
#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"
#include "snapshot.h"

namespace v8 {
namespace internal {
@@ -264,7 +263,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
: isolate_(isolate),
capacity_(0),
capacity_executable_(0),
memory_allocator_reserved_(0),
size_(0),
size_executable_(0) {
}

@@ -274,7 +273,7 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
ASSERT_GE(capacity_, capacity_executable_);

memory_allocator_reserved_ = 0;
size_ = 0;
size_executable_ = 0;

return true;
@@ -283,7 +282,7 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {

void MemoryAllocator::TearDown() {
// Check that spaces were torn down before MemoryAllocator.
CHECK_EQ(memory_allocator_reserved_, 0);
ASSERT(size_ == 0);
// TODO(gc) this will be true again when we fix FreeMemory.
// ASSERT(size_executable_ == 0);
capacity_ = 0;
@@ -296,8 +295,8 @@ void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
// TODO(gc) make code_range part of memory allocator?
ASSERT(reservation->IsReserved());
size_t size = reservation->size();
ASSERT(memory_allocator_reserved_ >= size);
memory_allocator_reserved_ -= size;
ASSERT(size_ >= size);
size_ -= size;

isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

@@ -317,8 +316,8 @@ void MemoryAllocator::FreeMemory(Address base,
size_t size,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
ASSERT(memory_allocator_reserved_ >= size);
memory_allocator_reserved_ -= size;
ASSERT(size_ >= size);
size_ -= size;

isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

@@ -344,7 +343,7 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size,
VirtualMemory reservation(size, alignment);

if (!reservation.IsReserved()) return NULL;
memory_allocator_reserved_ += reservation.size();
size_ += reservation.size();
Address base = RoundUp(static_cast<Address>(reservation.address()),
alignment);
controller->TakeControl(&reservation);
@@ -353,14 +352,11 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size,


Address MemoryAllocator::AllocateAlignedMemory(size_t size,
size_t reserved_size,
size_t alignment,
Executability executable,
VirtualMemory* controller) {
ASSERT(RoundUp(reserved_size, OS::CommitPageSize()) >=
RoundUp(size, OS::CommitPageSize()));
VirtualMemory reservation;
Address base = ReserveAlignedMemory(reserved_size, alignment, &reservation);
Address base = ReserveAlignedMemory(size, alignment, &reservation);
if (base == NULL) return NULL;
if (!reservation.Commit(base,
size,
@@ -379,53 +375,6 @@ void Page::InitializeAsAnchor(PagedSpace* owner) {
}


void Page::CommitMore(intptr_t space_needed) {
intptr_t reserved_page_size = reservation_.IsReserved() ?
reservation_.size() :
Page::kPageSize;
ASSERT(size() < reserved_page_size);
intptr_t expand = Min(Max(size(), space_needed), reserved_page_size - size());
// At least double the page size (this also rounds to OS page size).
expand = Min(reserved_page_size - size(),
RoundUpToPowerOf2(size() + expand) - size());
ASSERT(expand <= kPageSize - size());
ASSERT(expand <= reserved_page_size - size());
Executability executable =
IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
Address old_end = ObjectAreaEnd();
if (!VirtualMemory::CommitRegion(old_end, expand, executable)) return;

set_size(size() + expand);

PagedSpace* paged_space = reinterpret_cast<PagedSpace*>(owner());
paged_space->heap()->isolate()->memory_allocator()->AllocationBookkeeping(
paged_space,
old_end,
0, // No new memory was reserved.
expand, // New memory committed.
executable);
paged_space->IncreaseCapacity(expand);

// In spaces with alignment requirements (e.g. map space) we have to align
// the expanded area with the correct object alignment.
uintptr_t object_area_size = old_end - ObjectAreaStart();
uintptr_t aligned_object_area_size =
object_area_size - object_area_size % paged_space->ObjectAlignment();
if (aligned_object_area_size != object_area_size) {
aligned_object_area_size += paged_space->ObjectAlignment();
}
Address new_area =
reinterpret_cast<Address>(ObjectAreaStart() + aligned_object_area_size);
// In spaces with alignment requirements, this will waste the space for one
// object per doubling of the page size until the next GC.
paged_space->AddToFreeLists(old_end, new_area - old_end);

expand -= (new_area - old_end);

paged_space->AddToFreeLists(new_area, expand);
}


NewSpacePage* NewSpacePage::Initialize(Heap* heap,
Address start,
SemiSpace* semi_space) {
@@ -511,15 +460,9 @@ void MemoryChunk::Unlink() {


MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
intptr_t committed_body_size,
Executability executable,
Space* owner) {
ASSERT(body_size >= committed_body_size);
size_t chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + body_size,
OS::CommitPageSize());
intptr_t committed_chunk_size =
committed_body_size + MemoryChunk::kObjectStartOffset;
committed_chunk_size = RoundUp(committed_chunk_size, OS::CommitPageSize());
size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
Heap* heap = isolate_->heap();
Address base = NULL;
VirtualMemory reservation;
@@ -539,21 +482,20 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
MemoryChunk::kAlignment));
if (base == NULL) return NULL;
// The AllocateAlignedMemory method will update the memory allocator
// memory used, but we are not using that if we have a code range, so
// we update it here.
memory_allocator_reserved_ += chunk_size;
size_ += chunk_size;
// Update executable memory size.
size_executable_ += chunk_size;
} else {
base = AllocateAlignedMemory(committed_chunk_size,
chunk_size,
base = AllocateAlignedMemory(chunk_size,
MemoryChunk::kAlignment,
executable,
&reservation);
if (base == NULL) return NULL;
// Update executable memory size.
size_executable_ += reservation.size();
}
} else {
base = AllocateAlignedMemory(committed_chunk_size,
chunk_size,
base = AllocateAlignedMemory(chunk_size,
MemoryChunk::kAlignment,
executable,
&reservation);
@@ -561,12 +503,21 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
if (base == NULL) return NULL;
}

AllocationBookkeeping(
owner, base, chunk_size, committed_chunk_size, executable);
#ifdef DEBUG
ZapBlock(base, chunk_size);
#endif
isolate_->counters()->memory_allocated()->
Increment(static_cast<int>(chunk_size));

LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
if (owner != NULL) {
ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
}

MemoryChunk* result = MemoryChunk::Initialize(heap,
base,
committed_chunk_size,
chunk_size,
executable,
owner);
result->set_reserved_memory(&reservation);
@@ -574,40 +525,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
}


void MemoryAllocator::AllocationBookkeeping(Space* owner,
Address base,
intptr_t reserved_chunk_size,
intptr_t committed_chunk_size,
Executability executable) {
if (executable == EXECUTABLE) {
// Update executable memory size.
size_executable_ += reserved_chunk_size;
}

#ifdef DEBUG
ZapBlock(base, committed_chunk_size);
#endif
isolate_->counters()->memory_allocated()->
Increment(static_cast<int>(committed_chunk_size));

LOG(isolate_, NewEvent("MemoryChunk", base, committed_chunk_size));
if (owner != NULL) {
ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
PerformAllocationCallback(
space, kAllocationActionAllocate, committed_chunk_size);
}
}


Page* MemoryAllocator::AllocatePage(intptr_t committed_object_area_size,
PagedSpace* owner,
Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
Executability executable) {
ASSERT(committed_object_area_size <= Page::kObjectAreaSize);

MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize,
committed_object_area_size,
executable,
owner);
MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);

if (chunk == NULL) return NULL;

@@ -618,8 +538,7 @@ Page* MemoryAllocator::AllocatePage(intptr_t committed_object_area_size,
LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
Executability executable,
Space* owner) {
MemoryChunk* chunk =
AllocateChunk(object_size, object_size, executable, owner);
MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
if (chunk == NULL) return NULL;
return LargePage::Initialize(isolate_->heap(), chunk);
}
@@ -640,12 +559,8 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
if (reservation->IsReserved()) {
FreeMemory(reservation, chunk->executable());
} else {
// When we do not have a reservation that is because this allocation
// is part of the huge reserved chunk of memory reserved for code on
// x64. In that case the size was rounded up to the page size on
// allocation so we do the same now when freeing.
FreeMemory(chunk->address(),
RoundUp(chunk->size(), Page::kPageSize),
chunk->size(),
chunk->executable());
}
}
@@ -725,12 +640,11 @@ void MemoryAllocator::RemoveMemoryAllocationCallback(

#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
float pct =
static_cast<float>(capacity_ - memory_allocator_reserved_) / capacity_;
float pct = static_cast<float>(capacity_ - size_) / capacity_;
PrintF(" capacity: %" V8_PTR_PREFIX "d"
", used: %" V8_PTR_PREFIX "d"
", available: %%%d\n\n",
capacity_, memory_allocator_reserved_, static_cast<int>(pct*100));
capacity_, size_, static_cast<int>(pct*100));
}
#endif
@@ -798,6 +712,7 @@ MaybeObject* PagedSpace::FindObject(Address addr) {

bool PagedSpace::CanExpand() {
ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
ASSERT(Capacity() % Page::kObjectAreaSize == 0);

if (Capacity() == max_capacity_) return false;

@@ -809,42 +724,11 @@ bool PagedSpace::CanExpand() {
return true;
}

bool PagedSpace::Expand(intptr_t size_in_bytes) {
bool PagedSpace::Expand() {
if (!CanExpand()) return false;

Page* last_page = anchor_.prev_page();
if (last_page != &anchor_) {
// We have run out of linear allocation space. This may be because the
// most recently allocated page (stored last in the list) is a small one,
// that starts on a page aligned boundary, but has not a full kPageSize of
// committed memory. Let's commit more memory for the page.
intptr_t reserved_page_size = last_page->reserved_memory()->IsReserved() ?
last_page->reserved_memory()->size() :
Page::kPageSize;
if (last_page->size() < reserved_page_size &&
(reserved_page_size - last_page->size()) >= size_in_bytes &&
!last_page->IsEvacuationCandidate() &&
last_page->WasSwept()) {
last_page->CommitMore(size_in_bytes);
return true;
}
}

// We initially only commit a part of the page, but the deserialization
// of the initial snapshot makes the assumption that it can deserialize
// into linear memory of a certain size per space, so some of the spaces
// need to have a little more committed memory.
int initial = Max(OS::CommitPageSize(), kMinimumSpaceSizes[identity()]);

ASSERT(Page::kPageSize - initial < Page::kObjectAreaSize);

intptr_t expansion_size =
Max(initial,
RoundUpToPowerOf2(MemoryChunk::kObjectStartOffset + size_in_bytes)) -
MemoryChunk::kObjectStartOffset;

Page* p = heap()->isolate()->memory_allocator()->
AllocatePage(expansion_size, this, executable());
AllocatePage(this, executable());
if (p == NULL) return false;

ASSERT(Capacity() <= max_capacity_);
@@ -887,8 +771,6 @@ void PagedSpace::ReleasePage(Page* page) {
allocation_info_.top = allocation_info_.limit = NULL;
}

intptr_t size = page->ObjectAreaEnd() - page->ObjectAreaStart();

page->Unlink();
if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
heap()->isolate()->memory_allocator()->Free(page);
@@ -897,7 +779,8 @@ void PagedSpace::ReleasePage(Page* page) {
}

ASSERT(Capacity() > 0);
accounting_stats_.ShrinkSpace(size);
ASSERT(Capacity() % Page::kObjectAreaSize == 0);
accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
}


@@ -1026,13 +909,14 @@ bool NewSpace::SetUp(int reserved_semispace_capacity,
2 * heap()->ReservedSemiSpaceSize());
ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));

to_space_.SetUp(chunk_base_,
initial_semispace_capacity,
maximum_semispace_capacity);
from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
initial_semispace_capacity,
maximum_semispace_capacity);
if (!to_space_.Commit()) {
if (!to_space_.SetUp(chunk_base_,
initial_semispace_capacity,
maximum_semispace_capacity)) {
return false;
}
if (!from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
initial_semispace_capacity,
maximum_semispace_capacity)) {
return false;
}

@@ -1265,7 +1149,7 @@ void NewSpace::Verify() {
// -----------------------------------------------------------------------------
// SemiSpace implementation

void SemiSpace::SetUp(Address start,
bool SemiSpace::SetUp(Address start,
int initial_capacity,
int maximum_capacity) {
// Creates a space in the young generation. The constructor does not
@@ -1284,6 +1168,8 @@ void SemiSpace::SetUp(Address start,
object_mask_ = address_mask_ | kHeapObjectTagMask;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
age_mark_ = start_;

return Commit();
}


@@ -1772,7 +1658,7 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
// is big enough to be a FreeSpace with at least one extra word (the next
// pointer), we set its map to be the free space map and its size to an
// appropriate array length for the desired size from HeapObject::Size().
// If the block is too small (e.g. one or two words), to hold both a size
// If the block is too small (eg, one or two words), to hold both a size
// field and a next pointer, we give it a filler map that gives it the
// correct size.
if (size_in_bytes > FreeSpace::kHeaderSize) {
@@ -1876,102 +1762,69 @@ int FreeList::Free(Address start, int size_in_bytes) {
}


FreeListNode* FreeList::PickNodeFromList(FreeListNode** list,
int* node_size,
int minimum_size) {
FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
FreeListNode* node = *list;

if (node == NULL) return NULL;

ASSERT(node->map() == node->GetHeap()->raw_unchecked_free_space_map());

while (node != NULL &&
Page::FromAddress(node->address())->IsEvacuationCandidate()) {
available_ -= node->Size();
node = node->next();
}

if (node == NULL) {
if (node != NULL) {
*node_size = node->Size();
*list = node->next();
} else {
*list = NULL;
return NULL;
}

// Gets the size without checking the map. When we are booting we have
// a FreeListNode before we have created its map.
intptr_t size = reinterpret_cast<FreeSpace*>(node)->Size();

// We don't search the list for one that fits, preferring to look in the
// list of larger nodes, but we do check the first in the list, because
// if we had to expand the space or page we may have placed an entry that
// was just long enough at the head of one of the lists.
if (size < minimum_size) return NULL;

*node_size = size;
available_ -= size;
*list = node->next();

return node;
}


FreeListNode* FreeList::FindAbuttingNode(
int size_in_bytes, int* node_size, Address limit, FreeListNode** list_head) {
FreeListNode* first_node = *list_head;
if (first_node != NULL &&
first_node->address() == limit &&
reinterpret_cast<FreeSpace*>(first_node)->Size() >= size_in_bytes &&
!Page::FromAddress(first_node->address())->IsEvacuationCandidate()) {
FreeListNode* answer = first_node;
int size = reinterpret_cast<FreeSpace*>(first_node)->Size();
available_ -= size;
*node_size = size;
*list_head = first_node->next();
ASSERT(IsVeryLong() || available_ == SumFreeLists());
return answer;
}
return NULL;
}


FreeListNode* FreeList::FindNodeFor(int size_in_bytes,
int* node_size,
Address limit) {
FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
FreeListNode* node = NULL;

if (limit != NULL) {
// We may have a memory area at the head of the free list, which abuts the
// old linear allocation area. This happens if the linear allocation area
// has been shortened to allow an incremental marking step to be performed.
// In that case we prefer to return the free memory area that is contiguous
// with the old linear allocation area.
node = FindAbuttingNode(size_in_bytes, node_size, limit, &large_list_);
if (node != NULL) return node;
node = FindAbuttingNode(size_in_bytes, node_size, limit, &huge_list_);
if (size_in_bytes <= kSmallAllocationMax) {
node = PickNodeFromList(&small_list_, node_size);
if (node != NULL) return node;
}

node = PickNodeFromList(&small_list_, node_size, size_in_bytes);
ASSERT(IsVeryLong() || available_ == SumFreeLists());
if (node != NULL) return node;
if (size_in_bytes <= kMediumAllocationMax) {
node = PickNodeFromList(&medium_list_, node_size);
if (node != NULL) return node;
}

node = PickNodeFromList(&medium_list_, node_size, size_in_bytes);
ASSERT(IsVeryLong() || available_ == SumFreeLists());
if (node != NULL) return node;
if (size_in_bytes <= kLargeAllocationMax) {
node = PickNodeFromList(&large_list_, node_size);
if (node != NULL) return node;
}

node = PickNodeFromList(&large_list_, node_size, size_in_bytes);
ASSERT(IsVeryLong() || available_ == SumFreeLists());
if (node != NULL) return node;

// The tricky third clause in this for statement is due to the fact that
// PickNodeFromList can cut pages out of the list if they are unavailable for
// new allocation (e.g. if they are on a page that has been scheduled for
// evacuation).
for (FreeListNode** cur = &huge_list_;
*cur != NULL;
cur = (*cur) == NULL ? cur : (*cur)->next_address()) {
node = PickNodeFromList(cur, node_size, size_in_bytes);
ASSERT(IsVeryLong() || available_ == SumFreeLists());
if (node != NULL) return node;
cur = (*cur)->next_address()) {
FreeListNode* cur_node = *cur;
while (cur_node != NULL &&
Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
cur_node = cur_node->next();
}

*cur = cur_node;
if (cur_node == NULL) break;

ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
int size = cur_as_free_space->Size();
if (size >= size_in_bytes) {
// Large enough node found. Unlink it from the list.
node = *cur;
*node_size = size;
*cur = node->next();
break;
}
}

return node;
@@ -1990,23 +1843,10 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
ASSERT(owner_->limit() - owner_->top() < size_in_bytes);

int new_node_size = 0;
FreeListNode* new_node =
FindNodeFor(size_in_bytes, &new_node_size, owner_->limit());
FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == NULL) return NULL;

if (new_node->address() == owner_->limit()) {
// The new freelist node we were given is an extension of the one we had
// last. This is a common thing to happen when we extend a small page by
// committing more memory. In this case we just add the new node to the
// linear allocation area and recurse.
owner_->Allocate(new_node_size);
owner_->SetTop(owner_->top(), new_node->address() + new_node_size);
MaybeObject* allocation = owner_->AllocateRaw(size_in_bytes);
Object* answer;
if (!allocation->ToObject(&answer)) return NULL;
return HeapObject::cast(answer);
}

available_ -= new_node_size;
ASSERT(IsVeryLong() || available_ == SumFreeLists());

int bytes_left = new_node_size - size_in_bytes;
@@ -2016,9 +1856,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
if (old_linear_size != 0) {
owner_->AddToFreeLists(owner_->top(), old_linear_size);
}
owner_->Free(owner_->top(), old_linear_size);

#ifdef DEBUG
for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
@@ -2047,8 +1885,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// We don't want to give too large linear areas to the allocator while
// incremental marking is going on, because we won't check again whether
// we want to do another increment until the linear area is used up.
owner_->AddToFreeLists(new_node->address() + size_in_bytes + linear_size,
new_node_size - size_in_bytes - linear_size);
owner_->Free(new_node->address() + size_in_bytes + linear_size,
new_node_size - size_in_bytes - linear_size);
owner_->SetTop(new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes + linear_size);
} else if (bytes_left > 0) {
@@ -2057,7 +1895,6 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
owner_->SetTop(new_node->address() + size_in_bytes,
new_node->address() + new_node_size);
} else {
ASSERT(bytes_left == 0);
// TODO(gc) Try not freeing linear allocation region when bytes_left
// are zero.
owner_->SetTop(NULL, NULL);
@@ -2190,9 +2027,7 @@ bool NewSpace::ReserveSpace(int bytes) {
HeapObject* allocation = HeapObject::cast(object);
Address top = allocation_info_.top;
if ((top - bytes) == allocation->address()) {
Address new_top = allocation->address();
ASSERT(new_top >= Page::FromAddress(new_top - 1)->ObjectAreaStart());
allocation_info_.top = new_top;
allocation_info_.top = allocation->address();
return true;
}
// There may be a borderline case here where the allocation succeeded, but
@@ -2207,7 +2042,7 @@ void PagedSpace::PrepareForMarkCompact() {
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap.
int old_linear_size = static_cast<int>(limit() - top());
AddToFreeLists(top(), old_linear_size);
Free(top(), old_linear_size);
SetTop(NULL, NULL);

// Stop lazy sweeping and clear marking bits for unswept pages.
@@ -2250,13 +2085,10 @@ bool PagedSpace::ReserveSpace(int size_in_bytes) {
// Mark the old linear allocation area with a free space so it can be
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
AddToFreeLists(top(), old_linear_size);
Free(top(), old_linear_size);

SetTop(new_area->address(), new_area->address() + size_in_bytes);
// The AddToFreeLists call above will reduce the size of the space in the
// allocation stats. We don't need to add this linear area to the size
// with an Allocate(size_in_bytes) call here, because the
// free_list_.Allocate() call above already accounted for this memory.
Allocate(size_in_bytes);
return true;
}

@@ -2337,7 +2169,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
}

// Try to expand the space and allocate in the new next page.
if (Expand(size_in_bytes)) {
if (Expand()) {
return free_list_.Allocate(size_in_bytes);
}

@@ -2698,7 +2530,6 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
heap()->mark_compact_collector()->ReportDeleteIfNeeded(
object, heap()->isolate());
size_ -= static_cast<int>(page->size());
ASSERT(size_ >= 0);
objects_size_ -= object->Size();
page_count_--;
src/spaces.h | 92
@@ -502,9 +502,11 @@ class MemoryChunk {
static const int kObjectStartOffset = kBodyOffset - 1 +
(kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);

intptr_t size() const { return size_; }
size_t size() const { return size_; }

void set_size(size_t size) { size_ = size; }
void set_size(size_t size) {
size_ = size;
}

Executability executable() {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
@@ -656,7 +658,7 @@ class Page : public MemoryChunk {
Address ObjectAreaStart() { return address() + kObjectStartOffset; }

// Returns the end address (exclusive) of the object area in this page.
Address ObjectAreaEnd() { return address() + size(); }
Address ObjectAreaEnd() { return address() + Page::kPageSize; }

// Checks whether an address is page aligned.
static bool IsAlignedToPageSize(Address a) {
@@ -675,10 +677,6 @@ class Page : public MemoryChunk {
return address() + offset;
}

// Expand the committed area for pages that are small. This
// happens primarily when the VM is newly booted.
void CommitMore(intptr_t space_needed);

// ---------------------------------------------------------------------

// Page size in bytes. This must be a multiple of the OS page size.
@@ -848,10 +846,12 @@ class CodeRange {
FreeBlock(Address start_arg, size_t size_arg)
: start(start_arg), size(size_arg) {
ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
ASSERT(size >= static_cast<size_t>(Page::kPageSize));
}
FreeBlock(void* start_arg, size_t size_arg)
: start(static_cast<Address>(start_arg)), size(size_arg) {
ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
ASSERT(size >= static_cast<size_t>(Page::kPageSize));
}

Address start;
@@ -947,9 +947,7 @@ class MemoryAllocator {

void TearDown();

Page* AllocatePage(intptr_t object_area_size,
PagedSpace* owner,
Executability executable);
Page* AllocatePage(PagedSpace* owner, Executability executable);

LargePage* AllocateLargePage(intptr_t object_size,
Executability executable,
@@ -958,14 +956,10 @@ class MemoryAllocator {
void Free(MemoryChunk* chunk);

// Returns the maximum available bytes of heaps.
intptr_t Available() {
return capacity_ < memory_allocator_reserved_ ?
0 :
capacity_ - memory_allocator_reserved_;
}
intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

// Returns allocated spaces in bytes.
intptr_t Size() { return memory_allocator_reserved_; }
intptr_t Size() { return size_; }

// Returns the maximum available executable bytes of heaps.
intptr_t AvailableExecutable() {
@@ -987,7 +981,6 @@ class MemoryAllocator {
#endif

MemoryChunk* AllocateChunk(intptr_t body_size,
intptr_t committed_body_size,
Executability executable,
Space* space);

@@ -995,7 +988,6 @@ class MemoryAllocator {
size_t alignment,
VirtualMemory* controller);
Address AllocateAlignedMemory(size_t requested,
size_t committed,
size_t alignment,
Executability executable,
VirtualMemory* controller);
@@ -1015,12 +1007,6 @@ class MemoryAllocator {
// and false otherwise.
bool UncommitBlock(Address start, size_t size);

void AllocationBookkeeping(Space* owner,
Address base,
intptr_t reserved_size,
intptr_t committed_size,
Executability executable);

// Zaps a contiguous block of memory [start..(start+size)[ thus
// filling it up with a recognizable non-NULL bit pattern.
void ZapBlock(Address start, size_t size);
@@ -1048,7 +1034,7 @@ class MemoryAllocator {
size_t capacity_executable_;

// Allocated space size in bytes.
size_t memory_allocator_reserved_;
size_t size_;
// Allocated executable space size in bytes.
size_t size_executable_;
@@ -1393,15 +1379,9 @@ class FreeList BASE_EMBEDDED {
static const int kMinBlockSize = 3 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxHeapObjectSize;

FreeListNode* PickNodeFromList(FreeListNode** list,
int* node_size,
int minimum_size);
FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);

FreeListNode* FindNodeFor(int size_in_bytes, int* node_size, Address limit);
FreeListNode* FindAbuttingNode(int size_in_bytes,
int* node_size,
Address limit,
FreeListNode** list_head);
FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);

PagedSpace* owner_;
Heap* heap_;
@@ -1501,8 +1481,6 @@ class PagedSpace : public Space {
// free bytes that were not found at all due to lazy sweeping.
virtual intptr_t Waste() { return accounting_stats_.Waste(); }

virtual int ObjectAlignment() { return kObjectAlignment; }

// Returns the allocation pointer in this space.
Address top() { return allocation_info_.top; }
Address limit() { return allocation_info_.limit; }
@@ -1517,7 +1495,7 @@ class PagedSpace : public Space {
// the free list or accounted as waste.
// If add_to_freelist is false then just accounting stats are updated and
// no attempt to add area to free list is made.
int AddToFreeLists(Address start, int size_in_bytes) {
int Free(Address start, int size_in_bytes) {
int wasted = free_list_.Free(start, size_in_bytes);
accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
return size_in_bytes - wasted;
@@ -1525,7 +1503,6 @@ class PagedSpace : public Space {

// Set space allocation info.
void SetTop(Address top, Address limit) {
ASSERT(top == NULL || top >= Page::FromAddress(top - 1)->ObjectAreaStart());
ASSERT(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
allocation_info_.top = top;
@@ -1596,7 +1573,6 @@ class PagedSpace : public Space {
return !first_unswept_page_->is_valid();
}

inline bool HasAPage() { return anchor_.next_page() != &anchor_; }
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }

@@ -1669,6 +1645,12 @@ class PagedSpace : public Space {
// Normal allocation information.
AllocationInfo allocation_info_;

// Bytes of each page that cannot be allocated. Possibly non-zero
// for pages in spaces with only fixed-size objects. Always zero
// for pages in spaces with variable sized objects (those pages are
// padded with free-list nodes).
int page_extra_;

bool was_swept_conservatively_;

// The first page to be swept when the lazy sweeper advances. Is set
@@ -1680,11 +1662,10 @@ class PagedSpace : public Space {
// done conservatively.
intptr_t unswept_free_bytes_;

// Expands the space by allocating a page. Returns false if it cannot
// allocate a page from OS, or if the hard heap size limit has been hit. The
// new page will have at least enough committed space to satisfy the object
// size indicated by the allocation_size argument;
bool Expand(intptr_t allocation_size);
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate requested number of pages from OS, or if the hard heap
// size limit has been hit.
bool Expand();

// Generic fast case allocation function that tries linear allocation at the
// address denoted by top in allocation_info_.
@@ -1839,9 +1820,8 @@ class SemiSpace : public Space {
anchor_(this),
current_page_(NULL) { }

// Sets up the semispace using the given chunk. After this, call Commit()
// to make the semispace usable.
void SetUp(Address start, int initial_capacity, int maximum_capacity);
// Sets up the semispace using the given chunk.
bool SetUp(Address start, int initial_capacity, int maximum_capacity);

// Tear down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
@@ -2345,7 +2325,14 @@ class OldSpace : public PagedSpace {
intptr_t max_capacity,
AllocationSpace id,
Executability executable)
: PagedSpace(heap, max_capacity, id, executable) { }
: PagedSpace(heap, max_capacity, id, executable) {
page_extra_ = 0;
}

// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) {
return page->ObjectAreaEnd();
}

public:
TRACK_MEMORY("OldSpace")
@@ -2372,12 +2359,17 @@ class FixedSpace : public PagedSpace {
const char* name)
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes),
name_(name) { }
name_(name) {
page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
}

// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) {
return page->ObjectAreaEnd() - page_extra_;
}

int object_size_in_bytes() { return object_size_in_bytes_; }

virtual int ObjectAlignment() { return object_size_in_bytes_; }

// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact();
@ -496,6 +496,7 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
Address map_aligned_end = MapEndAlign(end);

ASSERT(map_aligned_start == start);
ASSERT(map_aligned_end == end);

FindPointersToNewSpaceInMaps(map_aligned_start,
map_aligned_end,
@ -523,57 +524,52 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
RegionCallback region_callback,
ObjectSlotCallback slot_callback) {
Address visitable_start = page->ObjectAreaStart();
Address end_of_page = page->ObjectAreaEnd();

Address visitable_end = visitable_start;

Object* free_space_map = heap_->free_space_map();
Object* two_pointer_filler_map = heap_->two_pointer_filler_map();

while (true) { // While the page grows (doesn't normally happen).
Address end_of_page = page->ObjectAreaEnd();
while (visitable_end < end_of_page) {
Object* o = *reinterpret_cast<Object**>(visitable_end);
// Skip fillers but not things that look like fillers in the special
// garbage section which can contain anything.
if (o == free_space_map ||
o == two_pointer_filler_map ||
(visitable_end == space->top() && visitable_end != space->limit())) {
if (visitable_start != visitable_end) {
// After calling this the special garbage section may have moved.
(this->*region_callback)(visitable_start,
visitable_end,
slot_callback);
if (visitable_end >= space->top() && visitable_end < space->limit()) {
visitable_end = space->limit();
visitable_start = visitable_end;
continue;
}
while (visitable_end < end_of_page) {
Object* o = *reinterpret_cast<Object**>(visitable_end);
// Skip fillers but not things that look like fillers in the special
// garbage section which can contain anything.
if (o == free_space_map ||
o == two_pointer_filler_map ||
(visitable_end == space->top() && visitable_end != space->limit())) {
if (visitable_start != visitable_end) {
// After calling this the special garbage section may have moved.
(this->*region_callback)(visitable_start,
visitable_end,
slot_callback);
if (visitable_end >= space->top() && visitable_end < space->limit()) {
visitable_end = space->limit();
visitable_start = visitable_end;
continue;
}
if (visitable_end == space->top() && visitable_end != space->limit()) {
visitable_start = visitable_end = space->limit();
} else {
// At this point we are either at the start of a filler or we are at
// the point where the space->top() used to be before the
// visit_pointer_region call above. Either way we can skip the
// object at the current spot: We don't promise to visit objects
// allocated during heap traversal, and if space->top() moved then it
// must be because an object was allocated at this point.
visitable_start =
visitable_end + HeapObject::FromAddress(visitable_end)->Size();
visitable_end = visitable_start;
}
} else {
ASSERT(o != free_space_map);
ASSERT(o != two_pointer_filler_map);
ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
visitable_end += kPointerSize;
}
if (visitable_end == space->top() && visitable_end != space->limit()) {
visitable_start = visitable_end = space->limit();
} else {
// At this point we are either at the start of a filler or we are at
// the point where the space->top() used to be before the
// visit_pointer_region call above. Either way we can skip the
// object at the current spot: We don't promise to visit objects
// allocated during heap traversal, and if space->top() moved then it
// must be because an object was allocated at this point.
visitable_start =
visitable_end + HeapObject::FromAddress(visitable_end)->Size();
visitable_end = visitable_start;
}
} else {
ASSERT(o != free_space_map);
ASSERT(o != two_pointer_filler_map);
ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
visitable_end += kPointerSize;
}
ASSERT(visitable_end >= end_of_page);
// If the page did not grow we are done.
if (end_of_page == page->ObjectAreaEnd()) break;
}
ASSERT(visitable_end == page->ObjectAreaEnd());
ASSERT(visitable_end == end_of_page);
if (visitable_start != visitable_end) {
(this->*region_callback)(visitable_start,
visitable_end,
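The loop restored above only hands maximal runs of non-filler words to the region callback, skipping free-space and filler objects (and the special garbage section) as it walks the page. A loose, self-contained illustration of that region-splitting idea, using a vector of ints in place of heap words; visit(), the zero-means-filler convention, and the sample data are inventions of this sketch, not V8 code:

#include <cstddef>
#include <iostream>
#include <vector>

// Stand-in for the region callback: receives each maximal run of live slots.
static void visit(size_t start, size_t end) {
  std::cout << "region [" << start << ", " << end << ")\n";
}

int main() {
  // 0 plays the role of a filler word; anything else is a visitable slot.
  std::vector<int> page = {7, 7, 0, 0, 7, 7, 7, 0, 7};
  size_t region_start = 0;
  for (size_t i = 0; i <= page.size(); ++i) {
    bool filler = (i == page.size()) || page[i] == 0;
    if (!filler) continue;                            // keep extending the current region
    if (region_start != i) visit(region_start, i);    // flush the open region
    region_start = i + 1;                             // resume scanning past the filler
  }
  return 0;  // prints regions [0, 2), [4, 7), [8, 9)
}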
10
src/utils.h
@ -153,9 +153,11 @@ int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
}


template<typename int_type>
inline int RoundUpToPowerOf2(int_type x_argument) {
uintptr_t x = static_cast<uintptr_t>(x_argument);
// Returns the smallest power of two which is >= x. If you pass in a
// number that is already a power of two, it is returned as is.
// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
// figure 3-3, page 48, where the function is called clp2.
inline uint32_t RoundUpToPowerOf2(uint32_t x) {
ASSERT(x <= 0x80000000u);
x = x - 1;
x = x | (x >> 1);
@ -163,7 +165,7 @@ inline int RoundUpToPowerOf2(int_type x_argument) {
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >> 16);
return static_cast<int_type>(x + 1);
return x + 1;
}

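The RoundUpToPowerOf2 restored above uses the clp2 bit-smearing trick from "Hacker's Delight": after x - 1, each shift-and-OR copies the highest set bit into every lower position, so the value becomes 0...011...1 and adding 1 gives the next power of two. A standalone copy for illustration; the helper name and the values checked in main are arbitrary:

#include <cassert>
#include <cstdint>

// Local copy of the clp2 bit-smearing trick; only valid for x <= 2^31.
static inline uint32_t RoundUpToPowerOf2Sketch(uint32_t x) {
  assert(x <= 0x80000000u);
  x = x - 1;
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return x + 1;
}

int main() {
  assert(RoundUpToPowerOf2Sketch(1) == 1);
  assert(RoundUpToPowerOf2Sketch(37) == 64);   // 37 -> 36 -> 0b111111 -> 64
  assert(RoundUpToPowerOf2Sketch(64) == 64);   // powers of two map to themselves
  assert(RoundUpToPowerOf2Sketch(0x80000000u) == 0x80000000u);
  return 0;
}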
@ -1236,14 +1236,17 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
obj = iterator.next()) {
size_of_objects_2 += obj->Size();
}
// Delta must be within 1% of the larger result.
// Delta must be within 5% of the larger result.
// TODO(gc): Tighten this up by distinguishing between byte
// arrays that are real and those that merely mark free space
// on the heap.
if (size_of_objects_1 > size_of_objects_2) {
intptr_t delta = size_of_objects_1 - size_of_objects_2;
PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
"Iterator: %" V8_PTR_PREFIX "d, "
"delta: %" V8_PTR_PREFIX "d\n",
size_of_objects_1, size_of_objects_2, delta);
CHECK_GT(size_of_objects_1 / 100, delta);
CHECK_GT(size_of_objects_1 / 20, delta);
} else {
intptr_t delta = size_of_objects_2 - size_of_objects_1;
PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
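The loosened check above works through integer division: CHECK_GT(size_of_objects_1 / 20, delta) passes only when the delta is below roughly 5% of the larger total, whereas the previous divisor of 100 allowed about 1%. A tiny worked example with made-up sizes:

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical numbers: a 2 MB heap measured two ways, differing by 60 KB.
  intptr_t size_of_objects_1 = 2 * 1024 * 1024;
  intptr_t delta = 60 * 1024;
  // The 5% tolerance (divisor 20) passes; the old 1% tolerance (divisor 100) would not.
  assert(size_of_objects_1 / 20 > delta);      // 104857 > 61440
  assert(!(size_of_objects_1 / 100 > delta));  // 20971 > 61440 is false
  return 0;
}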
@ -526,25 +526,12 @@ static intptr_t MemoryInUse() {

TEST(BootUpMemoryUse) {
intptr_t initial_memory = MemoryInUse();
FLAG_crankshaft = false; // Avoid flakiness.
// Only Linux has the proc filesystem and only if it is mapped. If it's not
// there we just skip the test.
if (initial_memory >= 0) {
InitializeVM();
intptr_t booted_memory = MemoryInUse();
if (sizeof(initial_memory) == 8) {
if (v8::internal::Snapshot::IsEnabled()) {
CHECK_LE(booted_memory - initial_memory, 3700 * 1024); // 3640.
} else {
CHECK_LE(booted_memory - initial_memory, 3300 * 1024); // 3276.
}
} else {
if (v8::internal::Snapshot::IsEnabled()) {
CHECK_LE(booted_memory - initial_memory, 2300 * 1024); // 2276.
} else {
CHECK_LE(booted_memory - initial_memory, 2500 * 1024); // 2416
}
}
CHECK_LE(booted_memory - initial_memory, 16 * 1024 * 1024);
}
}

@ -140,8 +140,8 @@ TEST(MemoryAllocator) {
heap->MaxReserved(),
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
Page* first_page = memory_allocator->AllocatePage(
Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
Page* first_page =
memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);

first_page->InsertAfter(faked_space.anchor()->prev_page());
CHECK(first_page->is_valid());
@ -154,8 +154,7 @@ TEST(MemoryAllocator) {

// Again, we should get n or n - 1 pages.
Page* other =
memory_allocator->AllocatePage(
Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
CHECK(other->is_valid());
total_pages++;
other->InsertAfter(first_page);