[Memory] Don't heap allocate instances of base::VirtualMemory.

- Changes some instance fields from VirtualMemory pointers to VirtualMemory values.
- Changes some comments to correctly describe the code.

Bug: v8:6635
Change-Id: I9ec93ef0b09d541c966caa6482c5832cd6b1e149
Reviewed-on: https://chromium-review.googlesource.com/584931
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46880}
Bill Budge 2017-07-25 07:31:34 -07:00 committed by Commit Bot
parent 57031e82db
commit 92da5a474d
7 changed files with 48 additions and 64 deletions

src/base/platform/platform.h

@@ -285,11 +285,10 @@ class V8_BASE_EXPORT OS {
   DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
 };
 
 // Represents and controls an area of reserved memory.
 // Control of the reserved memory can be assigned to another VirtualMemory
-// object by assignment or copy-contructing. This removes the reserved memory
-// from the original object.
+// object by calling TakeControl. This removes the reserved memory from the
+// 'from' instance.
 class V8_BASE_EXPORT VirtualMemory {
  public:
   // Empty VirtualMemory object, controlling no reserved memory.
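
The TakeControl idiom the new comment describes is the core of this change: a function builds a reservation in a local VirtualMemory and, only once everything has succeeded, hands ownership to a long-lived instance. Below is a minimal standalone sketch of that idiom. It is not V8's base::VirtualMemory; it substitutes malloc/free for real page reservation and keeps only the members needed to show the ownership transfer.

#include <cassert>
#include <cstddef>
#include <cstdlib>

// Simplified stand-in for base::VirtualMemory: owns a block of memory and can
// hand that ownership to another instance via TakeControl().
class VirtualMemoryLike {
 public:
  VirtualMemoryLike() : address_(nullptr), size_(0) {}
  explicit VirtualMemoryLike(std::size_t size)
      : address_(std::malloc(size)), size_(size) {}
  ~VirtualMemoryLike() {
    if (IsReserved()) Release();
  }

  bool IsReserved() const { return address_ != nullptr; }
  void* address() const { return address_; }
  std::size_t size() const { return size_; }

  // Frees the controlled memory and leaves this instance empty.
  void Release() {
    std::free(address_);
    address_ = nullptr;
    size_ = 0;
  }

  // Takes over the memory controlled by |from|; |from| ends up empty, which is
  // what the updated comment means by removing the reserved memory from the
  // 'from' instance.
  void TakeControl(VirtualMemoryLike* from) {
    assert(!IsReserved());
    address_ = from->address_;
    size_ = from->size_;
    from->address_ = nullptr;
    from->size_ = 0;
  }

 private:
  void* address_;
  std::size_t size_;
};

int main() {
  VirtualMemoryLike reservation(4096);  // short-lived local, as in a SetUp()
  VirtualMemoryLike backing_store;      // long-lived owner, e.g. a data member
  backing_store.TakeControl(&reservation);
  assert(!reservation.IsReserved());    // the 'from' instance is now empty
  assert(backing_store.IsReserved());
  return 0;                             // backing_store's destructor releases
}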

src/heap/sequential-marking-deque.cc

@@ -13,15 +13,17 @@ namespace v8 {
 namespace internal {
 
 void SequentialMarkingDeque::SetUp() {
-  backing_store_ =
-      new base::VirtualMemory(kMaxSize, heap_->GetRandomMmapAddr());
-  backing_store_committed_size_ = 0;
-  if (backing_store_ == nullptr) {
+  base::VirtualMemory reservation(kMaxSize, heap_->GetRandomMmapAddr());
+  if (!reservation.IsReserved()) {
     V8::FatalProcessOutOfMemory("SequentialMarkingDeque::SetUp");
   }
+  backing_store_committed_size_ = 0;
+  backing_store_.TakeControl(&reservation);
 }
 
-void SequentialMarkingDeque::TearDown() { delete backing_store_; }
+void SequentialMarkingDeque::TearDown() {
+  if (backing_store_.IsReserved()) backing_store_.Release();
+}
 
 void SequentialMarkingDeque::StartUsing() {
   base::LockGuard<base::Mutex> guard(&mutex_);

@@ -32,7 +34,7 @@ void SequentialMarkingDeque::StartUsing() {
   }
   in_use_ = true;
   EnsureCommitted();
-  array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
+  array_ = reinterpret_cast<HeapObject**>(backing_store_.address());
   size_t size = FLAG_force_marking_deque_overflows
                     ? 64 * kPointerSize
                     : backing_store_committed_size_;

@@ -64,8 +66,8 @@ void SequentialMarkingDeque::Clear() {
 
 void SequentialMarkingDeque::Uncommit() {
   DCHECK(!in_use_);
-  bool success = backing_store_->Uncommit(backing_store_->address(),
-                                          backing_store_committed_size_);
+  bool success = backing_store_.Uncommit(backing_store_.address(),
+                                         backing_store_committed_size_);
   backing_store_committed_size_ = 0;
   CHECK(success);
 }

@@ -75,7 +77,7 @@ void SequentialMarkingDeque::EnsureCommitted() {
   if (backing_store_committed_size_ > 0) return;
 
   for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
-    if (backing_store_->Commit(backing_store_->address(), size, false)) {
+    if (backing_store_.Commit(backing_store_.address(), size, false)) {
       backing_store_committed_size_ = size;
       break;
     }
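
The SetUp/EnsureCommitted/Uncommit code above relies on the distinction between reserving address space and committing pages inside it. The POSIX-only sketch below shows roughly what those operations boil down to underneath; the constants are illustrative and this is an approximation, not V8's platform implementation.

#include <sys/mman.h>

#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t kMaxSize = 1u << 20;      // assumed 1 MiB reservation
  const std::size_t kCommitSize = 64u << 10;  // commit the first 64 KiB

  // Reserve: claim address space with no accessible pages, roughly what the
  // base::VirtualMemory constructor used in SetUp() does.
  void* base = mmap(nullptr, kMaxSize, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) {
    std::perror("mmap");
    return 1;  // SetUp() calls V8::FatalProcessOutOfMemory() in this case
  }

  // Commit: make a prefix of the reservation usable, as in EnsureCommitted().
  if (mprotect(base, kCommitSize, PROT_READ | PROT_WRITE) != 0) {
    std::perror("mprotect");
    return 1;
  }
  static_cast<char*>(base)[0] = 42;  // committed pages can now be touched

  // Uncommit: make the pages inaccessible again but keep the reservation,
  // as in SequentialMarkingDeque::Uncommit().
  mprotect(base, kCommitSize, PROT_NONE);

  // Release: return the whole reservation, as in TearDown().
  munmap(base, kMaxSize);
  return 0;
}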

src/heap/sequential-marking-deque.h

@@ -23,8 +23,7 @@ class HeapObject;
 class SequentialMarkingDeque {
  public:
   explicit SequentialMarkingDeque(Heap* heap)
-      : backing_store_(nullptr),
-        backing_store_committed_size_(0),
+      : backing_store_committed_size_(0),
         array_(nullptr),
         top_(0),
         bottom_(0),

@@ -132,7 +131,7 @@ class SequentialMarkingDeque {
   base::Mutex mutex_;
-  base::VirtualMemory* backing_store_;
+  base::VirtualMemory backing_store_;
   size_t backing_store_committed_size_;
   HeapObject** array_;
   // array_[(top - 1) & mask_] is the top element in the deque. The Deque is

src/heap/spaces.cc

@@ -88,17 +88,14 @@ PauseAllocationObserversScope::~PauseAllocationObserversScope() {
 // -----------------------------------------------------------------------------
 // CodeRange
 
 CodeRange::CodeRange(Isolate* isolate)
     : isolate_(isolate),
-      code_range_(NULL),
       free_list_(0),
       allocation_list_(0),
       current_allocation_block_index_(0) {}
 
 bool CodeRange::SetUp(size_t requested) {
-  DCHECK(code_range_ == NULL);
+  DCHECK(!virtual_memory_.IsReserved());
 
   if (requested == 0) {
     // When a target requires the code range feature, we put all code objects

@@ -122,38 +119,31 @@ bool CodeRange::SetUp(size_t requested) {
   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
 
-  code_range_ = new base::VirtualMemory(
+  base::VirtualMemory reservation(
       requested,
       Max(kCodeRangeAreaAlignment,
           static_cast<size_t>(base::OS::AllocateAlignment())),
       base::OS::GetRandomMmapAddr());
-  CHECK(code_range_ != NULL);
-  if (!code_range_->IsReserved()) {
-    delete code_range_;
-    code_range_ = NULL;
-    return false;
-  }
+  if (!reservation.IsReserved()) return false;
 
   // We are sure that we have mapped a block of requested addresses.
-  DCHECK(code_range_->size() == requested);
-  Address base = reinterpret_cast<Address>(code_range_->address());
+  DCHECK(reservation.size() == requested);
+  Address base = reinterpret_cast<Address>(reservation.address());
 
   // On some platforms, specifically Win64, we need to reserve some pages at
   // the beginning of an executable space.
   if (reserved_area > 0) {
-    if (!code_range_->Commit(base, reserved_area, true)) {
-      delete code_range_;
-      code_range_ = NULL;
-      return false;
-    }
+    if (!reservation.Commit(base, reserved_area, true)) return false;
 
     base += reserved_area;
   }
   Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
-  size_t size = code_range_->size() - (aligned_base - base) - reserved_area;
+  size_t size = reservation.size() - (aligned_base - base) - reserved_area;
   allocation_list_.Add(FreeBlock(aligned_base, size));
   current_allocation_block_index_ = 0;
 
-  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
+  LOG(isolate_, NewEvent("CodeRange", reservation.address(), requested));
+  virtual_memory_.TakeControl(&reservation);
   return true;
 }

@@ -224,7 +214,7 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
   DCHECK(*allocated <= current.size);
   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
   if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
-          code_range_, current.start, commit_size, *allocated)) {
+          &virtual_memory_, current.start, commit_size, *allocated)) {
     *allocated = 0;
     ReleaseBlock(&current);
     return NULL;

@@ -240,7 +230,7 @@ bool CodeRange::CommitRawMemory(Address start, size_t length) {
 
 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
-  return code_range_->Uncommit(start, length);
+  return virtual_memory_.Uncommit(start, length);
 }

@@ -248,13 +238,12 @@ void CodeRange::FreeRawMemory(Address address, size_t length) {
   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
   base::LockGuard<base::Mutex> guard(&code_range_mutex_);
   free_list_.Add(FreeBlock(address, length));
-  code_range_->Uncommit(address, length);
+  virtual_memory_.Uncommit(address, length);
 }
 
 void CodeRange::TearDown() {
-  delete code_range_;  // Frees all memory in the virtual memory range.
-  code_range_ = NULL;
+  if (virtual_memory_.IsReserved()) virtual_memory_.Release();
   base::LockGuard<base::Mutex> guard(&code_range_mutex_);
   free_list_.Free();
   allocation_list_.Free();
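
Note how the rewritten CodeRange::SetUp() drops the repeated "delete code_range_; code_range_ = NULL; return false;" cleanup: because the reservation now lives in a local value, its destructor releases the memory on any early return, and TakeControl() is only reached on success. The compact standalone sketch below illustrates that failure-path behavior; Reservation and CodeRangeLike are hypothetical types, with malloc standing in for a real reservation.

#include <cassert>
#include <cstddef>
#include <cstdlib>

// Hypothetical RAII reservation: the destructor runs on every early return,
// so failure paths need no explicit cleanup.
struct Reservation {
  void* address = nullptr;
  std::size_t size = 0;

  Reservation() = default;
  explicit Reservation(std::size_t n) : address(std::malloc(n)), size(n) {}
  ~Reservation() { std::free(address); }

  bool IsReserved() const { return address != nullptr; }

  // Transfer ownership, leaving |from| empty.
  void TakeControl(Reservation* from) {
    assert(!IsReserved());
    address = from->address;
    size = from->size;
    from->address = nullptr;
    from->size = 0;
  }
};

struct CodeRangeLike {
  Reservation virtual_memory;  // held by value, as in the patch

  bool SetUp(std::size_t requested, bool simulate_commit_failure) {
    Reservation reservation(requested);
    if (!reservation.IsReserved()) return false;  // nothing to clean up
    if (simulate_commit_failure) return false;    // destructor frees the block
    virtual_memory.TakeControl(&reservation);     // success: keep the memory
    return true;
  }
};

int main() {
  CodeRangeLike range;
  assert(!range.SetUp(4096, /*simulate_commit_failure=*/true));
  assert(!range.virtual_memory.IsReserved());  // failure left nothing behind
  assert(range.SetUp(4096, /*simulate_commit_failure=*/false));
  assert(range.virtual_memory.IsReserved());
  return 0;
}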

src/heap/spaces.h

@@ -1037,19 +1037,19 @@ class CodeRange {
   // Returns false on failure.
   bool SetUp(size_t requested_size);
 
-  bool valid() { return code_range_ != NULL; }
+  bool valid() { return virtual_memory_.IsReserved(); }
   Address start() {
     DCHECK(valid());
-    return static_cast<Address>(code_range_->address());
+    return static_cast<Address>(virtual_memory_.address());
   }
   size_t size() {
     DCHECK(valid());
-    return code_range_->size();
+    return virtual_memory_.size();
   }
   bool contains(Address address) {
     if (!valid()) return false;
-    Address start = static_cast<Address>(code_range_->address());
-    return start <= address && address < start + code_range_->size();
+    Address start = static_cast<Address>(virtual_memory_.address());
+    return start <= address && address < start + virtual_memory_.size();
   }
 
   // Allocates a chunk of memory from the large-object portion of

@@ -1099,7 +1099,7 @@ class CodeRange {
   Isolate* isolate_;
 
   // The reserved range of virtual memory that all code objects are put in.
-  base::VirtualMemory* code_range_;
+  base::VirtualMemory virtual_memory_;
 
   // The global mutex guards free_list_ and allocation_list_ as GC threads may
   // access both lists concurrently to the main thread.

src/heap/store-buffer.cc

@@ -16,11 +16,7 @@ namespace v8 {
 namespace internal {
 
 StoreBuffer::StoreBuffer(Heap* heap)
-    : heap_(heap),
-      top_(nullptr),
-      current_(0),
-      mode_(NOT_IN_GC),
-      virtual_memory_(nullptr) {
+    : heap_(heap), top_(nullptr), current_(0), mode_(NOT_IN_GC) {
   for (int i = 0; i < kStoreBuffers; i++) {
     start_[i] = nullptr;
     limit_[i] = nullptr;

@@ -35,10 +31,9 @@ void StoreBuffer::SetUp() {
   // Allocate 3x the buffer size, so that we can start the new store buffer
   // aligned to 2x the size. This lets us use a bit test to detect the end of
   // the area.
-  virtual_memory_ =
-      new base::VirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr());
-  uintptr_t start_as_int =
-      reinterpret_cast<uintptr_t>(virtual_memory_->address());
+  base::VirtualMemory reservation(kStoreBufferSize * 3,
+                                  heap_->GetRandomMmapAddr());
+  uintptr_t start_as_int = reinterpret_cast<uintptr_t>(reservation.address());
   start_[0] =
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize));
   limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);

@@ -46,30 +41,30 @@ void StoreBuffer::SetUp() {
   limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
 
   Address* vm_limit = reinterpret_cast<Address*>(
-      reinterpret_cast<char*>(virtual_memory_->address()) +
-      virtual_memory_->size());
+      reinterpret_cast<char*>(reservation.address()) + reservation.size());
 
   USE(vm_limit);
   for (int i = 0; i < kStoreBuffers; i++) {
-    DCHECK(reinterpret_cast<Address>(start_[i]) >= virtual_memory_->address());
-    DCHECK(reinterpret_cast<Address>(limit_[i]) >= virtual_memory_->address());
+    DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
+    DCHECK(reinterpret_cast<Address>(limit_[i]) >= reservation.address());
     DCHECK(start_[i] <= vm_limit);
     DCHECK(limit_[i] <= vm_limit);
     DCHECK((reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask) == 0);
   }
 
-  if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_[0]),
-                               kStoreBufferSize * kStoreBuffers,
-                               false)) {  // Not executable.
+  if (!reservation.Commit(reinterpret_cast<Address>(start_[0]),
+                          kStoreBufferSize * kStoreBuffers,
+                          false)) {  // Not executable.
     V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }
   current_ = 0;
   top_ = start_[current_];
+  virtual_memory_.TakeControl(&reservation);
 }
 
 void StoreBuffer::TearDown() {
-  delete virtual_memory_;
+  if (virtual_memory_.IsReserved()) virtual_memory_.Release();
   top_ = nullptr;
   for (int i = 0; i < kStoreBuffers; i++) {
     start_[i] = nullptr;
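
The comment in SetUp() above ("Allocate 3x the buffer size... This lets us use a bit test to detect the end of the area") is easiest to see with concrete numbers. The standalone sketch below uses made-up values (the real kStoreBufferSize and kStoreBufferMask come from store-buffer.h) to show why rounding the start up inside a 3x reservation always leaves room for both buffers, and why each limit can be detected with a mask test, as the kStoreBufferMask DCHECK asserts.

#include <cassert>
#include <cstdint>

int main() {
  // Illustrative values only.
  const std::uintptr_t kStoreBufferSize = std::uintptr_t{1} << 16;  // 64 KiB
  const std::uintptr_t kStoreBufferMask = kStoreBufferSize - 1;
  const std::uintptr_t reservation_start = 0x12340010;  // pretend mmap result
  const std::uintptr_t reservation_end =
      reservation_start + 3 * kStoreBufferSize;

  // Round the first buffer up to a kStoreBufferSize boundary, as SetUp() does
  // with RoundUp(start_as_int, kStoreBufferSize).
  std::uintptr_t start0 =
      (reservation_start + kStoreBufferMask) & ~kStoreBufferMask;
  std::uintptr_t limit0 = start0 + kStoreBufferSize;
  std::uintptr_t start1 = limit0;
  std::uintptr_t limit1 = start1 + kStoreBufferSize;

  // Rounding up consumes less than one kStoreBufferSize, so a 3x reservation
  // always has room for both buffers.
  assert(limit1 <= reservation_end);

  // Both limits sit on a kStoreBufferSize boundary (the DCHECK in SetUp()),
  // so "top_ reached the limit" can be tested with a mask instead of a
  // comparison against limit_.
  assert((limit0 & kStoreBufferMask) == 0);
  assert((limit1 & kStoreBufferMask) == 0);

  std::uintptr_t top = start0;
  do {
    top += sizeof(void*);  // simulate pushing one slot into the buffer
  } while ((top & kStoreBufferMask) != 0);
  assert(top == limit0);  // the bit test fires exactly at the buffer's end

  return 0;
}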

src/heap/store-buffer.h

@@ -208,7 +208,7 @@ class StoreBuffer {
   // IN_GC mode.
   StoreBufferMode mode_;
 
-  base::VirtualMemory* virtual_memory_;
+  base::VirtualMemory virtual_memory_;
 
   // Callbacks are more efficient than reading out the gc state for every
   // store buffer operation.