[Memory] Move VirtualMemory out of base:: platform.
- Moves base::VirtualMemory to v8::internal::VirtualMemory.
- Makes VirtualMemory platform-independent by moving internals to new
  OS:: static methods, for each platform. This will make it easier to
  delegate memory management in VirtualMemory to V8::Platform, so that
  embedders like Blink can override it. We can't depend on V8::Platform
  in base/platform.

Bug: chromium:756050
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: Iadfe230b6850bd917727a373f277afded9883adf
Reviewed-on: https://chromium-review.googlesource.com/653214
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48048}

This commit is contained in:
parent aabb893a32
commit 4dd293d922
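In caller terms, the move looks like this — a minimal sketch, assuming a heap-internal caller; the mapping of methods to OS:: calls is taken from the allocation.cc hunk below:

// Before this commit, the reservation type lived in base and did its own
// platform work:
//   v8::base::VirtualMemory vm(size, hint);
// After it, the type lives in v8::internal and routes every operation
// through static base::OS methods, which each platform file now implements.
v8::internal::VirtualMemory vm(size, hint);  // calls base::OS::ReserveRegion()
vm.Commit(vm.address(), size, false);        // calls base::OS::CommitRegion()
vm.Release();                                // calls base::OS::ReleaseRegion()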
@@ -99,29 +99,106 @@ void AlignedFree(void *ptr) {
#endif
}

bool AllocVirtualMemory(size_t size, void* hint, base::VirtualMemory* result) {
  base::VirtualMemory first_try(size, hint);
VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}

VirtualMemory::VirtualMemory(size_t size, void* hint)
    : address_(base::OS::ReserveRegion(size, hint)), size_(size) {}

VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
    : address_(nullptr), size_(0) {
  address_ = base::OS::ReserveAlignedRegion(size, alignment, hint, &size_);
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = base::OS::ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}

void VirtualMemory::Reset() {
  address_ = nullptr;
  size_ = 0;
}

bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  CHECK(InVM(address, size));
  return base::OS::CommitRegion(address, size, is_executable);
}

bool VirtualMemory::Uncommit(void* address, size_t size) {
  CHECK(InVM(address, size));
  return base::OS::UncommitRegion(address, size);
}

bool VirtualMemory::Guard(void* address) {
  CHECK(InVM(address, base::OS::CommitPageSize()));
  base::OS::Guard(address, base::OS::CommitPageSize());
  return true;
}

size_t VirtualMemory::ReleasePartial(void* free_start) {
  DCHECK(IsReserved());
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  const size_t size = size_ - (reinterpret_cast<size_t>(free_start) -
                               reinterpret_cast<size_t>(address_));
  CHECK(InVM(free_start, size));
  DCHECK_LT(address_, free_start);
  DCHECK_LT(free_start, reinterpret_cast<void*>(
                            reinterpret_cast<size_t>(address_) + size_));
  const bool result =
      base::OS::ReleasePartialRegion(address_, size_, free_start, size);
  USE(result);
  DCHECK(result);
  size_ -= size;
  return size;
}

void VirtualMemory::Release() {
  DCHECK(IsReserved());
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  void* address = address_;
  size_t size = size_;
  CHECK(InVM(address, size));
  Reset();
  bool result = base::OS::ReleaseRegion(address, size);
  USE(result);
  DCHECK(result);
}

void VirtualMemory::TakeControl(VirtualMemory* from) {
  DCHECK(!IsReserved());
  address_ = from->address_;
  size_ = from->size_;
  from->Reset();
}

bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
  VirtualMemory first_try(size, hint);
  if (first_try.IsReserved()) {
    result->TakeControl(&first_try);
    return true;
  }

  V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
  base::VirtualMemory second_try(size, hint);
  VirtualMemory second_try(size, hint);
  result->TakeControl(&second_try);
  return result->IsReserved();
}

bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
                               base::VirtualMemory* result) {
  base::VirtualMemory first_try(size, alignment, hint);
                               VirtualMemory* result) {
  VirtualMemory first_try(size, alignment, hint);
  if (first_try.IsReserved()) {
    result->TakeControl(&first_try);
    return true;
  }

  V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
  base::VirtualMemory second_try(size, alignment, hint);
  VirtualMemory second_try(size, alignment, hint);
  result->TakeControl(&second_try);
  return result->IsReserved();
}
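The new AllocVirtualMemory and AlignedAllocVirtualMemory above share one retry shape worth spelling out; this is the same logic restated with comments, nothing added beyond the hunk:

// One attempt; on failure, tell the embedder to shed memory, then try
// exactly once more.
VirtualMemory first_try(size, hint);
if (first_try.IsReserved()) {
  result->TakeControl(&first_try);  // transfer ownership, no re-reservation
  return true;
}
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();  // embedder hook
VirtualMemory second_try(size, hint);
result->TakeControl(&second_try);
return result->IsReserved();  // may still be false; callers handle OOM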
@@ -76,9 +76,88 @@ class FreeStoreAllocationPolicy {
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);

bool AllocVirtualMemory(size_t size, void* hint, base::VirtualMemory* result);
// Represents and controls an area of reserved memory.
class V8_EXPORT_PRIVATE VirtualMemory {
 public:
  // Empty VirtualMemory object, controlling no reserved memory.
  VirtualMemory();

  // Reserves virtual memory with size.
  explicit VirtualMemory(size_t size, void* hint);

  // Reserves virtual memory containing an area of the given size that
  // is aligned per alignment. This may not be at the position returned
  // by address().
  VirtualMemory(size_t size, size_t alignment, void* hint);

  // Construct a virtual memory by assigning it some already mapped address
  // and size.
  VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}

  // Releases the reserved memory, if any, controlled by this VirtualMemory
  // object.
  ~VirtualMemory();

  // Returns whether the memory has been reserved.
  bool IsReserved() const { return address_ != nullptr; }

  // Initialize or resets an embedded VirtualMemory object.
  void Reset();

  // Returns the start address of the reserved memory.
  // If the memory was reserved with an alignment, this address is not
  // necessarily aligned. The user might need to round it up to a multiple of
  // the alignment to get the start of the aligned block.
  void* address() const {
    DCHECK(IsReserved());
    return address_;
  }

  void* end() const {
    DCHECK(IsReserved());
    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address_) +
                                   size_);
  }

  // Returns the size of the reserved memory. The returned value is only
  // meaningful when IsReserved() returns true.
  // If the memory was reserved with an alignment, this size may be larger
  // than the requested size.
  size_t size() const { return size_; }

  // Commits real memory. Returns whether the operation succeeded.
  bool Commit(void* address, size_t size, bool is_executable);

  // Uncommit real memory. Returns whether the operation succeeded.
  bool Uncommit(void* address, size_t size);

  // Creates a single guard page at the given address.
  bool Guard(void* address);

  // Releases the memory after |free_start|. Returns the bytes released.
  size_t ReleasePartial(void* free_start);

  void Release();

  // Assign control of the reserved region to a different VirtualMemory object.
  // The old object is no longer functional (IsReserved() returns false).
  void TakeControl(VirtualMemory* from);

  bool InVM(void* address, size_t size) {
    return (reinterpret_cast<uintptr_t>(address_) <=
            reinterpret_cast<uintptr_t>(address)) &&
           ((reinterpret_cast<uintptr_t>(address_) + size_) >=
            (reinterpret_cast<uintptr_t>(address) + size));
  }

 private:
  void* address_;  // Start address of the virtual memory.
  size_t size_;    // Size of the virtual memory.
};

bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result);
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
                               base::VirtualMemory* result);
                               VirtualMemory* result);

}  // namespace internal
}  // namespace v8
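A minimal lifecycle sketch for the class declared above; the sizes and the Example() wrapper are illustrative, not from the commit:

void Example() {
  // Reserve 1 MiB of address space, 64 KiB-aligned; nothing is committed yet.
  v8::internal::VirtualMemory vm(1u << 20, 1u << 16, nullptr /*hint*/);
  if (!vm.IsReserved()) return;  // reservation can fail
  // Per the comment on address(), the start may need rounding up to the
  // alignment before use.
  void* page = vm.address();
  if (vm.Commit(page, 4096, false /*not executable*/)) {
    // ... use the committed page ...
    vm.Uncommit(page, 4096);  // back to reserved-only
  }
}  // ~VirtualMemory releases the whole reservation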
@@ -485,8 +485,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
  virtual void Free(void* data, size_t) { free(data); }

  virtual void* Reserve(size_t length) {
    return base::VirtualMemory::ReserveRegion(length,
                                              base::OS::GetRandomMmapAddr());
    return base::OS::ReserveRegion(length, base::OS::GetRandomMmapAddr());
  }

  virtual void Free(void* data, size_t length,
@@ -496,7 +495,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
      return Free(data, length);
    }
    case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
      base::VirtualMemory::ReleaseRegion(data, length);
      base::OS::ReleaseRegion(data, length);
      return;
    }
  }
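For an embedder-side allocator like the one above, the two allocation modes pair up as follows; a sketch, with allocator and len as placeholder names:

// kReservation memory must come from Reserve() and go back through the
// kReservation branch of Free(); kNormal memory uses malloc/free instead.
void* data = allocator->Reserve(len);  // now base::OS::ReserveRegion underneath
// ... commit and use ...
allocator->Free(data, len,
                v8::ArrayBuffer::Allocator::AllocationMode::kReservation);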
@@ -65,23 +65,106 @@ double AIXTimezoneCache::LocalTimeOffset() {

TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); }

// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

void* OS::Allocate(const size_t requested, size_t* allocated,
                   OS::MemoryPermission access, void* hint) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = GetProtectionFromMemoryPermission(access);
  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
                     kMmapFdOffset);

  if (mbase == MAP_FAILED) return NULL;
  *allocated = msize;
  return mbase;
}

// static
void* OS::ReserveRegion(size_t size, void* hint) {
  void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
                      kMmapFd, kMmapFdOffset);

  if (result == MAP_FAILED) return nullptr;

  return result;
}

// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                               size_t* allocated) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  hint = AlignedAddress(hint, alignment);
  size_t request_size =
      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
  void* result = ReserveRegion(request_size, hint);
  if (result == nullptr) {
    *allocated = 0;
    return nullptr;
  }

  uint8_t* base = static_cast<uint8_t*>(result);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;

  *allocated = aligned_size;
  return static_cast<void*>(aligned_base);
}

// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);

  if (mprotect(address, size, prot) == -1) return false;

  return true;
}

// static
bool OS::UncommitRegion(void* address, size_t size) {
  return mprotect(address, size, PROT_NONE) != -1;
}

// static
bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
                              size_t free_size) {
  return munmap(free_start, free_size) == 0;
}

// static
bool OS::ReleaseRegion(void* address, size_t size) {
  return munmap(address, size) == 0;
}

// static
bool OS::HasLazyCommits() { return true; }

static unsigned StringToLong(char* buffer) {
  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
}

std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
  static const int MAP_LENGTH = 1024;
@@ -122,119 +205,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  return result;
}

void OS::SignalCodeMovingGC() {}

// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}

VirtualMemory::VirtualMemory(size_t size, void* hint)
    : address_(ReserveRegion(size, hint)), size_(size) {}

VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
    : address_(NULL), size_(0) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  hint = AlignedAddress(hint, alignment);
  size_t request_size =
      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(hint, request_size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd, kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  uint8_t* base = static_cast<uint8_t*>(reservation);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}

void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}

bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}

bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}

bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}

void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
  void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
                      kMmapFd, kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}

bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);

  if (mprotect(base, size, prot) == -1) return false;

  return true;
}

bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mprotect(base, size, PROT_NONE) != -1;
}

bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
                                         void* free_start, size_t free_size) {
  return munmap(free_start, free_size) == 0;
}

bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}

bool VirtualMemory::HasLazyCommits() { return true; }
}  // namespace base
}  // namespace v8
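Worked numbers for the over-reserve-and-trim scheme in OS::ReserveAlignedRegion above (values illustrative, assuming 4 KiB pages): with size = 40 KiB and alignment = 64 KiB, request_size = RoundUp(40 KiB + 64 KiB, 4 KiB) = 104 KiB. Suppose mmap returns base = 0x12345000; then aligned_base = RoundUp(base, 64 KiB) = 0x12350000, so a 44 KiB prefix is unmapped and request_size drops to 60 KiB. aligned_size = RoundUp(40 KiB, 4 KiB) = 40 KiB, so a 20 KiB suffix is unmapped as well, and DCHECK(aligned_size == request_size) holds with both at 40 KiB. The caller ends up with exactly [0x12350000, 0x12350000 + 40 KiB).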
@@ -26,6 +26,31 @@
namespace v8 {
namespace base {

namespace {

// The VirtualMemory implementation is taken from platform-win32.cc.
// The mmap-based virtual memory implementation as it is used on most posix
// platforms does not work well because Cygwin does not support MAP_FIXED.
// This causes VirtualMemory::Commit to not always commit the memory region
// specified.

static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
                                    void* hint) {
  LPVOID base = NULL;

  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
    // For exectutable pages try and randomize the allocation address
    base = VirtualAlloc(hint, size, action, protection);
  }

  // After three attempts give up and let the OS find an address to use.
  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);

  return base;
}

}  // namespace

class CygwinTimezoneCache : public PosixTimezoneCache {
  const char* LocalTimezone(double time) override;

@@ -65,6 +90,75 @@ void* OS::Allocate(const size_t requested, size_t* allocated,
  return mbase;
}

// static
void* OS::ReserveRegion(size_t size, void* hint) {
  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
}

// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                               size_t* allocated) {
  hint = AlignedAddress(hint, alignment);
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  size_t request_size =
      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
  void* address = ReserveRegion(request_size, hint);
  if (address == NULL) {
    *allocated = 0;
    return nullptr;
  }
  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
  // Try reducing the size by freeing and then reallocating a specific area.
  bool result = ReleaseRegion(address, request_size);
  USE(result);
  DCHECK(result);
  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
  if (address != nullptr) {
    request_size = size;
    DCHECK(base == static_cast<uint8_t*>(address));
  } else {
    // Resizing failed, just go with a bigger area.
    address = ReserveRegion(request_size, hint);
    if (address == nullptr) {
      *allocated = 0;
      return nullptr;
    }
  }

  *allocated = request_size;
  return static_cast<void*>(address);
}

// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
    return false;
  }
  return true;
}

// static
bool OS::UncommitRegion(void* address, size_t size) {
  return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}

// static
bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
                              size_t free_size) {
  return VirtualFree(free_start, free_size, MEM_DECOMMIT) != 0;
}

// static
bool OS::ReleaseRegion(void* address, size_t size) {
  return VirtualFree(address, 0, MEM_RELEASE) != 0;
}

// static
bool OS::HasLazyCommits() {
  // TODO(alph): implement for the platform.
  return false;
}

std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
@@ -131,124 +225,5 @@ void OS::SignalCodeMovingGC() {
  // Nothing to do on Cygwin.
}

// The VirtualMemory implementation is taken from platform-win32.cc.
// The mmap-based virtual memory implementation as it is used on most posix
// platforms does not work well because Cygwin does not support MAP_FIXED.
// This causes VirtualMemory::Commit to not always commit the memory region
// specified.

static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
                                    void* hint) {
  LPVOID base = NULL;

  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
    // For exectutable pages try and randomize the allocation address
    base = VirtualAlloc(hint, size, action, protection);
  }

  // After three attempts give up and let the OS find an address to use.
  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);

  return base;
}

VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }

VirtualMemory::VirtualMemory(size_t size, void* hint)
    : address_(ReserveRegion(size, hint)), size_(size) {}

VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
    : address_(NULL), size_(0) {
  hint = AlignedAddress(hint, alignment);
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* address = ReserveRegion(request_size, hint);
  if (address == NULL) return;
  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
  // Try reducing the size by freeing and then reallocating a specific area.
  bool result = ReleaseRegion(address, request_size);
  USE(result);
  DCHECK(result);
  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
  if (address != NULL) {
    request_size = size;
    DCHECK(base == static_cast<uint8_t*>(address));
  } else {
    // Resizing failed, just go with a bigger area.
    address = ReserveRegion(request_size, hint);
    if (address == NULL) return;
  }
  address_ = address;
  size_ = request_size;
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address_, size_);
    DCHECK(result);
    USE(result);
  }
}

void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}

bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}

bool VirtualMemory::Uncommit(void* address, size_t size) {
  DCHECK(IsReserved());
  return UncommitRegion(address, size);
}

void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
}

bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
    return false;
  }
  return true;
}

bool VirtualMemory::Guard(void* address) {
  if (NULL == VirtualAlloc(address,
                           OS::CommitPageSize(),
                           MEM_COMMIT,
                           PAGE_NOACCESS)) {
    return false;
  }
  return true;
}

bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
}

bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return VirtualFree(base, 0, MEM_RELEASE) != 0;
}

bool VirtualMemory::HasLazyCommits() {
  // TODO(alph): implement for the platform.
  return false;
}

}  // namespace base
}  // namespace v8
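The Cygwin alignment path above cannot rely on MAP_FIXED, so it aligns by releasing and re-reserving; an annotated sketch of the same steps, nothing beyond the hunk:

void* address = OS::ReserveRegion(request_size, hint);  // 1. over-reserve
uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
OS::ReleaseRegion(address, request_size);               // 2. give it all back
address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);  // 3. re-reserve exactly
if (address == nullptr) {
  // Inherently racy: another thread can claim the range between steps 2 and
  // 3, hence the fallback to an oversized but usable reservation.
  address = OS::ReserveRegion(request_size, hint);
}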
@@ -40,23 +40,109 @@ TimezoneCache* OS::CreateTimezoneCache() {
  return new PosixDefaultTimezoneCache();
}

// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

void* OS::Allocate(const size_t requested, size_t* allocated,
                   OS::MemoryPermission access, void* hint) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = GetProtectionFromMemoryPermission(access);
  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
  void* mbase =
      mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);

  if (mbase == MAP_FAILED) return NULL;
  *allocated = msize;
  return mbase;
}

// static
void* OS::ReserveRegion(size_t size, void* hint) {
  void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}

// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                               size_t* allocated) {
  hint = AlignedAddress(hint, alignment);
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* result = ReserveRegion(request_size, hint);
  if (result == nullptr) {
    *allocated = 0;
    return nullptr;
  }

  uint8_t* base = static_cast<uint8_t*>(result);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  *allocated = aligned_size;
  return static_cast<void*>(aligned_base);
}

// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address, size, prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }
  return true;
}

// static
bool OS::UncommitRegion(void* address, size_t size) {
  return mmap(address, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED,
              kMmapFd, kMmapFdOffset) != MAP_FAILED;
}

// static
bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
                              size_t free_size) {
  return munmap(free_start, free_size) == 0;
}

// static
bool OS::ReleaseRegion(void* address, size_t size) {
  return munmap(address, size) == 0;
}

// static
bool OS::HasLazyCommits() {
  // TODO(alph): implement for the platform.
  return false;
}

static unsigned StringToLong(char* buffer) {
  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
}

std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
  static const int MAP_LENGTH = 1024;
@@ -80,8 +166,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
    bytes_read = -1;
    do {
      bytes_read++;
      if (bytes_read >= MAP_LENGTH - 1)
        break;
      if (bytes_read >= MAP_LENGTH - 1) break;
      bytes_read = read(fd, buffer + bytes_read, 1);
      if (bytes_read < 1) break;
    } while (buffer[bytes_read] != '\n');
@@ -98,135 +183,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  return result;
}

void OS::SignalCodeMovingGC() {
}

// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }

VirtualMemory::VirtualMemory(size_t size, void* hint)
    : address_(ReserveRegion(size, hint)), size_(size) {}

VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
    : address_(NULL), size_(0) {
  hint = AlignedAddress(hint, alignment);
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(hint, request_size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  uint8_t* base = static_cast<uint8_t*>(reservation);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}

void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}

bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}

bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}

bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}

void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
  void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}

bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }
  return true;
}

bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}

bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
                                         void* free_start, size_t free_size) {
  return munmap(free_start, free_size) == 0;
}

bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}

bool VirtualMemory::HasLazyCommits() {
  // TODO(alph): implement for the platform.
  return false;
}
void OS::SignalCodeMovingGC() {}

}  // namespace base
}  // namespace v8
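Comparing this hunk with the AIX one above shows two ways the POSIX ports turn a PROT_NONE reservation into usable memory; both calls are taken from the respective hunks, with the flags written out:

// AIX: flip protection on the existing mapping in place.
mprotect(address, size, PROT_READ | PROT_WRITE);
// BSD-style ports: remap over the reservation with MAP_FIXED, which also
// discards any previous contents of those pages.
mmap(address, size, PROT_READ | PROT_WRITE,
     MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);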
@@ -37,28 +37,35 @@ TimezoneCache* OS::CreateTimezoneCache() {
  return new PosixDefaultTimezoneCache();
}

// static
void* OS::Allocate(const size_t requested, size_t* allocated,
                   OS::MemoryPermission access, void* hint) {
  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
  return nullptr;
}

std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
  return std::vector<SharedLibraryAddress>();
// static
bool OS::Guard(void* address, size_t size) {
  return mx_vmar_protect(mx_vmar_root_self(),
                         reinterpret_cast<uintptr_t>(address), size,
                         0 /*no permissions*/) == MX_OK;
}

void OS::SignalCodeMovingGC() {
  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
// static
void* OS::ReserveRegion(size_t size, void* hint) {
  mx_handle_t vmo;
  if (mx_vmo_create(size, 0, &vmo) != MX_OK) return nullptr;
  uintptr_t result;
  mx_status_t status = mx_vmar_map(mx_vmar_root_self(), 0, vmo, 0, size,
                                   0 /*no permissions*/, &result);
  mx_handle_close(vmo);
  if (status != MX_OK) return nullptr;
  return reinterpret_cast<void*>(result);
}

VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}

VirtualMemory::VirtualMemory(size_t size, void* hint)
    : address_(ReserveRegion(size, hint)), size_(size) {}

VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
    : address_(nullptr), size_(0) {
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                               size_t* allocated) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  hint = AlignedAddress(hint, alignment);
  size_t request_size =
@@ -75,7 +82,10 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
  // Either the vmo is now referenced by the vmar, or we failed and are bailing,
  // so close the vmo either way.
  mx_handle_close(vmo);
  if (status != MX_OK) return;
  if (status != MX_OK) {
    *allocated = 0;
    return nullptr;
  }

  uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
  uint8_t* aligned_base = RoundUp(base, alignment);
@@ -102,84 +112,55 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}

void VirtualMemory::Reset() {
  address_ = nullptr;
  size_ = 0;
}

bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  CHECK(InVM(address, size));
  return CommitRegion(address, size, is_executable);
}

bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}

bool VirtualMemory::Guard(void* address) {
  return mx_vmar_protect(mx_vmar_root_self(),
                         reinterpret_cast<uintptr_t>(address),
                         OS::CommitPageSize(), 0 /*no permissions*/) == MX_OK;
  *allocated = aligned_size;
  return static_cast<void*>(aligned_base);
}

// static
void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
  mx_handle_t vmo;
  if (mx_vmo_create(size, 0, &vmo) != MX_OK) return nullptr;
  uintptr_t result;
  mx_status_t status = mx_vmar_map(mx_vmar_root_self(), 0, vmo, 0, size,
                                   0 /*no permissions*/, &result);
  mx_handle_close(vmo);
  if (status != MX_OK) return nullptr;
  return reinterpret_cast<void*>(result);
}

// static
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
  uint32_t prot = MX_VM_FLAG_PERM_READ | MX_VM_FLAG_PERM_WRITE |
                  (is_executable ? MX_VM_FLAG_PERM_EXECUTE : 0);
  return mx_vmar_protect(mx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
                         size, prot) == MX_OK;
  return mx_vmar_protect(mx_vmar_root_self(),
                         reinterpret_cast<uintptr_t>(address), size,
                         prot) == MX_OK;
}

// static
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mx_vmar_protect(mx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
                         size, 0 /*no permissions*/) == MX_OK;
bool OS::UncommitRegion(void* address, size_t size) {
  return mx_vmar_protect(mx_vmar_root_self(),
                         reinterpret_cast<uintptr_t>(address), size,
                         0 /*no permissions*/) == MX_OK;
}

// static
bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
                                         void* free_start, size_t free_size) {
bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
                              size_t free_size) {
  return mx_vmar_unmap(mx_vmar_root_self(),
                       reinterpret_cast<uintptr_t>(free_start),
                       free_size) == MX_OK;
}

// static
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return mx_vmar_unmap(mx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
                       size) == MX_OK;
bool OS::ReleaseRegion(void* address, size_t size) {
  return mx_vmar_unmap(mx_vmar_root_self(),
                       reinterpret_cast<uintptr_t>(address), size) == MX_OK;
}

// static
bool VirtualMemory::HasLazyCommits() {
bool OS::HasLazyCommits() {
  // TODO(scottmg): Port, https://crbug.com/731217.
  return false;
}

std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
  return std::vector<SharedLibraryAddress>();
}

void OS::SignalCodeMovingGC() {
  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
}

}  // namespace base
}  // namespace v8
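Condensed from the hunk above, the Fuchsia reservation recipe, using only calls that appear in it:

mx_handle_t vmo;
if (mx_vmo_create(size, 0, &vmo) != MX_OK) return nullptr;  // 1. create a VMO
uintptr_t addr;
mx_status_t status = mx_vmar_map(mx_vmar_root_self(), 0, vmo, 0, size,
                                 0 /*no permissions*/, &addr);  // 2. map it
mx_handle_close(vmo);  // 3. the mapping keeps the VMO alive; drop the handle
if (status != MX_OK) return nullptr;
// Commit/uncommit later toggle permissions on sub-ranges via mx_vmar_protect().
return reinterpret_cast<void*>(addr);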
@@ -97,16 +97,120 @@ TimezoneCache* OS::CreateTimezoneCache() {
  return new PosixDefaultTimezoneCache();
}

// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

void* OS::Allocate(const size_t requested, size_t* allocated,
                   OS::MemoryPermission access, void* hint) {
  const size_t msize = RoundUp(requested, AllocateAlignment());
  int prot = GetProtectionFromMemoryPermission(access);
  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
                     kMmapFdOffset);
  if (mbase == MAP_FAILED) return NULL;
  *allocated = msize;
  return mbase;
}

// static
void* OS::ReserveRegion(size_t size, void* hint) {
  void* result =
      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
           kMmapFd, kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

#if defined(LEAK_SANITIZER)
  __lsan_register_root_region(result, size);
#endif
  return result;
}

// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                               size_t* allocated) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  hint = AlignedAddress(hint, alignment);
  size_t request_size =
      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation =
      mmap(hint, request_size, PROT_NONE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
  if (reservation == MAP_FAILED) {
    *allocated = 0;
    return nullptr;
  }

  uint8_t* base = static_cast<uint8_t*>(reservation);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

#if defined(LEAK_SANITIZER)
  __lsan_register_root_region(static_cast<void*>(aligned_base), aligned_size);
#endif

  *allocated = aligned_size;
  return static_cast<void*>(aligned_base);
}

// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address, size, prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  return true;
}

// static
bool OS::UncommitRegion(void* address, size_t size) {
  return mmap(address, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}

// static
bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
                              size_t free_size) {
#if defined(LEAK_SANITIZER)
  __lsan_unregister_root_region(address, size);
  __lsan_register_root_region(address, size - free_size);
#endif
  return munmap(free_start, free_size) == 0;
}

// static
bool OS::ReleaseRegion(void* address, size_t size) {
#if defined(LEAK_SANITIZER)
  __lsan_unregister_root_region(address, size);
#endif
  return munmap(address, size) == 0;
}

// static
bool OS::HasLazyCommits() { return true; }

std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
  // This function assumes that the layout of the file is as follows:
@@ -190,131 +294,5 @@ void OS::SignalCodeMovingGC() {
  fclose(f);
}

// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}

VirtualMemory::VirtualMemory(size_t size, void* hint)
    : address_(ReserveRegion(size, hint)), size_(size) {}

VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
    : address_(NULL), size_(0) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  hint = AlignedAddress(hint, alignment);
  size_t request_size =
      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation =
      mmap(hint, request_size, PROT_NONE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  uint8_t* base = static_cast<uint8_t*>(reservation);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
#if defined(LEAK_SANITIZER)
  __lsan_register_root_region(address_, size_);
#endif
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}

void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}

bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  CHECK(InVM(address, size));
  return CommitRegion(address, size, is_executable);
}

bool VirtualMemory::Uncommit(void* address, size_t size) {
  CHECK(InVM(address, size));
  return UncommitRegion(address, size);
}

bool VirtualMemory::Guard(void* address) {
  CHECK(InVM(address, OS::CommitPageSize()));
  OS::Guard(address, OS::CommitPageSize());
  return true;
}

void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
  void* result =
      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
           kMmapFd, kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

#if defined(LEAK_SANITIZER)
  __lsan_register_root_region(result, size);
#endif
  return result;
}

bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base, size, prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  return true;
}

bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}

bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
                                         void* free_start, size_t free_size) {
#if defined(LEAK_SANITIZER)
  __lsan_unregister_root_region(base, size);
  __lsan_register_root_region(base, size - free_size);
#endif
  return munmap(free_start, free_size) == 0;
}

bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
#if defined(LEAK_SANITIZER)
  __lsan_unregister_root_region(base, size);
#endif
  return munmap(base, size) == 0;
}

bool VirtualMemory::HasLazyCommits() { return true; }

}  // namespace base
}  // namespace v8
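The LEAK_SANITIZER blocks in this hunk keep one invariant: every live reservation is registered as exactly one LSan root region, so the leak checker scans reserved memory for pointers. The pairing, pulled together from the hunk:

#if defined(LEAK_SANITIZER)
__lsan_register_root_region(result, size);                // on reserve
__lsan_unregister_root_region(address, size);             // before munmap
__lsan_register_root_region(address, size - free_size);   // re-register the
                                                          // remainder on shrink
#endif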
@@ -51,6 +51,7 @@ namespace base {
static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;

// static
void* OS::Allocate(const size_t requested, size_t* allocated,
                   OS::MemoryPermission access, void* hint) {
  const size_t msize = RoundUp(requested, getpagesize());
@@ -62,58 +63,31 @@ void* OS::Allocate(const size_t requested, size_t* allocated,
  return mbase;
}

// static
void* OS::ReserveRegion(size_t size, void* hint) {
  void* result =
      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
           kMmapFd, kMmapFdOffset);

  if (result == MAP_FAILED) return nullptr;

std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
  unsigned int images_count = _dyld_image_count();
  for (unsigned int i = 0; i < images_count; ++i) {
    const mach_header* header = _dyld_get_image_header(i);
    if (header == NULL) continue;
#if V8_HOST_ARCH_X64
    uint64_t size;
    char* code_ptr = getsectdatafromheader_64(
        reinterpret_cast<const mach_header_64*>(header),
        SEG_TEXT,
        SECT_TEXT,
        &size);
#else
    unsigned int size;
    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
#endif
    if (code_ptr == NULL) continue;
    const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
    result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
                                          start + size, slide));
  }
  return result;
}

void OS::SignalCodeMovingGC() {
}

TimezoneCache* OS::CreateTimezoneCache() {
  return new PosixDefaultTimezoneCache();
}

VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }

VirtualMemory::VirtualMemory(size_t size, void* hint)
    : address_(ReserveRegion(size, hint)), size_(size) {}

VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
    : address_(NULL), size_(0) {
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                               size_t* allocated) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  hint = AlignedAddress(hint, alignment);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation =
      mmap(hint, request_size, PROT_NONE,
           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
  if (reservation == MAP_FAILED) return;
  void* result = ReserveRegion(request_size, hint);
  if (result == nullptr) {
    *allocated = 0;
    return nullptr;
  }

  uint8_t* base = static_cast<uint8_t*>(reservation);
  uint8_t* base = static_cast<uint8_t*>(result);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

@@ -135,54 +109,12 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
  *allocated = aligned_size;
  return static_cast<void*>(aligned_base);
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}

void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}

bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}

bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}

bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}

void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
  void* result =
      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
           kMmapFd, kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}

bool VirtualMemory::CommitRegion(void* address,
                                 size_t size,
                                 bool is_executable) {
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address,
                         size,
@@ -195,8 +127,8 @@ bool VirtualMemory::CommitRegion(void* address,
  return true;
}

bool VirtualMemory::UncommitRegion(void* address, size_t size) {
// static
bool OS::UncommitRegion(void* address, size_t size) {
  return mmap(address,
              size,
              PROT_NONE,
@@ -205,16 +137,49 @@ bool VirtualMemory::UncommitRegion(void* address, size_t size) {
              kMmapFdOffset) != MAP_FAILED;
}

bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
                                         void* free_start, size_t free_size) {
// static
bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
                              size_t free_size) {
  return munmap(free_start, free_size) == 0;
}

bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
// static
bool OS::ReleaseRegion(void* address, size_t size) {
  return munmap(address, size) == 0;
}

bool VirtualMemory::HasLazyCommits() { return true; }
// static
bool OS::HasLazyCommits() { return true; }

std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
  unsigned int images_count = _dyld_image_count();
  for (unsigned int i = 0; i < images_count; ++i) {
    const mach_header* header = _dyld_get_image_header(i);
    if (header == NULL) continue;
#if V8_HOST_ARCH_X64
    uint64_t size;
    char* code_ptr = getsectdatafromheader_64(
        reinterpret_cast<const mach_header_64*>(header), SEG_TEXT, SECT_TEXT,
        &size);
#else
    unsigned int size;
    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
#endif
    if (code_ptr == NULL) continue;
    const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
    result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
                                          start + size, slide));
  }
  return result;
}

void OS::SignalCodeMovingGC() {}

TimezoneCache* OS::CreateTimezoneCache() {
  return new PosixDefaultTimezoneCache();
}

}  // namespace base
}  // namespace v8
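A macOS-specific detail visible at the top of this hunk: for anonymous mappings, the fd argument can carry a Mach VM tag, and kMmapFd = VM_MAKE_TAG(255) labels V8's regions so tools like vmmap can attribute them. Sketch, with the flag set taken from the hunk:

static const int kMmapFd = VM_MAKE_TAG(255);  // tag 255 identifies these mappings
void* p = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
               kMmapFd, 0 /*kMmapFdOffset*/);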
@@ -38,16 +38,105 @@ TimezoneCache* OS::CreateTimezoneCache() {
  return new PosixDefaultTimezoneCache();
}

// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

void* OS::Allocate(const size_t requested, size_t* allocated,
                   OS::MemoryPermission access, void* hint) {
  const size_t msize = RoundUp(requested, AllocateAlignment());
  int prot = GetProtectionFromMemoryPermission(access);
  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
  void* mbase =
      mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
  if (mbase == MAP_FAILED) return NULL;
  *allocated = msize;
  return mbase;
}

// static
void* OS::ReserveRegion(size_t size, void* hint) {
  void* result =
      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
           kMmapFd, kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}

// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                               size_t* allocated) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  hint = AlignedAddress(hint, alignment);
  size_t request_size =
      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
  void* result = ReserveRegion(request_size, hint);
  if (result == nullptr) {
    *allocated = 0;
    return nullptr;
  }

  uint8_t* base = static_cast<uint8_t*>(result);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  *allocated = aligned_size;
  return static_cast<void*>(aligned_base);
}

// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address, size, prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED, kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }
  return true;
}

// static
bool OS::UncommitRegion(void* address, size_t size) {
  return mmap(address, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}

// static
bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
                              size_t free_size) {
  return munmap(free_start, free_size) == 0;
}

// static
bool OS::ReleaseRegion(void* address, size_t size) {
  return munmap(address, size) == 0;
}

// static
bool OS::HasLazyCommits() {
  // TODO(alph): implement for the platform.
  return false;
}

std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
@@ -132,133 +221,5 @@ void OS::SignalCodeMovingGC() {
  fclose(f);
}

// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }

VirtualMemory::VirtualMemory(size_t size, void* hint)
    : address_(ReserveRegion(size, hint)), size_(size) {}

VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
    : address_(NULL), size_(0) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  hint = AlignedAddress(hint, alignment);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation =
      mmap(hint, request_size, PROT_NONE,
           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  uint8_t* base = static_cast<uint8_t*>(reservation);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}

void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}

bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}

bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}

bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}

void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
  void* result =
      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
           kMmapFd, kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}

bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }
  return true;
}

bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}

bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
                                         void* free_start, size_t free_size) {
  return munmap(free_start, free_size) == 0;
}

bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}

bool VirtualMemory::HasLazyCommits() {
  // TODO(alph): implement for the platform.
  return false;
}

}  // namespace base
}  // namespace v8
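The over-reserve-and-trim scheme in `OS::ReserveAlignedRegion` above generalizes beyond V8: reserve `size + alignment` bytes, round the base up, then give the misaligned edges back to the OS. A minimal standalone sketch of the same idea in plain POSIX (the `ReserveAligned` helper and its round-up arithmetic are illustrative, not V8 code; assumes `alignment` is a power-of-two multiple of the page size):

    #include <stdint.h>
    #include <sys/mman.h>

    // Reserve |size| bytes aligned to |alignment|. Over-reserve by
    // |alignment|, then munmap the unused prefix and suffix pages.
    void* ReserveAligned(size_t size, size_t alignment) {
      size_t request = size + alignment;
      void* raw = mmap(nullptr, request, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (raw == MAP_FAILED) return nullptr;
      uintptr_t base = reinterpret_cast<uintptr_t>(raw);
      uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
      if (aligned != base) {
        munmap(raw, aligned - base);  // trim the prefix
      }
      size_t tail = request - (aligned - base) - size;
      if (tail != 0) {
        munmap(reinterpret_cast<void*>(aligned + size), tail);  // trim the suffix
      }
      return reinterpret_cast<void*>(aligned);
    }

Reserving `size + alignment` bytes guarantees an aligned block of `size` bytes exists somewhere inside the mapping; POSIX permits `munmap` on page-aligned sub-ranges of a mapping, which is what makes the trimming legal.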
@@ -129,6 +129,7 @@ void OS::ProtectCode(void* address, const size_t size) {

// Create guard pages.
#if !V8_OS_FUCHSIA
void OS::Guard(void* address, const size_t size) {
#if V8_OS_CYGWIN
  DWORD oldprotect;
@@ -137,6 +138,7 @@ void OS::Guard(void* address, const size_t size) {
  mprotect(address, size, PROT_NONE);
#endif
}
#endif  // !V8_OS_FUCHSIA

// Make a region of memory readable and writable.
void OS::Unprotect(void* address, const size_t size) {
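The `mprotect(address, size, PROT_NONE)` call above is the whole guard-page mechanism: any load or store into the protected page raises a fault instead of silently corrupting adjacent memory. A hedged standalone illustration of the effect (plain POSIX, not V8 code):

    #include <sys/mman.h>
    #include <unistd.h>

    int main() {
      const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      // Two pages: the first stays usable, the second becomes a guard page.
      char* block = static_cast<char*>(
          mmap(nullptr, 2 * page, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
      if (block == MAP_FAILED) return 1;
      mprotect(block + page, page, PROT_NONE);  // install the guard page
      block[page - 1] = 1;  // fine: last byte of the usable page
      // block[page] = 1;   // would fault: first byte of the guard page
      munmap(block, 2 * page);
      return 0;
    }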
@@ -89,16 +89,103 @@ TimezoneCache* OS::CreateTimezoneCache() {
  return new PosixDefaultTimezoneCache();
}

// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

void* OS::Allocate(const size_t requested, size_t* allocated,
                   OS::MemoryPermission access, void* hint) {
  const size_t msize = RoundUp(requested, AllocateAlignment());
  int prot = GetProtectionFromMemoryPermission(access);
  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
                     kMmapFdOffset);
  if (mbase == MAP_FAILED) return NULL;
  *allocated = msize;
  return mbase;
}

// static
void* OS::ReserveRegion(size_t size, void* hint) {
  void* result =
      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
           kMmapFd, kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}

// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                               size_t* allocated) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  hint = AlignedAddress(hint, alignment);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* result = ReserveRegion(request_size, hint);
  if (result == nullptr) {
    *allocated = 0;
    return nullptr;
  }

  uint8_t* base = static_cast<uint8_t*>(result);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  *allocated = aligned_size;
  return static_cast<void*>(aligned_base);
}

// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address, size, prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  return true;
}

// static
bool OS::UncommitRegion(void* address, size_t size) {
  return mmap(address, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY, kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}

// static
bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
                              size_t free_size) {
  return munmap(free_start, free_size) == 0;
}

// static
bool OS::ReleaseRegion(void* address, size_t size) {
  return munmap(address, size) == 0;
}

// static
bool OS::HasLazyCommits() { return false; }

std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
@@ -124,16 +211,16 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
    return result;
  }

  mapinfos = reinterpret_cast<procfs_mapinfo *>(
      malloc(num * sizeof(procfs_mapinfo)));
  mapinfos =
      reinterpret_cast<procfs_mapinfo*>(malloc(num * sizeof(procfs_mapinfo)));
  if (mapinfos == NULL) {
    close(proc_fd);
    return result;
  }

  /* Fill the map entries. */
  if (devctl(proc_fd, DCMD_PROC_PAGEDATA,
             mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
  if (devctl(proc_fd, DCMD_PROC_PAGEDATA, mapinfos,
             num * sizeof(procfs_mapinfo), &num) != EOK) {
    free(mapinfos);
    close(proc_fd);
    return result;
@@ -146,8 +233,8 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
      if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
        continue;
      }
      result.push_back(SharedLibraryAddress(
          map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
      result.push_back(SharedLibraryAddress(map.info.path, mapinfo->vaddr,
                                            mapinfo->vaddr + mapinfo->size));
    }
  }
  free(mapinfos);
@@ -155,132 +242,7 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  return result;
}

void OS::SignalCodeMovingGC() {
}

// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }

VirtualMemory::VirtualMemory(size_t size, void* hint)
    : address_(ReserveRegion(size, hint)), size_(size) {}

VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
    : address_(NULL), size_(0) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  hint = AlignedAddress(hint, alignment);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation =
      mmap(hint, request_size, PROT_NONE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY, kMmapFd, kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  uint8_t* base = static_cast<uint8_t*>(reservation);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}

void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}

bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}

bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}

bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}

void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
  void* result =
      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
           kMmapFd, kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}

bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  return true;
}

bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}

bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}

bool VirtualMemory::HasLazyCommits() {
  return false;
}

void OS::SignalCodeMovingGC() {}

}  // namespace base
}  // namespace v8
@@ -58,49 +58,47 @@ double SolarisTimezoneCache::LocalTimeOffset() {

TimezoneCache* OS::CreateTimezoneCache() { return new SolarisTimezoneCache(); }

// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

void* OS::Allocate(const size_t requested, size_t* allocated,
                   OS::MemoryPermission access, void* hint) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = GetProtectionFromMemoryPermission(access);
  void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
  void* mbase =
      mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);

  if (mbase == MAP_FAILED) return NULL;
  *allocated = msize;
  return mbase;
}

// static
void* OS::ReserveRegion(size_t size, void* hint) {
  void* result =
      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
           kMmapFd, kMmapFdOffset);

std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  return std::vector<SharedLibraryAddress>();
  if (result == MAP_FAILED) return NULL;

  return result;
}

void OS::SignalCodeMovingGC() {
}

// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }

VirtualMemory::VirtualMemory(size_t size, void* hint)
    : address_(ReserveRegion(size, hint)), size_(size) {}

VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
    : address_(NULL), size_(0) {
// static
void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                               size_t* allocated) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  hint = AlignedAddress(hint, alignment);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation =
      mmap(hint, request_size, PROT_NONE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
  if (reservation == MAP_FAILED) return;
  void* result = ReserveRegion(request_size, hint);
  if (result == nullptr) {
    *allocated = 0;
    return nullptr;
  }

  uint8_t* base = static_cast<uint8_t*>(reservation);
  uint8_t* base = static_cast<uint8_t*>(result);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

@@ -122,88 +120,50 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
  *allocated = aligned_size;
  return static_cast<void*>(aligned_base);
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}

void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}

bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}

bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}

bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}

void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
  void* result =
      mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
           kMmapFd, kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}

bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd,
  if (MAP_FAILED == mmap(address, size, prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }
  return true;
}

bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
// static
bool OS::UncommitRegion(void* address, size_t size) {
  return mmap(address, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}

bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
                                         void* free_start, size_t free_size) {
// static
bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
                              size_t free_size) {
  return munmap(free_start, free_size) == 0;
}

bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
// static
bool OS::ReleaseRegion(void* address, size_t size) {
  return munmap(address, size) == 0;
}

bool VirtualMemory::HasLazyCommits() {
// static
bool OS::HasLazyCommits() {
  // TODO(alph): implement for the platform.
  return false;
}

std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  return std::vector<SharedLibraryAddress>();
}

void OS::SignalCodeMovingGC() {}

}  // namespace base
}  // namespace v8
@@ -737,6 +737,8 @@ void* OS::GetRandomMmapAddr() {
  return reinterpret_cast<void *>(address);
}

namespace {

static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
                                    void* hint) {
  LPVOID base = NULL;
@@ -762,6 +764,8 @@ static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
  return base;
}

}  // namespace

void* OS::Allocate(const size_t requested, size_t* allocated,
                   bool is_executable, void* hint) {
  return OS::Allocate(requested, allocated,
@@ -809,18 +813,15 @@ void OS::Free(void* address, const size_t size) {
  USE(size);
}

intptr_t OS::CommitPageSize() {
  return 4096;
}

void OS::ProtectCode(void* address, const size_t size) {
  DWORD old_protect;
  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
}

void OS::Guard(void* address, const size_t size) {
  DWORD oldprotect;
  VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
@@ -831,6 +832,76 @@ void OS::Unprotect(void* address, const size_t size) {
  USE(result);
}

// static
void* OS::ReserveRegion(size_t size, void* hint) {
  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
}

void* OS::ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                               size_t* allocated) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  hint = AlignedAddress(hint, alignment);
  size_t request_size =
      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
  void* address = ReserveRegion(request_size, hint);
  if (address == nullptr) {
    *allocated = 0;
    return nullptr;
  }
  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
  // Try reducing the size by freeing and then reallocating a specific area.
  bool result = ReleaseRegion(address, request_size);
  USE(result);
  DCHECK(result);
  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
  if (address != nullptr) {
    request_size = size;
    DCHECK(base == static_cast<uint8_t*>(address));
  } else {
    // Resizing failed, just go with a bigger area.
    address = ReserveRegion(request_size, hint);
    if (address == nullptr) {
      *allocated = 0;
      return nullptr;
    }
  }

  *allocated = request_size;
  return static_cast<void*>(address);
}

// static
bool OS::CommitRegion(void* address, size_t size, bool is_executable) {
  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
    return false;
  }
  return true;
}

// static
bool OS::UncommitRegion(void* address, size_t size) {
  return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}

// static
bool OS::ReleasePartialRegion(void* address, size_t size, void* free_start,
                              size_t free_size) {
  return VirtualFree(free_start, free_size, MEM_DECOMMIT) != 0;
}

// static
bool OS::ReleaseRegion(void* address, size_t size) {
  return VirtualFree(address, 0, MEM_RELEASE) != 0;
}

// static
bool OS::HasLazyCommits() {
  // TODO(alph): implement for the platform.
  return false;
}

void OS::Sleep(TimeDelta interval) {
  ::Sleep(static_cast<DWORD>(interval.InMilliseconds()));
}
@@ -1204,108 +1275,6 @@ int OS::ActivationFrameAlignment() {
#endif
}

VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }

VirtualMemory::VirtualMemory(size_t size, void* hint)
    : address_(ReserveRegion(size, hint)), size_(size) {}

VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
    : address_(NULL), size_(0) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  hint = AlignedAddress(hint, alignment);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* address = ReserveRegion(request_size, hint);
  if (address == NULL) return;
  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
  // Try reducing the size by freeing and then reallocating a specific area.
  bool result = ReleaseRegion(address, request_size);
  USE(result);
  DCHECK(result);
  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
  if (address != NULL) {
    request_size = size;
    DCHECK(base == static_cast<uint8_t*>(address));
  } else {
    // Resizing failed, just go with a bigger area.
    address = ReserveRegion(request_size, hint);
    if (address == NULL) return;
  }
  address_ = address;
  size_ = request_size;
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}

void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}

bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}

bool VirtualMemory::Uncommit(void* address, size_t size) {
  DCHECK(IsReserved());
  return UncommitRegion(address, size);
}

bool VirtualMemory::Guard(void* address) {
  if (NULL == VirtualAlloc(address,
                           OS::CommitPageSize(),
                           MEM_COMMIT,
                           PAGE_NOACCESS)) {
    return false;
  }
  return true;
}

void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
}

bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
    return false;
  }
  return true;
}

bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
}

bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
                                         void* free_start, size_t free_size) {
  return VirtualFree(free_start, free_size, MEM_DECOMMIT) != 0;
}

bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return VirtualFree(base, 0, MEM_RELEASE) != 0;
}

bool VirtualMemory::HasLazyCommits() {
  // TODO(alph): implement for the platform.
  return false;
}

// ----------------------------------------------------------------------------
// Win32 thread support.
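The Win32 implementations above map the reserve/commit split directly onto the `VirtualAlloc`/`VirtualFree` state machine. A minimal sketch of that lifecycle outside V8 (Win32 API only; the 64 KB commit size is arbitrary):

    #include <windows.h>

    int main() {
      const SIZE_T size = 1 << 20;  // 1 MB of address space
      // Reserve address space only; no physical pages are committed yet.
      void* region = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
      if (region == nullptr) return 1;
      // Commit the first 64 KB read-write (analogous to OS::CommitRegion).
      void* committed =
          VirtualAlloc(region, 64 * 1024, MEM_COMMIT, PAGE_READWRITE);
      if (committed != nullptr) {
        static_cast<char*>(committed)[0] = 1;  // now backed by real memory
        VirtualFree(committed, 64 * 1024, MEM_DECOMMIT);  // OS::UncommitRegion
      }
      VirtualFree(region, 0, MEM_RELEASE);  // OS::ReleaseRegion
      return 0;
    }

Note the asymmetry the code above also respects: `MEM_DECOMMIT` takes the actual size, while `MEM_RELEASE` requires a size of zero and the exact base returned by the original reservation.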
@@ -197,6 +197,22 @@ class V8_BASE_EXPORT OS {
  // Get the Alignment guaranteed by Allocate().
  static size_t AllocateAlignment();

  static void* ReserveRegion(size_t size, void* hint);

  static void* ReserveAlignedRegion(size_t size, size_t alignment, void* hint,
                                    size_t* allocated);

  static bool CommitRegion(void* address, size_t size, bool is_executable);

  static bool UncommitRegion(void* address, size_t size);

  static bool ReleasePartialRegion(void* base, size_t size, void* free_start,
                                   size_t free_size);

  static bool ReleaseRegion(void* address, size_t size);

  static bool HasLazyCommits();

  // Sleep for a specified time interval.
  static void Sleep(TimeDelta interval);

@@ -285,141 +301,6 @@ class V8_BASE_EXPORT OS {
  DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
};

// Represents and controls an area of reserved memory.
// Control of the reserved memory can be assigned to another VirtualMemory
// object by calling TakeControl. This removes the reserved memory from the
// 'from' instance.
class V8_BASE_EXPORT VirtualMemory {
 public:
  // Empty VirtualMemory object, controlling no reserved memory.
  VirtualMemory();

  // Reserves virtual memory with size.
  explicit VirtualMemory(size_t size, void* hint);

  // Reserves virtual memory containing an area of the given size that
  // is aligned per alignment. This may not be at the position returned
  // by address().
  VirtualMemory(size_t size, size_t alignment, void* hint);

  // Construct a virtual memory by assigning it some already mapped address
  // and size.
  VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}

  // Releases the reserved memory, if any, controlled by this VirtualMemory
  // object.
  ~VirtualMemory();

  // Returns whether the memory has been reserved.
  bool IsReserved() const { return address_ != nullptr; }

  // Initialize or resets an embedded VirtualMemory object.
  void Reset();

  // Returns the start address of the reserved memory.
  // If the memory was reserved with an alignment, this address is not
  // necessarily aligned. The user might need to round it up to a multiple of
  // the alignment to get the start of the aligned block.
  void* address() const {
    DCHECK(IsReserved());
    return address_;
  }

  void* end() const {
    DCHECK(IsReserved());
    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address_) +
                                   size_);
  }

  // Returns the size of the reserved memory. The returned value is only
  // meaningful when IsReserved() returns true.
  // If the memory was reserved with an alignment, this size may be larger
  // than the requested size.
  size_t size() const { return size_; }

  // Commits real memory. Returns whether the operation succeeded.
  bool Commit(void* address, size_t size, bool is_executable);

  // Uncommit real memory. Returns whether the operation succeeded.
  bool Uncommit(void* address, size_t size);

  // Creates a single guard page at the given address.
  bool Guard(void* address);

  // Releases the memory after |free_start|. Returns the bytes released.
  size_t ReleasePartial(void* free_start) {
    DCHECK(IsReserved());
    // Notice: Order is important here. The VirtualMemory object might live
    // inside the allocated region.
    const size_t size = size_ - (reinterpret_cast<size_t>(free_start) -
                                 reinterpret_cast<size_t>(address_));
    CHECK(InVM(free_start, size));
    DCHECK_LT(address_, free_start);
    DCHECK_LT(free_start, reinterpret_cast<void*>(
                              reinterpret_cast<size_t>(address_) + size_));
    const bool result = ReleasePartialRegion(address_, size_, free_start, size);
    USE(result);
    DCHECK(result);
    size_ -= size;
    return size;
  }

  void Release() {
    DCHECK(IsReserved());
    // Notice: Order is important here. The VirtualMemory object might live
    // inside the allocated region.
    void* address = address_;
    size_t size = size_;
    CHECK(InVM(address, size));
    Reset();
    bool result = ReleaseRegion(address, size);
    USE(result);
    DCHECK(result);
  }

  // Assign control of the reserved region to a different VirtualMemory object.
  // The old object is no longer functional (IsReserved() returns false).
  void TakeControl(VirtualMemory* from) {
    DCHECK(!IsReserved());
    address_ = from->address_;
    size_ = from->size_;
    from->Reset();
  }

  static void* ReserveRegion(size_t size, void* hint);

  static bool CommitRegion(void* base, size_t size, bool is_executable);

  static bool UncommitRegion(void* base, size_t size);

  // Must be called with a base pointer that has been returned by ReserveRegion
  // and the same size it was reserved with.
  static bool ReleaseRegion(void* base, size_t size);

  // Must be called with a base pointer that has been returned by ReserveRegion
  // and the same size it was reserved with.
  // [free_start, free_start + free_size] is the memory that will be released.
  static bool ReleasePartialRegion(void* base, size_t size, void* free_start,
                                   size_t free_size);

  // Returns true if OS performs lazy commits, i.e. the memory allocation call
  // defers actual physical memory allocation till the first memory access.
  // Otherwise returns false.
  static bool HasLazyCommits();

 private:
  bool InVM(void* address, size_t size) {
    return (reinterpret_cast<uintptr_t>(address_) <=
            reinterpret_cast<uintptr_t>(address)) &&
           ((reinterpret_cast<uintptr_t>(address_) + size_) >=
            (reinterpret_cast<uintptr_t>(address) + size));
  }

  void* address_;  // Start address of the virtual memory.
  size_t size_;    // Size of the virtual memory.
};

// ----------------------------------------------------------------------------
// Thread
//
@@ -138,7 +138,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
  void Free(void* data, size_t length) override {
#if USE_VM
    if (RoundToPageSize(&length)) {
      base::VirtualMemory::ReleaseRegion(data, length);
      base::OS::ReleaseRegion(data, length);
      return;
    }
#endif
@@ -156,9 +156,9 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
  }
#if USE_VM
  void* VirtualMemoryAllocate(size_t length) {
    void* data = base::VirtualMemory::ReserveRegion(length, nullptr);
    if (data && !base::VirtualMemory::CommitRegion(data, length, false)) {
      base::VirtualMemory::ReleaseRegion(data, length);
    void* data = base::OS::ReserveRegion(length, nullptr);
    if (data && !base::OS::CommitRegion(data, length, false)) {
      base::OS::ReleaseRegion(data, length);
      return nullptr;
    }
    MSAN_MEMORY_IS_INITIALIZED(data, length);
@@ -13,7 +13,7 @@ namespace v8 {
namespace internal {

void SequentialMarkingDeque::SetUp() {
  base::VirtualMemory reservation;
  VirtualMemory reservation;
  if (!AllocVirtualMemory(kMaxSize, heap_->GetRandomMmapAddr(), &reservation)) {
    V8::FatalProcessOutOfMemory("SequentialMarkingDeque::SetUp");
  }
@@ -7,6 +7,7 @@

#include <deque>

#include "src/allocation.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
#include "src/cancelable-task.h"
@@ -131,7 +132,7 @@ class SequentialMarkingDeque {

  base::Mutex mutex_;

  base::VirtualMemory backing_store_;
  VirtualMemory backing_store_;
  size_t backing_store_committed_size_;
  HeapObject** array_;
  // array_[(top - 1) & mask_] is the top element in the deque. The Deque is
@@ -8,7 +8,6 @@

#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
#include "src/counters.h"
#include "src/heap/array-buffer-tracker.h"
@@ -118,7 +117,7 @@ bool CodeRange::SetUp(size_t requested) {

  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);

  base::VirtualMemory reservation;
  VirtualMemory reservation;
  if (!AlignedAllocVirtualMemory(
          requested,
          Max(kCodeRangeAreaAlignment,
@@ -408,16 +407,14 @@ bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {

bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                   Executability executable) {
  if (!base::VirtualMemory::CommitRegion(base, size,
                                         executable == EXECUTABLE)) {
  if (!base::OS::CommitRegion(base, size, executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}

void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  // Code which is part of the code-range does not have its own VirtualMemory.
@@ -439,7 +436,7 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
    code_range()->FreeRawMemory(base, size);
  } else {
    DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
    bool result = base::VirtualMemory::ReleaseRegion(base, size);
    bool result = base::OS::ReleaseRegion(base, size);
    USE(result);
    DCHECK(result);
  }
@@ -447,8 +444,8 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,

Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                              void* hint,
                                              base::VirtualMemory* controller) {
  base::VirtualMemory reservation;
                                              VirtualMemory* controller) {
  VirtualMemory reservation;
  if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation))
    return nullptr;

@@ -465,9 +462,9 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,

Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, void* hint, base::VirtualMemory* controller) {
    Executability executable, void* hint, VirtualMemory* controller) {
  DCHECK(commit_size <= reserve_size);
  base::VirtualMemory reservation;
  VirtualMemory reservation;
  Address base =
      ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
  if (base == NULL) return NULL;
@@ -525,7 +522,7 @@ void MemoryChunk::InitializationMemoryFence() {
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner,
                                     base::VirtualMemory* reservation) {
                                     VirtualMemory* reservation) {
  MemoryChunk* chunk = FromAddress(base);

  DCHECK(base == chunk->address());
@@ -686,7 +683,7 @@ bool MemoryChunk::CommitArea(size_t requested) {
}

size_t MemoryChunk::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE)
  if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
    return size();
  return high_water_mark_.Value();
}
@@ -719,7 +716,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = nullptr;
  base::VirtualMemory reservation;
  VirtualMemory reservation;
  Address area_start = nullptr;
  Address area_end = nullptr;
  void* address_hint = heap->GetRandomMmapAddr();
@@ -860,7 +857,7 @@ size_t Page::AvailableInFreeList() {
size_t Page::ShrinkToHighWaterMark() {
  // Shrinking only makes sense outside of the CodeRange, where we don't care
  // about address space fragmentation.
  base::VirtualMemory* reservation = reserved_memory();
  VirtualMemory* reservation = reserved_memory();
  if (!reservation->IsReserved()) return 0;

  // Shrink pages to high water mark. The water mark points either to a filler
@@ -938,7 +935,7 @@ void Page::DestroyBlackArea(Address start, Address end) {
void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
                                        size_t bytes_to_free,
                                        Address new_area_end) {
  base::VirtualMemory* reservation = chunk->reserved_memory();
  VirtualMemory* reservation = chunk->reserved_memory();
  DCHECK(reservation->IsReserved());
  chunk->size_ -= bytes_to_free;
  chunk->area_end_ = new_area_end;
@@ -966,7 +963,7 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());

  base::VirtualMemory* reservation = chunk->reserved_memory();
  VirtualMemory* reservation = chunk->reserved_memory();
  const size_t size =
      reservation->IsReserved() ? reservation->size() : chunk->size();
  DCHECK_GE(size_.Value(), static_cast<size_t>(size));
@@ -985,7 +982,7 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  chunk->ReleaseAllocatedMemory();

  base::VirtualMemory* reservation = chunk->reserved_memory();
  VirtualMemory* reservation = chunk->reserved_memory();
  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
    UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
  } else {
@@ -1078,7 +1075,7 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
  if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
    return nullptr;
  }
  base::VirtualMemory reservation(start, size);
  VirtualMemory reservation(start, size);
  MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                          NOT_EXECUTABLE, owner, &reservation);
  size_.Increment(size);
@@ -1099,7 +1096,7 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size,

bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
  if (!base::OS::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}
@@ -1151,9 +1148,8 @@ intptr_t MemoryAllocator::GetCommitPageSize() {
  }
}

bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                             Address start, size_t commit_size,
bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
                                             size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  Address header = start;
@@ -1463,7 +1459,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {

size_t PagedSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  for (Page* page : *this) {
@@ -2688,7 +2684,7 @@ void NewSpace::RecordPromotion(HeapObject* obj) {

size_t NewSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = to_space_.CommittedPhysicalMemory();
  if (from_space_.is_committed()) {
@@ -354,7 +354,7 @@ class MemoryChunk {
      + kUIntptrSize       // uintptr_t flags_
      + kPointerSize       // Address area_start_
      + kPointerSize       // Address area_end_
      + 2 * kPointerSize   // base::VirtualMemory reservation_
      + 2 * kPointerSize   // VirtualMemory reservation_
      + kPointerSize       // Address owner_
      + kPointerSize       // Heap* heap_
      + kIntptrSize        // intptr_t progress_bar_
@@ -631,12 +631,12 @@ class MemoryChunk {
  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner,
                                 base::VirtualMemory* reservation);
                                 VirtualMemory* reservation);

  // Should be called when memory chunk is about to be freed.
  void ReleaseAllocatedMemory();

  base::VirtualMemory* reserved_memory() { return &reservation_; }
  VirtualMemory* reserved_memory() { return &reservation_; }

  size_t size_;
  uintptr_t flags_;
@@ -646,7 +646,7 @@ class MemoryChunk {
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  base::VirtualMemory reservation_;
  VirtualMemory reservation_;

  // The identity of the owning space. This is tagged as a failure pointer, but
  // no failure can be in an object, so this can be distinguished from any entry
@@ -1070,7 +1070,7 @@ class CodeRange {
  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  base::VirtualMemory virtual_memory_;
  VirtualMemory virtual_memory_;

  // The global mutex guards free_list_ and allocation_list_ as GC threads may
  // access both lists concurrently to the main thread.
@@ -1338,14 +1338,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
                  Executability executable, Space* space);

  Address ReserveAlignedMemory(size_t requested, size_t alignment, void* hint,
                               base::VirtualMemory* controller);
                               VirtualMemory* controller);
  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                size_t alignment, Executability executable,
                                void* hint, base::VirtualMemory* controller);
                                void* hint, VirtualMemory* controller);

  bool CommitMemory(Address addr, size_t size, Executability executable);

  void FreeMemory(base::VirtualMemory* reservation, Executability executable);
  void FreeMemory(VirtualMemory* reservation, Executability executable);
  void FreeMemory(Address addr, size_t size, Executability executable);

  // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
@@ -1371,8 +1371,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
                                              Address start, size_t commit_size,
  MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm, Address start,
                                              size_t commit_size,
                                              size_t reserved_size);

  CodeRange* code_range() { return code_range_; }
@@ -1435,7 +1435,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
  base::AtomicValue<void*> lowest_ever_allocated_;
  base::AtomicValue<void*> highest_ever_allocated_;

  base::VirtualMemory last_chunk_;
  VirtualMemory last_chunk_;
  Unmapper unmapper_;

  friend class heap::TestCodeRangeScope;
@@ -2749,8 +2749,7 @@ class NewSpace : public Space {
  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;
  base::VirtualMemory reservation_;

  VirtualMemory reservation_;

  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;
@@ -32,7 +32,7 @@ void StoreBuffer::SetUp() {
  // Allocate 3x the buffer size, so that we can start the new store buffer
  // aligned to 2x the size. This lets us use a bit test to detect the end of
  // the area.
  base::VirtualMemory reservation;
  VirtualMemory reservation;
  if (!AllocVirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr(),
                          &reservation)) {
    V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
@@ -208,7 +208,7 @@ class StoreBuffer {
  // IN_GC mode.
  StoreBufferMode mode_;

  base::VirtualMemory virtual_memory_;
  VirtualMemory virtual_memory_;

  // Callbacks are more efficient than reading out the gc state for every
  // store buffer operation.
@@ -130,7 +130,7 @@ TEST(AlignedAllocOOM) {
TEST(AllocVirtualMemoryOOM) {
  AllocationPlatform platform;
  CHECK(!platform.oom_callback_called);
  v8::base::VirtualMemory result;
  v8::internal::VirtualMemory result;
  bool success =
      v8::internal::AllocVirtualMemory(GetHugeMemoryAmount(), nullptr, &result);
  // On a few systems, allocation somehow succeeds.
@@ -141,7 +141,7 @@ TEST(AllocVirtualMemoryOOM) {
TEST(AlignedAllocVirtualMemoryOOM) {
  AllocationPlatform platform;
  CHECK(!platform.oom_callback_called);
  v8::base::VirtualMemory result;
  v8::internal::VirtualMemory result;
  bool success = v8::internal::AlignedAllocVirtualMemory(
      GetHugeMemoryAmount(), v8::base::OS::AllocateAlignment(), nullptr,
      &result);
@@ -25,32 +25,28 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Tests of the TokenLock class from lock.h

#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>  // for usleep()

#include "src/v8.h"

#include "src/base/platform/platform.h"
#include "test/cctest/cctest.h"

using OS = v8::base::OS;

namespace v8 {
namespace internal {

TEST(VirtualMemory) {
  v8::base::VirtualMemory* vm =
      new v8::base::VirtualMemory(1 * MB, v8::base::OS::GetRandomMmapAddr());
  CHECK(vm->IsReserved());
  void* block_addr = vm->address();
TEST(OSReserveMemory) {
  size_t mem_size = 0;
  void* mem_addr = OS::ReserveAlignedRegion(1 * MB, OS::AllocateAlignment(),
                                            OS::GetRandomMmapAddr(), &mem_size);
  CHECK_NE(0, mem_size);
  CHECK_NOT_NULL(mem_addr);
  size_t block_size = 4 * KB;
  CHECK(vm->Commit(block_addr, block_size, false));
  CHECK(OS::CommitRegion(mem_addr, block_size, false));
  // Check whether we can write to memory.
  int* addr = static_cast<int*>(block_addr);
  addr[KB - 1] = 2;
  CHECK(vm->Uncommit(block_addr, block_size));
  delete vm;
  int* addr = static_cast<int*>(mem_addr);
  addr[KB - 1] = 2;
  CHECK(OS::UncommitRegion(mem_addr, block_size));
  OS::ReleaseRegion(mem_addr, mem_size);
}

}  // namespace internal
@@ -25,26 +25,29 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Tests of the TokenLock class from lock.h

#include <stdlib.h>

#include "src/v8.h"

#include "src/base/platform/platform.h"
#include "src/base/win32-headers.h"
#include "test/cctest/cctest.h"

TEST(VirtualMemory) {
  v8::base::VirtualMemory* vm =
      new v8::base::VirtualMemory(1 * i::MB, v8::base::OS::GetRandomMmapAddr());
  CHECK(vm->IsReserved());
  void* block_addr = vm->address();
  size_t block_size = 4 * i::KB;
  CHECK(vm->Commit(block_addr, block_size, false));
using OS = v8::base::OS;

namespace v8 {
namespace internal {

TEST(OSReserveMemory) {
  size_t mem_size = 0;
  void* mem_addr = OS::ReserveAlignedRegion(1 * MB, OS::AllocateAlignment(),
                                            OS::GetRandomMmapAddr(), &mem_size);
  CHECK_NE(0, mem_size);
  CHECK_NOT_NULL(mem_addr);
  size_t block_size = 4 * KB;
  CHECK(OS::CommitRegion(mem_addr, block_size, false));
  // Check whether we can write to memory.
  int* addr = static_cast<int*>(block_addr);
  addr[i::KB - 1] = 2;
  CHECK(vm->Uncommit(block_addr, block_size));
  delete vm;
  int* addr = static_cast<int*>(mem_addr);
  addr[KB - 1] = 2;
  CHECK(OS::UncommitRegion(mem_addr, block_size));
  OS::ReleaseRegion(mem_addr, mem_size);
}

}  // namespace internal
}  // namespace v8