[ptr-compr] Introduce BoundedPageAllocator and use it instead of CodeRange.
This is a reland of 16816e53be
Bug: v8:8096
Cq-Include-Trybots: luci.chromium.try:linux_chromium_rel_ng
Change-Id: I257fc391931a0a4bf01f2e8136183aaed044231c
Reviewed-on: https://chromium-review.googlesource.com/1226915
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55928}
Commit 37d87f610f (parent b175a30dba)
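
Editor's note (not part of the commit): the sketch below is a minimal illustration of the pattern this change adopts inside MemoryAllocator -- reserve one contiguous chunk of virtual address space up front and wrap it in a base::BoundedPageAllocator, which then serves every executable-page allocation from inside that reservation. It assumes V8's internal headers and only uses the VirtualMemory / AlignedAllocVirtualMemory / BoundedPageAllocator APIs that appear later in this diff.

  // Illustrative sketch only; it mirrors MemoryAllocator::InitializeCodePageAllocator().
  #include <memory>
  #include "src/allocation.h"                   // VirtualMemory, AlignedAllocVirtualMemory
  #include "src/base/bounded-page-allocator.h"  // v8::base::BoundedPageAllocator
  #include "src/base/logging.h"                 // CHECK

  std::unique_ptr<v8::base::BoundedPageAllocator> WrapReservation(
      v8::PageAllocator* platform, size_t size,
      v8::internal::VirtualMemory* reservation) {
    // Reserve (but do not commit) |size| bytes; pages stay inaccessible until
    // SetPermissions() is called on them.
    CHECK(v8::internal::AlignedAllocVirtualMemory(
        platform, size, platform->AllocatePageSize(), nullptr, reservation));
    // Every AllocatePages() call on the returned allocator lands inside
    // [reservation->address(), reservation->address() + reservation->size()).
    return std::unique_ptr<v8::base::BoundedPageAllocator>(
        new v8::base::BoundedPageAllocator(platform, reservation->address(),
                                           reservation->size(),
                                           platform->AllocatePageSize()));
  }
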
BUILD.gn (2 lines changed)
@@ -3016,6 +3016,8 @@ v8_component("v8_libbase") {
     "src/base/base-export.h",
     "src/base/bits.cc",
     "src/base/bits.h",
+    "src/base/bounded-page-allocator.cc",
+    "src/base/bounded-page-allocator.h",
     "src/base/build_config.h",
     "src/base/compiler-specific.h",
     "src/base/cpu.cc",
@@ -161,7 +161,9 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
     if (!OnCriticalMemoryPressure(request_size)) break;
   }
 #if defined(LEAK_SANITIZER)
-  if (result != nullptr) {
+  if (result != nullptr && page_allocator == GetPlatformPageAllocator()) {
+    // Notify LSAN only about the plaform memory allocations or we will
+    // "allocate"/"deallocate" certain parts of memory twice.
     __lsan_register_root_region(result, size);
   }
 #endif

@@ -174,7 +176,9 @@ bool FreePages(v8::PageAllocator* page_allocator, void* address,
   DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
   bool result = page_allocator->FreePages(address, size);
 #if defined(LEAK_SANITIZER)
-  if (result) {
+  if (result && page_allocator == GetPlatformPageAllocator()) {
+    // Notify LSAN only about the plaform memory allocations or we will
+    // "allocate"/"deallocate" certain parts of memory twice.
     __lsan_unregister_root_region(address, size);
   }
 #endif

@@ -187,7 +191,9 @@ bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
   DCHECK_LT(new_size, size);
   bool result = page_allocator->ReleasePages(address, size, new_size);
 #if defined(LEAK_SANITIZER)
-  if (result) {
+  if (result && page_allocator == GetPlatformPageAllocator()) {
+    // Notify LSAN only about the plaform memory allocations or we will
+    // "allocate"/"deallocate" certain parts of memory twice.
     __lsan_unregister_root_region(address, size);
     __lsan_register_root_region(address, new_size);
   }

@@ -225,11 +231,12 @@ VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
     : page_allocator_(page_allocator), address_(kNullAddress), size_(0) {
   DCHECK_NOT_NULL(page_allocator);
   size_t page_size = page_allocator_->AllocatePageSize();
-  size_t alloc_size = RoundUp(size, page_size);
+  alignment = RoundUp(alignment, page_size);
+  size = RoundUp(size, page_size);
   address_ = reinterpret_cast<Address>(AllocatePages(
-      page_allocator_, hint, alloc_size, alignment, PageAllocator::kNoAccess));
+      page_allocator_, hint, size, alignment, PageAllocator::kNoAccess));
   if (address_ != kNullAddress) {
-    size_ = alloc_size;
+    size_ = size;
   }
 }

@@ -260,12 +267,13 @@ size_t VirtualMemory::Release(Address free_start) {
   // Notice: Order is important here. The VirtualMemory object might live
   // inside the allocated region.
   const size_t free_size = size_ - (free_start - address_);
+  size_t old_size = size_;
   CHECK(InVM(free_start, free_size));
   DCHECK_LT(address_, free_start);
   DCHECK_LT(free_start, address_ + size_);
-  CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(address_), size_,
-                     size_ - free_size));
   size_ -= free_size;
+  CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(address_),
+                     old_size, size_));
   return free_size;
 }

@@ -158,10 +158,11 @@ class V8_EXPORT_PRIVATE VirtualMemory final {
   VirtualMemory() = default;

   // Reserves virtual memory containing an area of the given size that is
-  // aligned per alignment. This may not be at the position returned by
-  // address().
+  // aligned per |alignment| rounded up to the |page_allocator|'s allocate page
+  // size.
+  // This may not be at the position returned by address().
   VirtualMemory(v8::PageAllocator* page_allocator, size_t size, void* hint,
-                size_t alignment = AllocatePageSize());
+                size_t alignment = 1);

   // Construct a virtual memory by assigning it some already mapped address
   // and size.

@@ -8707,11 +8707,10 @@ void Isolate::SetStackLimit(uintptr_t stack_limit) {

 void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  if (isolate->heap()->memory_allocator()->code_range()->valid()) {
-    *start = reinterpret_cast<void*>(
-        isolate->heap()->memory_allocator()->code_range()->start());
-    *length_in_bytes =
-        isolate->heap()->memory_allocator()->code_range()->size();
+  i::MemoryAllocator* memory_allocator = isolate->heap()->memory_allocator();
+  if (memory_allocator->code_range_valid()) {
+    *start = reinterpret_cast<void*>(memory_allocator->code_range_start());
+    *length_in_bytes = memory_allocator->code_range_size();
   } else {
     *start = nullptr;
     *length_in_bytes = 0;

@@ -76,7 +76,7 @@ AssemblerOptions AssemblerOptions::Default(
   options.inline_offheap_trampolines = !serializer;
 #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
   options.code_range_start =
-      isolate->heap()->memory_allocator()->code_range()->start();
+      isolate->heap()->memory_allocator()->code_range_start();
 #endif
   return options;
 }

src/base/bounded-page-allocator.cc (new file, 83 lines)
@@ -0,0 +1,83 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bounded-page-allocator.h"
+
+namespace v8 {
+namespace base {
+
+BoundedPageAllocator::BoundedPageAllocator(v8::PageAllocator* page_allocator,
+                                           Address start, size_t size,
+                                           size_t allocate_page_size)
+    : allocate_page_size_(allocate_page_size),
+      commit_page_size_(page_allocator->CommitPageSize()),
+      page_allocator_(page_allocator),
+      region_allocator_(start, size, allocate_page_size_) {
+  CHECK_NOT_NULL(page_allocator);
+  CHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
+  CHECK(IsAligned(allocate_page_size_, commit_page_size_));
+}
+
+void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
+                                          size_t alignment,
+                                          PageAllocator::Permission access) {
+  LockGuard<Mutex> guard(&mutex_);
+  CHECK(IsAligned(alignment, region_allocator_.page_size()));
+
+  // Region allocator does not support alignments bigger than it's own
+  // allocation alignment.
+  CHECK_LE(alignment, allocate_page_size_);
+
+  // TODO(ishell): Consider using randomized version here.
+  Address address = region_allocator_.AllocateRegion(size);
+  if (address == RegionAllocator::kAllocationFailure) {
+    return nullptr;
+  }
+  CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size,
+                                        access));
+  return reinterpret_cast<void*>(address);
+}
+
+bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
+  LockGuard<Mutex> guard(&mutex_);
+
+  Address address = reinterpret_cast<Address>(raw_address);
+  size_t freed_size = region_allocator_.FreeRegion(address);
+  if (freed_size != size) return false;
+  CHECK(page_allocator_->SetPermissions(raw_address, size,
+                                        PageAllocator::kNoAccess));
+  return true;
+}
+
+bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
+                                        size_t new_size) {
+  Address address = reinterpret_cast<Address>(raw_address);
+#ifdef DEBUG
+  {
+    CHECK_LT(new_size, size);
+    CHECK(IsAligned(size - new_size, commit_page_size_));
+    // There must be an allocated region at given |address| of a size not
+    // smaller than |size|.
+    LockGuard<Mutex> guard(&mutex_);
+    size_t used_region_size = region_allocator_.CheckRegion(address);
+    CHECK_LE(size, used_region_size);
+  }
+#endif
+  // Keep the region in "used" state just uncommit some pages.
+  Address free_address = address + new_size;
+  size_t free_size = size - new_size;
+  return page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
+                                         free_size, PageAllocator::kNoAccess);
+}
+
+bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
+                                          PageAllocator::Permission access) {
+  DCHECK(IsAligned(reinterpret_cast<Address>(address), commit_page_size_));
+  DCHECK(IsAligned(size, commit_page_size_));
+  DCHECK(region_allocator_.contains(reinterpret_cast<Address>(address), size));
+  return page_allocator_->SetPermissions(address, size, access);
+}
+
+}  // namespace base
+}  // namespace v8

src/base/bounded-page-allocator.h (new file, 78 lines)
@@ -0,0 +1,78 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_
+#define V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_
+
+#include "include/v8-platform.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/region-allocator.h"
+
+namespace v8 {
+namespace base {
+
+// This is a v8::PageAllocator implementation that allocates pages within the
+// pre-reserved region of virtual space. This class requires the virtual space
+// to be kept reserved during the lifetime of this object.
+// The main application of bounded page allocator are
+//  - V8 heap pointer compression which requires the whole V8 heap to be
+//    allocated within a contiguous range of virtual address space,
+//  - executable page allocation, which allows to use PC-relative 32-bit code
+//    displacement on certain 64-bit platforms.
+// Bounded page allocator uses other page allocator instance for doing actual
+// page allocations.
+// The implementation is thread-safe.
+class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
+ public:
+  typedef uintptr_t Address;
+
+  BoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
+                       size_t size, size_t allocate_page_size);
+  ~BoundedPageAllocator() override = default;
+
+  Address begin() const { return region_allocator_.begin(); }
+  size_t size() const { return region_allocator_.size(); }
+
+  // Returns true if given address is in the range controlled by the bounded
+  // page allocator instance.
+  bool contains(Address address) const {
+    return region_allocator_.contains(address);
+  }
+
+  size_t AllocatePageSize() override { return allocate_page_size_; }
+
+  size_t CommitPageSize() override { return commit_page_size_; }
+
+  void SetRandomMmapSeed(int64_t seed) override {
+    page_allocator_->SetRandomMmapSeed(seed);
+  }
+
+  void* GetRandomMmapAddr() override {
+    return page_allocator_->GetRandomMmapAddr();
+  }
+
+  void* AllocatePages(void* address, size_t size, size_t alignment,
+                      PageAllocator::Permission access) override;
+
+  bool FreePages(void* address, size_t size) override;
+
+  bool ReleasePages(void* address, size_t size, size_t new_size) override;
+
+  bool SetPermissions(void* address, size_t size,
+                      PageAllocator::Permission access) override;
+
+ private:
+  v8::base::Mutex mutex_;
+  const size_t allocate_page_size_;
+  const size_t commit_page_size_;
+  v8::PageAllocator* const page_allocator_;
+  v8::base::RegionAllocator region_allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(BoundedPageAllocator);
+};
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_
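
Editor's note (not part of the commit): a minimal usage sketch of the class declared above, assuming an already-reserved, page-aligned region starting at region_start of region_size bytes. Only the methods declared in this header are used; the region itself is a placeholder assumption.

  // Illustrative sketch only.
  #include "src/base/bounded-page-allocator.h"

  void UseBoundedAllocator(v8::PageAllocator* platform,
                           uintptr_t region_start, size_t region_size) {
    // Serve pages from [region_start, region_start + region_size) only.
    v8::base::BoundedPageAllocator allocator(
        platform, region_start, region_size, platform->AllocatePageSize());

    // Allocate one read-write page inside the bounded region.
    size_t page = allocator.AllocatePageSize();
    void* p = allocator.AllocatePages(nullptr, page, page,
                                      v8::PageAllocator::kReadWrite);
    if (p != nullptr) {
      // The returned page always lies inside the managed region, i.e.
      // allocator.contains(reinterpret_cast<uintptr_t>(p)) holds here.
      allocator.FreePages(p, page);  // Drops the region and its permissions.
    }
  }

The point of the wrapper is that callers keep programming against the ordinary v8::PageAllocator interface while every allocation is confined to the pre-reserved range.
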
@@ -61,6 +61,10 @@ class V8_BASE_EXPORT RegionAllocator final {
     return whole_region_.contains(address);
   }

+  bool contains(Address address, size_t size) const {
+    return whole_region_.contains(address, size);
+  }
+
   // Total size of not yet aquired regions.
   size_t free_size() const { return free_size_; }

@@ -84,6 +88,12 @@ class V8_BASE_EXPORT RegionAllocator final {
       return (address - begin()) < size();
     }

+    bool contains(Address address, size_t size) const {
+      STATIC_ASSERT(std::is_unsigned<Address>::value);
+      Address offset = address - begin();
+      return (offset < size_) && (offset <= size_ - size);
+    }
+
     bool is_used() const { return is_used_; }
     void set_is_used(bool used) { is_used_ = used; }

@@ -155,6 +165,7 @@ class V8_BASE_EXPORT RegionAllocator final {
   FRIEND_TEST(RegionAllocatorTest, AllocateRegionRandom);
   FRIEND_TEST(RegionAllocatorTest, Fragmentation);
   FRIEND_TEST(RegionAllocatorTest, FindRegion);
+  FRIEND_TEST(RegionAllocatorTest, Contains);

   DISALLOW_COPY_AND_ASSIGN(RegionAllocator);
 };
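
Editor's note (not part of the commit): the two-argument Region::contains() added above deliberately avoids computing address + size, which could wrap around for ranges near the top of the address space. A standalone equivalent of the same overflow-safe check, for illustration only:

  // Illustrative only: the same containment test on plain unsigned integers.
  #include <cstddef>
  #include <cstdint>

  bool RangeContains(uintptr_t begin, size_t region_size,
                     uintptr_t address, size_t size) {
    // offset wraps to a huge value when address < begin, so the first
    // comparison also rejects addresses below the region.
    uintptr_t offset = address - begin;
    return (offset < region_size) && (offset <= region_size - size);
  }

Note that a zero-size query exactly at the end of the region is rejected by the first comparison, which matches the expectations encoded in the new RegionAllocatorTest::Contains test later in this diff.
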
@@ -49,10 +49,10 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
     return options;
   }

-  CodeRange* code_range = isolate->heap()->memory_allocator()->code_range();
   bool pc_relative_calls_fit_in_code_range =
-      code_range->valid() &&
-      code_range->size() <= kMaxPCRelativeCodeRangeInMB * MB;
+      isolate->heap()->memory_allocator()->code_range_valid() &&
+      isolate->heap()->memory_allocator()->code_range_size() <=
+          kMaxPCRelativeCodeRangeInMB * MB;

   options.isolate_independent_code = true;
   options.use_pc_relative_calls_and_jumps = pc_relative_calls_fit_in_code_range;

@@ -64,8 +64,8 @@ void InitializeCode(Heap* heap, Handle<Code> code, int object_size,
                     bool is_turbofanned, int stack_slots,
                     int safepoint_table_offset, int handler_table_offset) {
   DCHECK(IsAligned(code->address(), kCodeAlignment));
-  DCHECK(!heap->memory_allocator()->code_range()->valid() ||
-         heap->memory_allocator()->code_range()->contains(code->address()) ||
+  DCHECK(!heap->memory_allocator()->code_range_valid() ||
+         heap->memory_allocator()->code_range_contains(code->address()) ||
          object_size <= heap->code_space()->AreaSize());

   bool has_unwinding_info = desc.unwinding_info != nullptr;

@@ -2674,8 +2674,8 @@ Handle<Code> Factory::NewCodeForDeserialization(uint32_t size) {
   heap->ZapCodeObject(result->address(), size);
   result->set_map_after_allocation(*code_map(), SKIP_WRITE_BARRIER);
   DCHECK(IsAligned(result->address(), kCodeAlignment));
-  DCHECK(!heap->memory_allocator()->code_range()->valid() ||
-         heap->memory_allocator()->code_range()->contains(result->address()) ||
+  DCHECK(!heap->memory_allocator()->code_range_valid() ||
+         heap->memory_allocator()->code_range_contains(result->address()) ||
          static_cast<int>(size) <= heap->code_space()->AreaSize());
   return handle(Code::cast(result), isolate());
 }

@@ -2738,10 +2738,9 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
   if (FLAG_verify_heap) new_code->ObjectVerify(isolate());
 #endif
   DCHECK(IsAligned(new_code->address(), kCodeAlignment));
-  DCHECK(
-      !heap->memory_allocator()->code_range()->valid() ||
-      heap->memory_allocator()->code_range()->contains(new_code->address()) ||
-      obj_size <= heap->code_space()->AreaSize());
+  DCHECK(!heap->memory_allocator()->code_range_valid() ||
+         heap->memory_allocator()->code_range_contains(new_code->address()) ||
+         obj_size <= heap->code_space()->AreaSize());
   return new_code;
 }

@@ -6,6 +6,7 @@
 #define V8_HEAP_SPACES_INL_H_

 #include "src/base/atomic-utils.h"
+#include "src/base/bounded-page-allocator.h"
 #include "src/base/v8-fallthrough.h"
 #include "src/heap/incremental-marking.h"
 #include "src/heap/spaces.h"

@@ -545,6 +546,32 @@ bool LocalAllocationBuffer::TryFreeLast(HeapObject* object, int object_size) {
   return false;
 }

+// -----------------------------------------------------------------------------
+// MemoryAllocator
+
+bool MemoryAllocator::code_range_valid() const {
+  return code_page_allocator_instance_.get() != nullptr;
+}
+
+Address MemoryAllocator::code_range_start() const {
+  DCHECK(code_range_valid());
+  // TODO(ishell): once a follow-up CL is landed add assert that
+  // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
+  return code_range_start_;
+}
+
+size_t MemoryAllocator::code_range_size() const {
+  DCHECK(code_range_valid());
+  // TODO(ishell): once a follow-up CL is landed add assert that
+  // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
+  return code_range_size_;
+}
+
+bool MemoryAllocator::code_range_contains(Address address) const {
+  DCHECK(code_range_valid());
+  return (address - code_range_start_) < code_range_size_;
+}
+
 }  // namespace internal
 }  // namespace v8

@@ -94,194 +94,9 @@ PauseAllocationObserversScope::~PauseAllocationObserversScope() {
   }
 }

-// -----------------------------------------------------------------------------
-// CodeRange
-
 static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
     LAZY_INSTANCE_INITIALIZER;

-CodeRange::CodeRange(Isolate* isolate, v8::PageAllocator* page_allocator,
-                     size_t requested)
-    : isolate_(isolate),
-      free_list_(0),
-      allocation_list_(0),
-      current_allocation_block_index_(0),
-      requested_code_range_size_(0) {
-  DCHECK(!virtual_memory_.IsReserved());
-
-  if (requested == 0) {
-    // When a target requires the code range feature, we put all code objects
-    // in a kMaximalCodeRangeSize range of virtual address space, so that
-    // they can call each other with near calls.
-    if (kRequiresCodeRange) {
-      requested = kMaximalCodeRangeSize;
-    } else {
-      return;
-    }
-  }
-
-  if (requested <= kMinimumCodeRangeSize) {
-    requested = kMinimumCodeRangeSize;
-  }
-
-  const size_t reserved_area =
-      kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
-  if (requested < (kMaximalCodeRangeSize - reserved_area))
-    requested += reserved_area;
-
-  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
-
-  requested_code_range_size_ = requested;
-
-  VirtualMemory reservation;
-  void* hint = code_range_address_hint.Pointer()->GetAddressHint(requested);
-  if (!AlignedAllocVirtualMemory(
-          page_allocator, requested,
-          Max(kCodeRangeAreaAlignment, page_allocator->AllocatePageSize()),
-          hint, &reservation)) {
-    V8::FatalProcessOutOfMemory(isolate,
-                                "CodeRange setup: allocate virtual memory");
-  }
-
-  // We are sure that we have mapped a block of requested addresses.
-  DCHECK_GE(reservation.size(), requested);
-  Address base = reservation.address();
-
-  // On some platforms, specifically Win64, we need to reserve some pages at
-  // the beginning of an executable space.
-  if (reserved_area > 0) {
-    if (!reservation.SetPermissions(base, reserved_area,
-                                    PageAllocator::kReadWrite))
-      V8::FatalProcessOutOfMemory(isolate, "CodeRange setup: set permissions");
-
-    base += reserved_area;
-  }
-  Address aligned_base = ::RoundUp(base, MemoryChunk::kAlignment);
-  size_t size = reservation.size() - (aligned_base - base) - reserved_area;
-  allocation_list_.emplace_back(aligned_base, size);
-  current_allocation_block_index_ = 0;
-
-  LOG(isolate_,
-      NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
-               requested));
-  virtual_memory_.TakeControl(&reservation);
-}
-
-CodeRange::~CodeRange() {
-  if (virtual_memory_.IsReserved()) {
-    Address addr = start();
-    virtual_memory_.Free();
-    code_range_address_hint.Pointer()->NotifyFreedCodeRange(
-        reinterpret_cast<void*>(addr), requested_code_range_size_);
-  }
-}
-
-bool CodeRange::CompareFreeBlockAddress(const FreeBlock& left,
-                                        const FreeBlock& right) {
-  return left.start < right.start;
-}
-
-
-bool CodeRange::GetNextAllocationBlock(size_t requested) {
-  for (current_allocation_block_index_++;
-       current_allocation_block_index_ < allocation_list_.size();
-       current_allocation_block_index_++) {
-    if (requested <= allocation_list_[current_allocation_block_index_].size) {
-      return true;  // Found a large enough allocation block.
-    }
-  }
-
-  // Sort and merge the free blocks on the free list and the allocation list.
-  free_list_.insert(free_list_.end(), allocation_list_.begin(),
-                    allocation_list_.end());
-  allocation_list_.clear();
-  std::sort(free_list_.begin(), free_list_.end(), &CompareFreeBlockAddress);
-  for (size_t i = 0; i < free_list_.size();) {
-    FreeBlock merged = free_list_[i];
-    i++;
-    // Add adjacent free blocks to the current merged block.
-    while (i < free_list_.size() &&
-           free_list_[i].start == merged.start + merged.size) {
-      merged.size += free_list_[i].size;
-      i++;
-    }
-    if (merged.size > 0) {
-      allocation_list_.push_back(merged);
-    }
-  }
-  free_list_.clear();
-
-  for (current_allocation_block_index_ = 0;
-       current_allocation_block_index_ < allocation_list_.size();
-       current_allocation_block_index_++) {
-    if (requested <= allocation_list_[current_allocation_block_index_].size) {
-      return true;  // Found a large enough allocation block.
-    }
-  }
-  current_allocation_block_index_ = 0;
-  // Code range is full or too fragmented.
-  return false;
-}
-
-
-Address CodeRange::AllocateRawMemory(const size_t requested_size,
-                                     const size_t commit_size,
-                                     size_t* allocated) {
-  // requested_size includes the header and two guard regions, while commit_size
-  // only includes the header.
-  DCHECK_LE(commit_size,
-            requested_size - 2 * MemoryAllocator::CodePageGuardSize());
-  FreeBlock current;
-  if (!ReserveBlock(requested_size, &current)) {
-    *allocated = 0;
-    return kNullAddress;
-  }
-  *allocated = current.size;
-  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
-          &virtual_memory_, current.start, commit_size, *allocated)) {
-    *allocated = 0;
-    ReleaseBlock(&current);
-    return kNullAddress;
-  }
-  return current.start;
-}
-
-void CodeRange::FreeRawMemory(Address address, size_t length) {
-  DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
-  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
-  free_list_.emplace_back(address, length);
-  virtual_memory_.SetPermissions(address, length, PageAllocator::kNoAccess);
-}
-
-bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
-  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
-  DCHECK(allocation_list_.empty() ||
-         current_allocation_block_index_ < allocation_list_.size());
-  if (allocation_list_.empty() ||
-      requested_size > allocation_list_[current_allocation_block_index_].size) {
-    // Find an allocation block large enough.
-    if (!GetNextAllocationBlock(requested_size)) return false;
-  }
-  // Commit the requested memory at the start of the current allocation block.
-  size_t aligned_requested = ::RoundUp(requested_size, MemoryChunk::kAlignment);
-  *block = allocation_list_[current_allocation_block_index_];
-  // Don't leave a small free block, useless for a large object or chunk.
-  if (aligned_requested < (block->size - Page::kPageSize)) {
-    block->size = aligned_requested;
-  }
-  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
-  allocation_list_[current_allocation_block_index_].start += block->size;
-  allocation_list_[current_allocation_block_index_].size -= block->size;
-  return true;
-}
-
-
-void CodeRange::ReleaseBlock(const FreeBlock* block) {
-  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
-  free_list_.push_back(*block);
-}
-
 void* CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
   base::LockGuard<base::Mutex> guard(&mutex_);
   auto it = recently_freed_.find(code_range_size);
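
Editor's note (not part of the commit): the free-list bookkeeping deleted above (FreeBlock vectors that are sorted and merged on demand) is what base::RegionAllocator now provides to BoundedPageAllocator. A hedged sketch of that replacement bookkeeping, using only RegionAllocator calls that appear elsewhere in this diff (AllocateRegion, FreeRegion, CheckRegion) and assuming start and size are multiples of page_size:

  // Illustrative sketch only.
  #include "src/base/logging.h"           // CHECK_EQ, CHECK_NE
  #include "src/base/region-allocator.h"

  void RegionBookkeepingSketch(uintptr_t start, size_t size, size_t page_size) {
    v8::base::RegionAllocator regions(start, size, page_size);

    // Carve two regions out of the managed range.
    uintptr_t a = regions.AllocateRegion(2 * page_size);
    uintptr_t b = regions.AllocateRegion(4 * page_size);
    CHECK_NE(a, v8::base::RegionAllocator::kAllocationFailure);
    CHECK_NE(b, v8::base::RegionAllocator::kAllocationFailure);

    // FreeRegion() returns the size of the region that was released; freed
    // space becomes available to later AllocateRegion() calls.
    CHECK_EQ(regions.FreeRegion(a), 2 * page_size);
    CHECK_EQ(regions.CheckRegion(b), 4 * page_size);
  }
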
@@ -307,17 +122,89 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
                                  size_t code_range_size)
     : isolate_(isolate),
       data_page_allocator_(GetPlatformPageAllocator()),
-      code_page_allocator_(GetPlatformPageAllocator()),
-      code_range_(nullptr),
+      code_page_allocator_(nullptr),
+      code_range_start_(kNullAddress),
+      code_range_size_(0),
       capacity_(RoundUp(capacity, Page::kPageSize)),
       size_(0),
       size_executable_(0),
       lowest_ever_allocated_(static_cast<Address>(-1ll)),
       highest_ever_allocated_(kNullAddress),
       unmapper_(isolate->heap(), this) {
-  code_range_ = new CodeRange(isolate_, code_page_allocator_, code_range_size);
+  InitializeCodePageAllocator(data_page_allocator_, code_range_size);
 }

+void MemoryAllocator::InitializeCodePageAllocator(
+    v8::PageAllocator* page_allocator, size_t requested) {
+  DCHECK_NULL(code_page_allocator_instance_.get());
+
+  code_page_allocator_ = page_allocator;
+
+  if (requested == 0) {
+    if (!kRequiresCodeRange) return;
+    // When a target requires the code range feature, we put all code objects
+    // in a kMaximalCodeRangeSize range of virtual address space, so that
+    // they can call each other with near calls.
+    requested = kMaximalCodeRangeSize;
+  } else if (requested <= kMinimumCodeRangeSize) {
+    requested = kMinimumCodeRangeSize;
+  }
+
+  const size_t reserved_area =
+      kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
+  if (requested < (kMaximalCodeRangeSize - reserved_area)) {
+    requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
+    // Fullfilling both reserved pages requirement and huge code area
+    // alignments is not supported (requires re-implementation).
+    DCHECK_LE(kCodeRangeAreaAlignment, page_allocator->AllocatePageSize());
+  }
+  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
+
+  VirtualMemory reservation;
+
+  void* hint = code_range_address_hint.Pointer()->GetAddressHint(requested);
+  if (!AlignedAllocVirtualMemory(
+          page_allocator, requested,
+          Max(kCodeRangeAreaAlignment, page_allocator->AllocatePageSize()),
+          hint, &reservation)) {
+    V8::FatalProcessOutOfMemory(isolate_,
+                                "CodeRange setup: allocate virtual memory");
+  }
+  code_range_start_ = reservation.address();
+  code_range_size_ = reservation.size();
+
+  // We are sure that we have mapped a block of requested addresses.
+  DCHECK_GE(reservation.size(), requested);
+  Address base = reservation.address();
+
+  // On some platforms, specifically Win64, we need to reserve some pages at
+  // the beginning of an executable space. See
+  //   https://cs.chromium.org/chromium/src/components/crash/content/
+  //   app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
+  // for details.
+  if (reserved_area > 0) {
+    if (!reservation.SetPermissions(base, reserved_area,
+                                    PageAllocator::kReadWrite))
+      V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
+
+    base += reserved_area;
+  }
+  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
+  size_t size =
+      RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
+                MemoryChunk::kPageSize);
+  DCHECK(IsAligned(aligned_base, kCodeRangeAreaAlignment));
+
+  LOG(isolate_,
+      NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
+               requested));
+
+  heap_reservation_.TakeControl(&reservation);
+  code_page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
+      page_allocator, aligned_base, size,
+      static_cast<size_t>(MemoryChunk::kAlignment));
+  code_page_allocator_ = code_page_allocator_instance_.get();
+}

 void MemoryAllocator::TearDown() {
   unmapper()->TearDown();
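
Editor's note (not part of the commit): the layout carved out of the reservation above can be summarized with a small worked computation. The helper below is an illustrative assumption, not V8 code; it mirrors how InitializeCodePageAllocator() derives aligned_base and the size handed to BoundedPageAllocator, and it assumes power-of-two alignments.

  // Illustrative arithmetic only; mirrors InitializeCodePageAllocator() above.
  #include <cstddef>
  #include <cstdint>

  struct CodeRangeLayout {
    uintptr_t code_range_start;  // == reservation start
    uintptr_t aligned_base;      // start of the BoundedPageAllocator region
    size_t bounded_size;         // size handed to BoundedPageAllocator
  };

  CodeRangeLayout ComputeLayout(uintptr_t reservation_start,
                                size_t reservation_size, size_t reserved_area,
                                size_t chunk_alignment, size_t chunk_page_size) {
    uintptr_t base = reservation_start + reserved_area;
    // Round the base up to the chunk alignment...
    uintptr_t aligned_base = (base + chunk_alignment - 1) & ~(chunk_alignment - 1);
    // ...and round the remaining size down to whole chunk pages, so
    // reserved_area + bounded_size never exceeds the reservation.
    size_t remaining = reservation_size - (aligned_base - base) - reserved_area;
    size_t bounded_size = remaining & ~(chunk_page_size - 1);
    return {reservation_start, aligned_base, bounded_size};
  }

This is exactly the relationship recorded as invariants in the spaces.h comments further down: the heap reservation covers the code range, and the code range covers the optional read-write pages plus the bounded code page allocator region.
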
@@ -331,9 +218,6 @@ void MemoryAllocator::TearDown() {
   if (last_chunk_.IsReserved()) {
     last_chunk_.Free();
   }
-
-  delete code_range_;
-  code_range_ = nullptr;
 }

 class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {

@@ -505,27 +389,17 @@ bool MemoryAllocator::CommitMemory(Address base, size_t size) {

 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                  Executability executable) {
-  // TODO(ishell): make code_range part of memory allocator?
-  // Code which is part of the code-range does not have its own VirtualMemory.
-  DCHECK(code_range() == nullptr ||
-         !code_range()->contains(reservation->address()));
-  DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
-         reservation->size() <= Page::kPageSize);
-
+  // Executability and page allocator must be in sync.
+  CHECK_EQ(reservation->page_allocator(), page_allocator(executable));
   reservation->Free();
 }


 void MemoryAllocator::FreeMemory(Address base, size_t size,
                                  Executability executable) {
-  // TODO(ishell): make code_range part of memory allocator?
-  if (code_range() != nullptr && code_range()->contains(base)) {
-    DCHECK(executable == EXECUTABLE);
-    code_range()->FreeRawMemory(base, size);
-  } else {
-    DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
-    CHECK(FreePages(data_page_allocator_, reinterpret_cast<void*>(base), size));
-  }
+  // TODO(ishell): use proper page allocator
+  CHECK(FreePages(page_allocator(executable), reinterpret_cast<void*>(base),
+                  size));
 }

 Address MemoryAllocator::AllocateAlignedMemory(
@@ -604,12 +478,8 @@ void MemoryChunk::SetReadAndExecutable() {
     size_t page_size = MemoryAllocator::GetCommitPageSize();
     DCHECK(IsAddressAligned(protect_start, page_size));
     size_t protect_size = RoundUp(area_size(), page_size);
-    // TODO(ishell): use reservation_.SetPermissions() once it's always
-    // initialized.
-    v8::PageAllocator* page_allocator =
-        heap()->memory_allocator()->code_page_allocator();
-    CHECK(SetPermissions(page_allocator, protect_start, protect_size,
-                         PageAllocator::kReadExecute));
+    CHECK(reservation_.SetPermissions(protect_start, protect_size,
+                                      PageAllocator::kReadExecute));
   }
 }

@@ -627,12 +497,8 @@ void MemoryChunk::SetReadAndWritable() {
     size_t page_size = MemoryAllocator::GetCommitPageSize();
     DCHECK(IsAddressAligned(unprotect_start, page_size));
     size_t unprotect_size = RoundUp(area_size(), page_size);
-    // TODO(ishell): use reservation_.SetPermissions() once it's always
-    // initialized.
-    v8::PageAllocator* page_allocator =
-        heap()->memory_allocator()->code_page_allocator();
-    CHECK(SetPermissions(page_allocator, unprotect_start, unprotect_size,
-                         PageAllocator::kReadWrite));
+    CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
+                                      PageAllocator::kReadWrite));
   }
 }

@@ -700,12 +566,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
       size_t page_size = MemoryAllocator::GetCommitPageSize();
       DCHECK(IsAddressAligned(area_start, page_size));
       size_t area_size = RoundUp(area_end - area_start, page_size);
-      // TODO(ishell): use reservation->SetPermissions() once it's always
-      // initialized.
-      v8::PageAllocator* page_allocator =
-          heap->memory_allocator()->page_allocator(executable);
-      CHECK(SetPermissions(page_allocator, area_start, area_size,
-                           PageAllocator::kReadWriteExecute));
+      CHECK(reservation.SetPermissions(area_start, area_size,
+                                       PageAllocator::kReadWriteExecute));
     }
   }

@@ -869,29 +731,12 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
     // Size of header (not executable) plus area (executable).
     size_t commit_size = ::RoundUp(
         CodePageGuardStartOffset() + commit_area_size, GetCommitPageSize());
-    // Allocate executable memory either from code range or from the OS.
-#ifdef V8_TARGET_ARCH_MIPS64
-    // Use code range only for large object space on mips64 to keep address
-    // range within 256-MB memory region.
-    if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
-#else
-    if (code_range()->valid()) {
-#endif
-      base =
-          code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
-      DCHECK(IsAligned(base, MemoryChunk::kAlignment));
-      if (base == kNullAddress) return nullptr;
-      size_ += chunk_size;
-      // Update executable memory size.
-      size_executable_ += chunk_size;
-    } else {
-      base = AllocateAlignedMemory(chunk_size, commit_size,
-                                   MemoryChunk::kAlignment, executable,
-                                   address_hint, &reservation);
-      if (base == kNullAddress) return nullptr;
-      // Update executable memory size.
-      size_executable_ += reservation.size();
-    }
+    base =
+        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
+                              executable, address_hint, &reservation);
+    if (base == kNullAddress) return nullptr;
+    // Update executable memory size.
+    size_executable_ += reservation.size();

     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, CodePageGuardStartOffset(), kZapValue);
@@ -1246,11 +1091,9 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size) {

 bool MemoryAllocator::UncommitBlock(VirtualMemory* reservation, Address start,
                                     size_t size) {
-  // TODO(ishell): use reservation->SetPermissions() once it's always
-  // initialized.
-  if (!SetPermissions(reservation->page_allocator(), start, size,
-                      PageAllocator::kNoAccess))
+  if (!reservation->SetPermissions(start, size, PageAllocator::kNoAccess)) {
     return false;
+  }
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
   return true;
 }

@@ -3346,17 +3189,17 @@ void ReadOnlyPage::MakeHeaderRelocatable() {
 void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
   const size_t page_size = MemoryAllocator::GetCommitPageSize();
   const size_t area_start_offset = RoundUp(Page::kObjectStartOffset, page_size);
+  MemoryAllocator* memory_allocator = heap()->memory_allocator();
   for (Page* p : *this) {
     ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
     if (access == PageAllocator::kRead) {
       page->MakeHeaderRelocatable();
     }
-    // TODO(ishell): use page->reserved_memory()->SetPermissions() once it's
-    // always initialized.
+
+    // Read only pages don't have valid reservation object so we get proper
+    // page allocator manually.
     v8::PageAllocator* page_allocator =
-        page->IsFlagSet(Page::IS_EXECUTABLE)
-            ? heap()->memory_allocator()->code_page_allocator()
-            : heap()->memory_allocator()->data_page_allocator();
+        memory_allocator->page_allocator(page->executable());
     CHECK(SetPermissions(page_allocator, page->address() + area_start_offset,
                          page->size() - area_start_offset, access));
   }

@@ -14,6 +14,7 @@

 #include "src/allocation.h"
 #include "src/base/atomic-utils.h"
+#include "src/base/bounded-page-allocator.h"
 #include "src/base/iterator.h"
 #include "src/base/list.h"
 #include "src/base/platform/mutex.h"

@@ -32,7 +33,7 @@ namespace internal {

 namespace heap {
 class HeapTester;
-class TestCodeRangeScope;
+class TestCodePageAllocatorScope;
 }  // namespace heap

 class AllocationObserver;

@@ -1077,95 +1078,6 @@ class MemoryChunkValidator {
 };


-// ----------------------------------------------------------------------------
-// All heap objects containing executable code (code objects) must be allocated
-// from a 2 GB range of memory, so that they can call each other using 32-bit
-// displacements. This happens automatically on 32-bit platforms, where 32-bit
-// displacements cover the entire 4GB virtual address space. On 64-bit
-// platforms, we support this using the CodeRange object, which reserves and
-// manages a range of virtual memory.
-class CodeRange {
- public:
-  CodeRange(Isolate* isolate, v8::PageAllocator* page_allocator,
-            size_t requested_size);
-  ~CodeRange();
-
-  bool valid() { return virtual_memory_.IsReserved(); }
-  Address start() {
-    DCHECK(valid());
-    return virtual_memory_.address();
-  }
-  size_t size() {
-    DCHECK(valid());
-    return virtual_memory_.size();
-  }
-  bool contains(Address address) {
-    if (!valid()) return false;
-    Address start = virtual_memory_.address();
-    return start <= address && address < start + virtual_memory_.size();
-  }
-
-  // Allocates a chunk of memory from the large-object portion of
-  // the code range. On platforms with no separate code range, should
-  // not be called.
-  V8_WARN_UNUSED_RESULT Address AllocateRawMemory(const size_t requested_size,
-                                                  const size_t commit_size,
-                                                  size_t* allocated);
-  void FreeRawMemory(Address buf, size_t length);
-
- private:
-  class FreeBlock {
-   public:
-    FreeBlock() : start(0), size(0) {}
-    FreeBlock(Address start_arg, size_t size_arg)
-        : start(start_arg), size(size_arg) {
-      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
-      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
-    }
-    FreeBlock(void* start_arg, size_t size_arg)
-        : start(reinterpret_cast<Address>(start_arg)), size(size_arg) {
-      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
-      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
-    }
-
-    Address start;
-    size_t size;
-  };
-
-  // Finds a block on the allocation list that contains at least the
-  // requested amount of memory. If none is found, sorts and merges
-  // the existing free memory blocks, and searches again.
-  // If none can be found, returns false.
-  bool GetNextAllocationBlock(size_t requested);
-  // Compares the start addresses of two free blocks.
-  static bool CompareFreeBlockAddress(const FreeBlock& left,
-                                      const FreeBlock& right);
-  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
-  void ReleaseBlock(const FreeBlock* block);
-
-  Isolate* isolate_;
-
-  // The reserved range of virtual memory that all code objects are put in.
-  VirtualMemory virtual_memory_;
-
-  // The global mutex guards free_list_ and allocation_list_ as GC threads may
-  // access both lists concurrently to the main thread.
-  base::Mutex code_range_mutex_;
-
-  // Freed blocks of memory are added to the free list. When the allocation
-  // list is exhausted, the free list is sorted and merged to make the new
-  // allocation list.
-  std::vector<FreeBlock> free_list_;
-
-  // Memory is allocated from the free blocks on the allocation list.
-  // The block at current_allocation_block_index_ is the current block.
-  std::vector<FreeBlock> allocation_list_;
-  size_t current_allocation_block_index_;
-  size_t requested_code_range_size_;
-
-  DISALLOW_COPY_AND_ASSIGN(CodeRange);
-};
-
 // The process-wide singleton that keeps track of code range regions with the
 // intention to reuse free code range regions as a workaround for CFG memory
 // leaks (see crbug.com/870054).

@@ -1484,10 +1396,17 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
                : data_page_allocator_;
   }

-  CodeRange* code_range() { return code_range_; }
+  V8_INLINE bool code_range_valid() const;
+  V8_INLINE Address code_range_start() const;
+  V8_INLINE size_t code_range_size() const;
+  V8_INLINE bool code_range_contains(Address address) const;
+
   Unmapper* unmapper() { return &unmapper_; }

  private:
+  void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
+                                   size_t requested);
+
   // PreFree logically frees the object, i.e., it takes care of the size
   // bookkeeping and calls the allocation callback.
   void PreFreeMemory(MemoryChunk* chunk);

@@ -1536,10 +1455,44 @@ class V8_EXPORT_PRIVATE MemoryAllocator {

   Isolate* isolate_;

+  // This object controls virtual space reserved for V8 heap instance.
+  // Depending on the configuration it may contain the following:
+  // - no reservation (on 32-bit architectures)
+  // - code range reservation used by bounded code page allocator (on 64-bit
+  //   architectures without pointers compression in V8 heap)
+  // - data + code range reservation (on 64-bit architectures with pointers
+  //   compression in V8 heap)
+  VirtualMemory heap_reservation_;
+
+  // Page allocator used for allocating data pages. Depending on the
+  // configuration it may be a page allocator instance provided by v8::Platform
+  // or a BoundedPageAllocator (when pointer compression is enabled).
   v8::PageAllocator* data_page_allocator_;
+
+  // Page allocator used for allocating code pages. Depending on the
+  // configuration it may be a page allocator instance provided by v8::Platform
+  // or a BoundedPageAllocator (when pointer compression is enabled or
+  // on those 64-bit architectures where pc-relative 32-bit displacement
+  // can be used for call and jump instructions).
   v8::PageAllocator* code_page_allocator_;
+
-  CodeRange* code_range_;
+  // A part of the |heap_reservation_| that may contain executable code
+  // including reserved page with read-write access in the beginning.
+  // See details below.
+  // TODO(ishell): introduce base::AddressRange code_range_; instead.
+  Address code_range_start_;
+  size_t code_range_size_;
+
+  // This unique pointer owns the instance of bounded code allocator
+  // that controls executable pages allocation. It does not control the
+  // optionally existing page in the beginning of the |code_range_|.
+  // So, summarizing all above, the following condition holds:
+  // 1) |heap_reservation_| >= |code_range_|
+  // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
+  // 3) |heap_reservation_| is AllocatePageSize()-aligned
+  // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
+  // 5) |code_range_| is CommitPageSize()-aligned
+  std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;

   // Maximum space size in bytes.
   size_t capacity_;

@@ -1563,7 +1516,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
   // Data structure to remember allocated executable memory chunks.
   std::unordered_set<MemoryChunk*> executable_memory_;

-  friend class heap::TestCodeRangeScope;
+  friend class heap::TestCodePageAllocatorScope;

   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };

@@ -205,8 +205,8 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,

   // Check whether we interrupted setup/teardown of a stack frame in JS code.
   // Avoid this check for C++ code, as that would trigger false positives.
-  if (regs->pc &&
-      isolate->heap()->memory_allocator()->code_range()->contains(
+  if (regs->pc && isolate->heap()->memory_allocator()->code_range_valid() &&
+      isolate->heap()->memory_allocator()->code_range_contains(
           reinterpret_cast<i::Address>(regs->pc)) &&
       IsNoFrameRegion(reinterpret_cast<i::Address>(regs->pc))) {
     // The frame is not setup, so it'd be hard to iterate the stack. Bailout.

@@ -172,80 +172,6 @@ TEST(StressJS) {
   env->Exit();
 }

-
-// CodeRange test.
-// Tests memory management in a CodeRange by allocating and freeing blocks,
-// using a pseudorandom generator to choose block sizes geometrically
-// distributed between 2 * Page::kPageSize and 2^5 + 1 * Page::kPageSize.
-// Ensure that the freed chunks are collected and reused by allocating (in
-// total) more than the size of the CodeRange.
-
-// This pseudorandom generator does not need to be particularly good.
-// Use the lower half of the V8::Random() generator.
-unsigned int Pseudorandom() {
-  static uint32_t lo = 2345;
-  lo = 18273 * (lo & 0xFFFF) + (lo >> 16);  // Provably not 0.
-  return lo & 0xFFFF;
-}
-
-namespace {
-
-// Plain old data class. Represents a block of allocated memory.
-class Block {
- public:
-  Block(Address base_arg, int size_arg)
-      : base(base_arg), size(size_arg) {}
-
-  Address base;
-  int size;
-};
-
-}  // namespace
-
-TEST(CodeRange) {
-  const size_t code_range_size = 32*MB;
-  CcTest::InitializeVM();
-  CodeRange code_range(reinterpret_cast<Isolate*>(CcTest::isolate()),
-                       GetPlatformPageAllocator(), code_range_size);
-  size_t current_allocated = 0;
-  size_t total_allocated = 0;
-  std::vector<Block> blocks;
-  blocks.reserve(1000);
-
-  while (total_allocated < 5 * code_range_size) {
-    if (current_allocated < code_range_size / 10) {
-      // Allocate a block.
-      // Geometrically distributed sizes, greater than
-      // kMaxRegularHeapObjectSize (which is greater than code page area).
-      // TODO(gc): instead of using 3 use some contant based on code_range_size
-      // kMaxRegularHeapObjectSize.
-      size_t requested = (kMaxRegularHeapObjectSize << (Pseudorandom() % 3)) +
-                         Pseudorandom() % 5000 + 1;
-      requested = RoundUp(requested, MemoryAllocator::GetCommitPageSize());
-      size_t allocated = 0;
-
-      // The request size has to be at least 2 code guard pages larger than the
-      // actual commit size.
-      Address base = code_range.AllocateRawMemory(
-          requested, requested - (2 * MemoryAllocator::CodePageGuardSize()),
-          &allocated);
-      CHECK_NE(base, kNullAddress);
-      blocks.emplace_back(base, static_cast<int>(allocated));
-      current_allocated += static_cast<int>(allocated);
-      total_allocated += static_cast<int>(allocated);
-    } else {
-      // Free a block.
-      size_t index = Pseudorandom() % blocks.size();
-      code_range.FreeRawMemory(blocks[index].base, blocks[index].size);
-      current_allocated -= blocks[index].size;
-      if (index < blocks.size() - 1) {
-        blocks[index] = blocks.back();
-      }
-      blocks.pop_back();
-    }
-  }
-}
-
 }  // namespace heap
 }  // namespace internal
 }  // namespace v8

@@ -27,6 +27,7 @@

 #include <stdlib.h>

+#include "src/base/bounded-page-allocator.h"
 #include "src/base/platform/platform.h"
 #include "src/heap/factory.h"
 #include "src/heap/spaces-inl.h"
@@ -59,41 +60,43 @@ class TestMemoryAllocatorScope {
   DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
 };

-
-// Temporarily sets a given code range in an isolate.
-class TestCodeRangeScope {
+// Temporarily sets a given code page allocator in an isolate.
+class TestCodePageAllocatorScope {
  public:
-  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
+  TestCodePageAllocatorScope(Isolate* isolate,
+                             v8::PageAllocator* code_page_allocator)
       : isolate_(isolate),
-        old_code_range_(isolate->heap()->memory_allocator()->code_range()) {
-    isolate->heap()->memory_allocator()->code_range_ = code_range;
+        old_code_page_allocator_(
+            isolate->heap()->memory_allocator()->code_page_allocator()) {
+    isolate->heap()->memory_allocator()->code_page_allocator_ =
+        code_page_allocator;
   }

-  ~TestCodeRangeScope() {
-    isolate_->heap()->memory_allocator()->code_range_ = old_code_range_;
+  ~TestCodePageAllocatorScope() {
+    isolate_->heap()->memory_allocator()->code_page_allocator_ =
+        old_code_page_allocator_;
   }

  private:
   Isolate* isolate_;
-  CodeRange* old_code_range_;
+  v8::PageAllocator* old_code_page_allocator_;

-  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
+  DISALLOW_COPY_AND_ASSIGN(TestCodePageAllocatorScope);
 };

 static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
-                              CodeRange* code_range, size_t reserve_area_size,
-                              size_t commit_area_size, Executability executable,
-                              Space* space) {
+                              v8::PageAllocator* code_page_allocator,
+                              size_t reserve_area_size, size_t commit_area_size,
+                              Executability executable, Space* space) {
   MemoryAllocator* memory_allocator =
       new MemoryAllocator(isolate, heap->MaxReserved(), 0);
   {
     TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
-    TestCodeRangeScope test_code_range_scope(isolate, code_range);
+    TestCodePageAllocatorScope test_code_page_allocator_scope(
+        isolate, code_page_allocator);

-    v8::PageAllocator* data_page_allocator =
-        memory_allocator->data_page_allocator();
-    v8::PageAllocator* code_page_allocator =
-        memory_allocator->code_page_allocator();
+    v8::PageAllocator* page_allocator =
+        memory_allocator->page_allocator(executable);

     size_t header_size = (executable == EXECUTABLE)
                              ? MemoryAllocator::CodePageGuardStartOffset()

@@ -103,15 +106,12 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,

   MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
       reserve_area_size, commit_area_size, executable, space);
-  size_t alignment = code_range != nullptr && code_range->valid()
-                         ? MemoryChunk::kAlignment
-                         : code_page_allocator->CommitPageSize();
   size_t reserved_size =
       ((executable == EXECUTABLE))
           ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
-                    alignment)
+                    page_allocator->CommitPageSize())
           : RoundUp(header_size + reserve_area_size,
-                    data_page_allocator->CommitPageSize());
+                    page_allocator->CommitPageSize());
   CHECK(memory_chunk->size() == reserved_size);
   CHECK(memory_chunk->area_start() <
         memory_chunk->address() + memory_chunk->size());
@@ -125,39 +125,6 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
   delete memory_allocator;
 }

-TEST(Regress3540) {
-  Isolate* isolate = CcTest::i_isolate();
-  Heap* heap = isolate->heap();
-  MemoryAllocator* memory_allocator =
-      new MemoryAllocator(isolate, heap->MaxReserved(), 0);
-  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
-  size_t code_range_size =
-      kMinimumCodeRangeSize > 0 ? kMinimumCodeRangeSize : 3 * Page::kPageSize;
-  CodeRange* code_range = new CodeRange(
-      isolate, memory_allocator->code_page_allocator(), code_range_size);
-
-  Address address;
-  size_t size;
-  size_t request_size = code_range_size - Page::kPageSize;
-  address = code_range->AllocateRawMemory(
-      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
-      &size);
-  CHECK_NE(address, kNullAddress);
-
-  Address null_address;
-  size_t null_size;
-  request_size = code_range_size - Page::kPageSize;
-  null_address = code_range->AllocateRawMemory(
-      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
-      &null_size);
-  CHECK_EQ(null_address, kNullAddress);
-
-  code_range->FreeRawMemory(address, size);
-  delete code_range;
-  memory_allocator->TearDown();
-  delete memory_allocator;
-}
-
 static unsigned int PseudorandomAreaSize() {
   static uint32_t lo = 2345;
   lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
|
||||
|
||||
// With CodeRange.
|
||||
const size_t code_range_size = 32 * MB;
|
||||
CodeRange* code_range =
|
||||
new CodeRange(isolate, page_allocator, code_range_size);
|
||||
VirtualMemory code_range_reservation;
|
||||
CHECK(AlignedAllocVirtualMemory(page_allocator, code_range_size,
|
||||
MemoryChunk::kAlignment, nullptr,
|
||||
&code_range_reservation));
|
||||
|
||||
VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
|
||||
base::BoundedPageAllocator code_page_allocator(
|
||||
page_allocator, code_range_reservation.address(),
|
||||
code_range_reservation.size(), MemoryChunk::kAlignment);
|
||||
|
||||
VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
|
||||
initial_commit_area_size, EXECUTABLE, heap->code_space());
|
||||
|
||||
VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
|
||||
VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
|
||||
initial_commit_area_size, NOT_EXECUTABLE,
|
||||
heap->old_space());
|
||||
delete code_range;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -322,5 +322,56 @@ TEST(RegionAllocatorTest, FindRegion) {
   }
 }

+TEST(RegionAllocatorTest, Contains) {
+  using Region = RegionAllocator::Region;
+
+  struct {
+    Address start;
+    size_t size;
+  } test_cases[] = {{153, 771}, {0, 227}, {-447, 447}};
+
+  for (size_t i = 0; i < arraysize(test_cases); i++) {
+    Address start = test_cases[i].start;
+    size_t size = test_cases[i].size;
+    Address end = start + size;  // exclusive
+
+    Region region(start, size, true);
+
+    // Test single-argument contains().
+    CHECK(!region.contains(start - 1041));
+    CHECK(!region.contains(start - 1));
+    CHECK(!region.contains(end));
+    CHECK(!region.contains(end + 1));
+    CHECK(!region.contains(end + 113));
+
+    CHECK(region.contains(start));
+    CHECK(region.contains(start + 1));
+    CHECK(region.contains(start + size / 2));
+    CHECK(region.contains(end - 1));
+
+    // Test two-arguments contains().
+    CHECK(!region.contains(start - 17, 17));
+    CHECK(!region.contains(start - 17, size * 2));
+    CHECK(!region.contains(end, 1));
+    CHECK(!region.contains(end, static_cast<size_t>(0 - end)));
+
+    CHECK(region.contains(start, size));
+    CHECK(region.contains(start, 10));
+    CHECK(region.contains(start + 11, 120));
+    CHECK(region.contains(end - 13, 13));
+    CHECK(!region.contains(end, 0));
+
+    // Zero-size queries.
+    CHECK(!region.contains(start - 10, 0));
+    CHECK(!region.contains(start - 1, 0));
+    CHECK(!region.contains(end, 0));
+    CHECK(!region.contains(end + 10, 0));
+
+    CHECK(region.contains(start, 0));
+    CHECK(region.contains(start + 10, 0));
+    CHECK(region.contains(end - 1, 0));
+  }
+}
+
 }  // namespace base
 }  // namespace v8