[ptr-compr] Introduce IsolateAllocator

to control how the memory for the Isolate object is allocated.
This adds support for a pointer-compression-friendly heap layout.

Bug: v8:8182
Cq-Include-Trybots: luci.chromium.try:linux_chromium_rel_ng
Change-Id: Ida36b81ee22bd865005c394748b62d4c0897d746
Reviewed-on: https://chromium-review.googlesource.com/c/1251548
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57131}
Author: Igor Sheludko
Date: 2018-10-30 13:48:12 +01:00 (committed by Commit Bot)
Parent: f46456a35c
Commit: 2e2604b967
15 changed files with 389 additions and 29 deletions


@@ -2154,6 +2154,8 @@ v8_source_set("v8_base") {
"src/interpreter/interpreter-intrinsics.h",
"src/interpreter/interpreter.cc",
"src/interpreter/interpreter.h",
"src/isolate-allocator.cc",
"src/isolate-allocator.h",
"src/isolate-data.h",
"src/isolate-inl.h",
"src/isolate.cc",


@@ -146,7 +146,7 @@ class Internals {
static const uint32_t kNumIsolateDataSlots = 4;
static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
static const int kIsolateEmbedderDataOffset = 0;
static const int kExternalMemoryOffset =
kNumIsolateDataSlots * kApiPointerSize;
static const int kExternalMemoryLimitOffset =


@@ -357,20 +357,19 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location,
i::HeapStats heap_stats;
if (isolate == nullptr) {
isolate = Isolate::Current();
isolate = Isolate::TryGetCurrent();
}
if (isolate == nullptr) {
// On a background thread -> we cannot retrieve memory information from the
// Isolate. Write easy-to-recognize values on the stack.
// If the Isolate is not available for the current thread we cannot retrieve
// memory information from the Isolate. Write easy-to-recognize values on
// the stack.
memset(last_few_messages, 0x0BADC0DE, Heap::kTraceRingBufferSize + 1);
memset(js_stacktrace, 0x0BADC0DE, Heap::kStacktraceBufferSize + 1);
memset(&heap_stats, 0xBADC0DE, sizeof(heap_stats));
// Note that the embedder's oom handler won't be called in this case. We
// just crash.
FATAL(
"API fatal error handler returned after process out of memory on the "
"background thread");
// Note that the embedder's oom handler is also not available and therefore
// won't be called in this case. We just crash.
FATAL("Fatal process out of memory: %s", location);
UNREACHABLE();
}


@@ -45,6 +45,20 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
return reinterpret_cast<void*>(address);
}
bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
PageAllocator::Permission access) {
CHECK(IsAligned(address, allocate_page_size_));
CHECK(IsAligned(size, allocate_page_size_));
CHECK(region_allocator_.contains(address, size));
if (!region_allocator_.AllocateRegionAt(address, size)) {
return false;
}
CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size,
access));
return true;
}
bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
MutexGuard guard(&mutex_);
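
AllocatePagesAt() lets a client that already knows where a sub-range of the bounded region must live mark it as allocated, so later AllocatePages() calls never hand that range out again; CommitPagesForIsolate() further down relies on exactly this to carve out the pages holding the Isolate. A minimal illustration of the call shape, assuming an existing platform reservation described by the placeholder variables reservation_start, reservation_size and page_size (a fragment, not a complete program; it only builds inside the V8 tree):

    // Hedged sketch: the constructor and AllocatePagesAt() signatures are
    // taken from the hunks in this commit; the variables are placeholders.
    base::BoundedPageAllocator bounded(GetPlatformPageAllocator(),
                                       reservation_start, reservation_size,
                                       page_size);
    // Pre-mark one page as taken without making it accessible yet; regular
    // AllocatePages() calls will now skip this range.
    CHECK(bounded.AllocatePagesAt(reservation_start, page_size,
                                  PageAllocator::Permission::kNoAccess));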


@@ -53,15 +53,17 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
return page_allocator_->GetRandomMmapAddr();
}
void* AllocatePages(void* address, size_t size, size_t alignment,
PageAllocator::Permission access) override;
void* AllocatePages(void* hint, size_t size, size_t alignment,
Permission access) override;
// Allocates pages at the given address; returns true on success.
bool AllocatePagesAt(Address address, size_t size, Permission access);
bool FreePages(void* address, size_t size) override;
bool ReleasePages(void* address, size_t size, size_t new_size) override;
bool SetPermissions(void* address, size_t size,
PageAllocator::Permission access) override;
bool SetPermissions(void* address, size_t size, Permission access) override;
bool DiscardSystemPages(void* address, size_t size) override;


@@ -372,6 +372,20 @@ inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
UNREACHABLE();
}
enum class IsolateAllocationMode {
// Allocate Isolate in C++ heap using default new/delete operators.
kAllocateInCppHeap,
// Allocate Isolate in a committed region inside V8 heap reservation.
kAllocateInV8Heap,
#ifdef V8_COMPRESS_POINTERS
kDefault = kAllocateInV8Heap,
#else
kDefault = kAllocateInCppHeap,
#endif
};
// Indicates whether the lookup is related to sloppy-mode block-scoped
// function hoisting, and is a synthetic assignment for that.
enum class LookupHoistingMode { kNormal, kLegacySloppy };
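
With IsolateAllocationMode above, pointer-compression builds default to placing the Isolate inside the reserved V8 heap region while other builds keep the plain C++-heap allocation, and a caller can still force a mode through the Isolate::New(IsolateAllocationMode) overload added further down. The test-support change at the end of this commit does exactly that; a fragment adapted from it (create_params set up as usual, internal API only):

    // Force the V8-heap allocation mode regardless of the build default,
    // then hand the isolate to the public API for initialization.
    i::Isolate* internal_isolate =
        i::Isolate::New(i::IsolateAllocationMode::kAllocateInV8Heap);
    v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(internal_isolate);
    v8::Isolate::Initialize(isolate, create_params);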


@@ -121,7 +121,7 @@ void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
size_t code_range_size)
: isolate_(isolate),
data_page_allocator_(GetPlatformPageAllocator()),
data_page_allocator_(isolate->page_allocator()),
code_page_allocator_(nullptr),
capacity_(RoundUp(capacity, Page::kPageSize)),
size_(0),

src/isolate-allocator.cc (new file, 158 lines)

@@ -0,0 +1,158 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/isolate-allocator.h"
#include "src/base/bounded-page-allocator.h"
#include "src/isolate.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
IsolateAllocator::IsolateAllocator(IsolateAllocationMode mode) {
#if V8_TARGET_ARCH_64_BIT
if (mode == IsolateAllocationMode::kAllocateInV8Heap) {
Address heap_base = InitReservation();
CommitPagesForIsolate(heap_base);
return;
}
#endif // V8_TARGET_ARCH_64_BIT
// Allocate Isolate in C++ heap.
CHECK_EQ(mode, IsolateAllocationMode::kAllocateInCppHeap);
page_allocator_ = GetPlatformPageAllocator();
isolate_memory_ = ::operator new(sizeof(Isolate));
DCHECK(!reservation_.IsReserved());
}
IsolateAllocator::~IsolateAllocator() {
if (reservation_.IsReserved()) {
// The actual memory will be freed when the |reservation_| dies.
return;
}
// The memory was allocated in C++ heap.
::operator delete(isolate_memory_);
}
#if V8_TARGET_ARCH_64_BIT
Address IsolateAllocator::InitReservation() {
v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
// Reserve a 4Gb region so that the middle is 4Gb aligned.
// The VirtualMemory API does not support such a constraint so we have to
// implement it manually here.
size_t reservation_size = size_t{4} * GB;
size_t base_alignment = size_t{4} * GB;
const int kMaxAttempts = 3;
for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
Address hint = RoundDown(reinterpret_cast<Address>(
platform_page_allocator->GetRandomMmapAddr()),
base_alignment) +
base_alignment / 2;
// Within this reservation there will be a sub-region with proper alignment.
VirtualMemory padded_reservation(platform_page_allocator,
reservation_size * 2,
reinterpret_cast<void*>(hint));
if (!padded_reservation.IsReserved()) break;
// Find a sub-region inside the reservation whose middle is
// |base_alignment|-aligned.
Address address =
RoundUp(padded_reservation.address() + reservation_size / 2,
base_alignment) -
reservation_size / 2;
CHECK(padded_reservation.InVM(address, reservation_size));
// Now free the padded reservation and immediately try to reserve an exact
// region at the aligned address. We have to do this dancing because the
// reservation address requirement is more complex than just a certain
// alignment and not all operating systems support freeing parts of reserved
// address space regions.
padded_reservation.Free();
VirtualMemory reservation(platform_page_allocator, reservation_size,
reinterpret_cast<void*>(address));
if (!reservation.IsReserved()) break;
// The reservation could still be somewhere else but we can accept it
// if the reservation has the required alignment.
Address aligned_address =
RoundUp(reservation.address() + reservation_size / 2, base_alignment) -
reservation_size / 2;
if (reservation.address() == aligned_address) {
reservation_ = std::move(reservation);
break;
}
}
if (!reservation_.IsReserved()) {
V8::FatalProcessOutOfMemory(nullptr,
"Failed to reserve memory for new V8 Isolate");
}
CHECK_EQ(reservation_.size(), reservation_size);
Address heap_base = reservation_.address() + reservation_size / 2;
CHECK(IsAligned(heap_base, base_alignment));
return heap_base;
}
void IsolateAllocator::CommitPagesForIsolate(Address heap_base) {
v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
// Simplify BoundedPageAllocator's life by configuring it to use the same page
// size as the Heap will use (MemoryChunk::kPageSize).
size_t page_size = RoundUp(size_t{1} << kPageSizeBits,
platform_page_allocator->AllocatePageSize());
page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
platform_page_allocator, reservation_.address(), reservation_.size(),
page_size);
page_allocator_ = page_allocator_instance_.get();
Address isolate_address = heap_base - Isolate::isolate_root_bias();
Address isolate_end = isolate_address + sizeof(Isolate);
// Inform the bounded page allocator about reserved pages.
{
Address reserved_region_address = RoundDown(isolate_address, page_size);
size_t reserved_region_size =
RoundUp(isolate_end, page_size) - reserved_region_address;
CHECK(page_allocator_instance_->AllocatePagesAt(
reserved_region_address, reserved_region_size,
PageAllocator::Permission::kNoAccess));
}
// Commit pages where the Isolate will be stored.
{
size_t commit_page_size = platform_page_allocator->CommitPageSize();
Address committed_region_address =
RoundDown(isolate_address, commit_page_size);
size_t committed_region_size =
RoundUp(isolate_end, commit_page_size) - committed_region_address;
// We are using |reservation_| directly here because |page_allocator_| has a
// bigger commit page size than we actually need.
CHECK(reservation_.SetPermissions(committed_region_address,
committed_region_size,
PageAllocator::kReadWrite));
if (Heap::ShouldZapGarbage()) {
for (Address address = committed_region_address;
address < committed_region_address + committed_region_size;
address += kPointerSize) {
Memory<Address>(address) = static_cast<Address>(kZapValue);
}
}
}
isolate_memory_ = reinterpret_cast<void*>(isolate_address);
}
#endif // V8_TARGET_ARCH_64_BIT
} // namespace internal
} // namespace v8
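
The interesting part of InitReservation() above is the alignment dance: reserve twice the needed size, compute the sub-region whose middle is 4 GB aligned, free the padded reservation, and immediately re-reserve the exact sub-region. The rounding arithmetic can be checked in isolation; below is a standalone sketch with plain integers standing in for addresses (all names are invented for illustration, only the math mirrors the code above):

    // Standalone, compilable sketch of the sub-region selection math.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    constexpr uint64_t kReservationSize = uint64_t{4} << 30;  // 4 GB
    constexpr uint64_t kBaseAlignment = uint64_t{4} << 30;    // 4 GB

    constexpr uint64_t RoundDown(uint64_t x, uint64_t alignment) {
      return x & ~(alignment - 1);
    }
    constexpr uint64_t RoundUp(uint64_t x, uint64_t alignment) {
      return RoundDown(x + alignment - 1, alignment);
    }

    // Given the start of a padded (2 * kReservationSize) reservation, pick
    // the sub-region whose middle is kBaseAlignment-aligned, exactly as
    // InitReservation() does before freeing and re-reserving.
    constexpr uint64_t AlignedSubregionStart(uint64_t padded_start) {
      return RoundUp(padded_start + kReservationSize / 2, kBaseAlignment) -
             kReservationSize / 2;
    }

    int main() {
      const uint64_t padded_start = uint64_t{0x1234567000};  // arbitrary
      const uint64_t region_start = AlignedSubregionStart(padded_start);
      const uint64_t heap_base = region_start + kReservationSize / 2;
      // The sub-region fits inside the padded reservation and its middle
      // (the future heap base) is 4 GB aligned.
      assert(region_start >= padded_start);
      assert(region_start + kReservationSize <=
             padded_start + 2 * kReservationSize);
      assert(heap_base % kBaseAlignment == 0);
      std::printf("heap base: %#llx\n",
                  static_cast<unsigned long long>(heap_base));
      return 0;
    }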

src/isolate-allocator.h (new file, 58 lines)

@@ -0,0 +1,58 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ISOLATE_ALLOCATOR_H_
#define V8_ISOLATE_ALLOCATOR_H_
#include "src/allocation.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/page-allocator.h"
#include "src/globals.h"
namespace v8 {
// Forward declarations.
namespace base {
class BoundedPageAllocator;
}
namespace internal {
// IsolateAllocator object is responsible for allocating memory for one (!)
// Isolate object. Depending on the allocation mode the memory can be allocated
// 1) in the C++ heap (when pointer compression is disabled)
// 2) in a proper part of a properly aligned region of a reserved address space
// (when pointer compression is enabled).
//
// Isolate::New() first creates IsolateAllocator object which allocates the
// memory and then it constructs Isolate object in this memory. Once it's done
// the Isolate object takes ownership of the IsolateAllocator object to keep
// the memory alive.
// Isolate::Delete() takes care of the proper order of the objects destruction.
class V8_EXPORT_PRIVATE IsolateAllocator final {
public:
explicit IsolateAllocator(IsolateAllocationMode mode);
~IsolateAllocator();
void* isolate_memory() const { return isolate_memory_; }
v8::PageAllocator* page_allocator() const { return page_allocator_; }
private:
Address InitReservation();
void CommitPagesForIsolate(Address heap_base);
// The allocated memory for Isolate instance.
void* isolate_memory_ = nullptr;
v8::PageAllocator* page_allocator_ = nullptr;
std::unique_ptr<base::BoundedPageAllocator> page_allocator_instance_;
VirtualMemory reservation_;
DISALLOW_COPY_AND_ASSIGN(IsolateAllocator);
};
} // namespace internal
} // namespace v8
#endif // V8_ISOLATE_ALLOCATOR_H_
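
The ownership hand-off the header comment describes (New() builds the allocator, placement-constructs the object in the allocator's memory, then the object keeps the allocator alive; Delete() runs the destructor before letting the memory go) is a general pattern. A minimal, self-contained sketch with invented names; it only mirrors the shape of Isolate::New()/Delete() shown later in this commit:

    #include <cstdlib>
    #include <memory>
    #include <new>

    // Stand-in for IsolateAllocator: owns a raw block big enough for one
    // Widget; the block is released when the allocator is destroyed.
    class WidgetAllocator {
     public:
      explicit WidgetAllocator(std::size_t size)
          : memory_(std::malloc(size)) {}
      ~WidgetAllocator() { std::free(memory_); }
      void* memory() const { return memory_; }

     private:
      void* memory_;
    };

    class Widget {
     public:
      static Widget* New() {
        auto allocator = std::make_unique<WidgetAllocator>(sizeof(Widget));
        void* memory = allocator->memory();
        // Placement-construct the object in allocator-provided memory and
        // hand the allocator over so the memory outlives the object.
        return new (memory) Widget(std::move(allocator));
      }

      static void Delete(Widget* widget) {
        // Keep the allocator alive until the destructor has finished, the
        // same ordering Isolate::Delete() uses below.
        std::unique_ptr<WidgetAllocator> allocator =
            std::move(widget->allocator_);
        widget->~Widget();
        allocator.reset();  // only now may the backing memory be released
      }

     private:
      explicit Widget(std::unique_ptr<WidgetAllocator> allocator)
          : allocator_(std::move(allocator)) {}
      ~Widget() = default;

      // Only placement new is allowed; plain new/delete are disabled so
      // callers must go through New()/Delete().
      void* operator new(std::size_t, void* ptr) { return ptr; }
      void* operator new(std::size_t) = delete;
      void operator delete(void*) = delete;

      std::unique_ptr<WidgetAllocator> allocator_;
    };

    int main() {
      Widget* widget = Widget::New();
      Widget::Delete(widget);
      return 0;
    }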


@@ -27,6 +27,8 @@ class IsolateData final {
public:
IsolateData() = default;
static constexpr intptr_t kIsolateRootBias = kRootRegisterBias;
// The value of the kRootRegister.
Address isolate_root() const {
return reinterpret_cast<Address>(this) + kIsolateRootBias;
@@ -97,8 +99,6 @@ class IsolateData final {
static constexpr intptr_t kRootRegisterSentinel = 0xcafeca11;
private:
static constexpr intptr_t kIsolateRootBias = kRootRegisterBias;
// Static layout definition.
#define FIELDS(V) \
V(kEmbedderDataOffset, Internals::kNumIsolateDataSlots* kPointerSize) \


@@ -2626,8 +2626,16 @@ std::atomic<size_t> Isolate::non_disposed_isolates_;
#endif // DEBUG
// static
Isolate* Isolate::New() {
Isolate* isolate = new Isolate();
Isolate* Isolate::New(IsolateAllocationMode mode) {
// IsolateAllocator allocates the memory for the Isolate object according to
// the given allocation mode.
std::unique_ptr<IsolateAllocator> isolate_allocator =
base::make_unique<IsolateAllocator>(mode);
// Construct Isolate object in the allocated memory.
void* isolate_ptr = isolate_allocator->isolate_memory();
Isolate* isolate = new (isolate_ptr) Isolate(std::move(isolate_allocator));
DCHECK_IMPLIES(mode == IsolateAllocationMode::kAllocateInV8Heap,
IsAligned(isolate->isolate_root(), size_t{4} * GB));
#ifdef DEBUG
non_disposed_isolates_++;
@@ -2655,14 +2663,25 @@ void Isolate::Delete(Isolate* isolate) {
non_disposed_isolates_--;
#endif // DEBUG
delete isolate;
// Take ownership of the IsolateAllocator to ensure the Isolate memory will
// be available during the Isolate destructor call.
std::unique_ptr<IsolateAllocator> isolate_allocator =
std::move(isolate->isolate_allocator_);
isolate->~Isolate();
// Now free the memory owned by the allocator.
isolate_allocator.reset();
// Restore the previous current isolate.
SetIsolateThreadLocals(saved_isolate, saved_data);
}
Isolate::Isolate()
: id_(base::Relaxed_AtomicIncrement(&isolate_counter_, 1)),
v8::PageAllocator* Isolate::page_allocator() {
return isolate_allocator_->page_allocator();
}
Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
: isolate_allocator_(std::move(isolate_allocator)),
id_(base::Relaxed_AtomicIncrement(&isolate_counter_, 1)),
stack_guard_(this),
allocator_(FLAG_trace_zone_stats ? new VerboseAccountingAllocator(
&heap_, 256 * KB, 128 * KB)


@@ -27,6 +27,7 @@
#include "src/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/isolate-allocator.h"
#include "src/isolate-data.h"
#include "src/messages.h"
#include "src/objects/code.h"
@@ -563,7 +564,8 @@ class Isolate final : private HiddenFactory {
// Creates Isolate object. Must be used instead of constructing Isolate with
// new operator.
static Isolate* New();
static V8_EXPORT_PRIVATE Isolate* New(
IsolateAllocationMode mode = IsolateAllocationMode::kDefault);
// Deletes Isolate object. Must be used instead of delete operator.
// Destroys the non-default isolates.
@@ -571,6 +573,9 @@ class Isolate final : private HiddenFactory {
// for legacy API reasons.
static void Delete(Isolate* isolate);
// Page allocator that must be used for allocating V8 heap pages.
v8::PageAllocator* page_allocator();
// Returns the PerIsolateThreadData for the current thread (or nullptr if one
// is not currently set).
static PerIsolateThreadData* CurrentPerIsolateThreadData() {
@@ -578,11 +583,16 @@ class Isolate final : private HiddenFactory {
base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
}
// Returns the isolate inside which the current thread is running or nullptr.
V8_INLINE static Isolate* TryGetCurrent() {
DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
return reinterpret_cast<Isolate*>(
base::Thread::GetExistingThreadLocal(isolate_key_));
}
// Returns the isolate inside which the current thread is running.
V8_INLINE static Isolate* Current() {
DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
Isolate* isolate = reinterpret_cast<Isolate*>(
base::Thread::GetExistingThreadLocal(isolate_key_));
Isolate* isolate = TryGetCurrent();
DCHECK_NOT_NULL(isolate);
return isolate;
}
@@ -968,6 +978,9 @@ class Isolate final : private HiddenFactory {
// data (for example, roots, external references, builtins, etc.).
// The kRootRegister is set to this value.
Address isolate_root() const { return isolate_data()->isolate_root(); }
static size_t isolate_root_bias() {
return OFFSET_OF(Isolate, isolate_data_) + IsolateData::kIsolateRootBias;
}
RootsTable& roots_table() { return isolate_data()->roots(); }
@@ -1589,7 +1602,7 @@ class Isolate final : private HiddenFactory {
void SetIdle(bool is_idle);
private:
Isolate();
explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
~Isolate();
void CheckIsolateLayout();
@@ -1691,7 +1704,9 @@ class Isolate final : private HiddenFactory {
// handlers and optimized code).
IsolateData isolate_data_;
std::unique_ptr<IsolateAllocator> isolate_allocator_;
Heap heap_;
base::Atomic32 id_;
EntryStackItem* entry_stack_ = nullptr;
int stack_trace_nesting_level_ = 0;
@@ -1903,6 +1918,12 @@ class Isolate final : private HiddenFactory {
base::Mutex thread_data_table_mutex_;
ThreadDataTable thread_data_table_;
// Delete new/delete operators to ensure that Isolate::New() and
// Isolate::Delete() are used for Isolate creation and deletion.
void* operator new(size_t, void* ptr) { return ptr; }
void* operator new(size_t) = delete;
void operator delete(void*) = delete;
friend class heap::HeapTester;
friend class TestSerializer;
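
The new isolate_root_bias() above is what lets CommitPagesForIsolate() turn the 4 GB-aligned heap_base back into the address at which the Isolate object itself must start (isolate_address = heap_base - isolate_root_bias()). A standalone sketch of that offset arithmetic, with invented types and a placeholder bias value:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t kIsolateRootBias = 128;  // placeholder value

    struct FakeIsolateData {
      std::uintptr_t isolate_root() const {
        // The root register points this many bytes past the embedded data.
        return reinterpret_cast<std::uintptr_t>(this) + kIsolateRootBias;
      }
      char roots[256];  // stand-in for embedder slots, roots, etc.
    };

    struct FakeIsolate {
      FakeIsolateData isolate_data_;
      int other_fields_ = 0;

      static std::size_t isolate_root_bias() {
        return offsetof(FakeIsolate, isolate_data_) + kIsolateRootBias;
      }
      std::uintptr_t isolate_root() const {
        return isolate_data_.isolate_root();
      }
    };

    int main() {
      FakeIsolate isolate;
      const std::uintptr_t heap_base = isolate.isolate_root();
      // The relation CommitPagesForIsolate() relies on:
      //   isolate_address == heap_base - isolate_root_bias()
      assert(heap_base - FakeIsolate::isolate_root_bias() ==
             reinterpret_cast<std::uintptr_t>(&isolate));
      return 0;
    }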


@@ -20,6 +20,7 @@ namespace v8 {
namespace internal {
typedef TestWithIsolate HeapTest;
typedef TestWithIsolateAndPointerCompression HeapWithPointerCompressionTest;
TEST(Heap, SemiSpaceSize) {
const size_t KB = static_cast<size_t>(i::KB);
@@ -73,5 +74,34 @@ TEST_F(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling) {
kExternalAllocationSoftLimit);
}
#if V8_TARGET_ARCH_64_BIT
TEST_F(HeapWithPointerCompressionTest, HeapLayout) {
// Produce some garbage.
RunJS(
"let ar = [];"
"for (let i = 0; i < 100; i++) {"
" ar.push(Array(i));"
"}"
"ar.push(Array(32 * 1024 * 1024));");
Address isolate_root = i_isolate()->isolate_root();
EXPECT_TRUE(IsAligned(isolate_root, size_t{4} * GB));
// Check that all memory chunks belong to this region.
base::AddressRegion heap_reservation(isolate_root - size_t{2} * GB,
size_t{4} * GB);
MemoryChunkIterator iter(i_isolate()->heap());
for (;;) {
MemoryChunk* chunk = iter.next();
if (chunk == nullptr) break;
Address address = chunk->address();
size_t size = chunk->area_end() - address;
EXPECT_TRUE(heap_reservation.contains(address, size));
}
}
#endif // V8_TARGET_ARCH_64_BIT
} // namespace internal
} // namespace v8


@@ -15,12 +15,18 @@
namespace v8 {
IsolateWrapper::IsolateWrapper()
IsolateWrapper::IsolateWrapper(bool enforce_pointer_compression)
: array_buffer_allocator_(
v8::ArrayBuffer::Allocator::NewDefaultAllocator()) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = array_buffer_allocator_;
isolate_ = v8::Isolate::New(create_params);
if (enforce_pointer_compression) {
isolate_ = reinterpret_cast<v8::Isolate*>(
i::Isolate::New(i::IsolateAllocationMode::kAllocateInV8Heap));
v8::Isolate::Initialize(isolate_, create_params);
} else {
isolate_ = v8::Isolate::New(create_params);
}
CHECK_NOT_NULL(isolate_);
}


@@ -24,7 +24,10 @@ class ArrayBufferAllocator;
// RAII-like Isolate instance wrapper.
class IsolateWrapper final {
public:
IsolateWrapper();
// When enforce_pointer_compression is true, the Isolate is created with
// pointer compression enabled. When it's false, the Isolate is created with
// the default pointer compression state for the current build.
explicit IsolateWrapper(bool enforce_pointer_compression = false);
~IsolateWrapper();
v8::Isolate* isolate() const { return isolate_; }
@@ -60,6 +63,23 @@ class SharedIsolateHolder final {
//
// A set of mixins from which the test fixtures will be constructed.
//
template <typename TMixin>
class WithPrivateIsolateMixin : public TMixin {
public:
explicit WithPrivateIsolateMixin(bool enforce_pointer_compression = false)
: isolate_wrapper_(enforce_pointer_compression) {}
v8::Isolate* v8_isolate() const { return isolate_wrapper_.isolate(); }
static void SetUpTestCase() { TMixin::SetUpTestCase(); }
static void TearDownTestCase() { TMixin::TearDownTestCase(); }
private:
v8::IsolateWrapper isolate_wrapper_;
DISALLOW_COPY_AND_ASSIGN(WithPrivateIsolateMixin);
};
template <typename TMixin>
class WithSharedIsolateMixin : public TMixin {
public:
@@ -81,6 +101,17 @@ class WithSharedIsolateMixin : public TMixin {
DISALLOW_COPY_AND_ASSIGN(WithSharedIsolateMixin);
};
template <typename TMixin>
class WithPointerCompressionIsolateMixin
: public WithPrivateIsolateMixin<TMixin> {
public:
WithPointerCompressionIsolateMixin()
: WithPrivateIsolateMixin<TMixin>(true) {}
private:
DISALLOW_COPY_AND_ASSIGN(WithPointerCompressionIsolateMixin);
};
template <typename TMixin>
class WithIsolateScopeMixin : public TMixin {
public:
@@ -171,6 +202,12 @@ using TestWithContext = //
WithSharedIsolateMixin< //
::testing::Test>>>;
using TestWithIsolateAndPointerCompression = //
WithContextMixin< //
WithIsolateScopeMixin< //
WithPointerCompressionIsolateMixin< //
::testing::Test>>>;
namespace internal {
// Forward declarations.