[ptr-compr] Move IsolateData from Heap to Isolate

and also move embedder fields from Isolate to IsolateData.

The external memory counter fields are temporarily moved to IsolateData in
order to avoid unexpected Node.js bot failures that occur if the fields
are left in the Heap class.

Bug: v8:8182
Cq-Include-Trybots: luci.chromium.try:linux_chromium_rel_ng
Change-Id: I9d58f235c0ce40e110f595addd03b80b3617aa77
Reviewed-on: https://chromium-review.googlesource.com/c/1278793
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57037}
This commit is contained in:
Igor Sheludko 2018-10-26 15:30:12 +02:00 committed by Commit Bot
parent 01d5da4bef
commit 3421ad20d8
11 changed files with 176 additions and 118 deletions

View File

@ -144,15 +144,18 @@ class Internals {
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kExternalOneByteRepresentationTag = 0x0a;
static const uint32_t kNumIsolateDataSlots = 4;
static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
static const int kExternalMemoryOffset = 4 * kApiPointerSize;
static const int kExternalMemoryOffset =
kNumIsolateDataSlots * kApiPointerSize;
static const int kExternalMemoryLimitOffset =
kExternalMemoryOffset + kApiInt64Size;
static const int kExternalMemoryAtLastMarkCompactOffset =
kExternalMemoryLimitOffset + kApiInt64Size;
static const int kIsolateRootsOffset = kExternalMemoryLimitOffset +
kApiInt64Size + kApiInt64Size +
kApiPointerSize + kApiPointerSize;
static const int kIsolateRootsOffset =
kExternalMemoryAtLastMarkCompactOffset + kApiInt64Size;
static const int kUndefinedValueRootIndex = 4;
static const int kTheHoleValueRootIndex = 5;
static const int kNullValueRootIndex = 6;
@ -179,8 +182,6 @@ class Internals {
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
static const uint32_t kNumIsolateDataSlots = 4;
// Soft limit for AdjustAmountOfExternalAllocatedMemory. Trigger an
// incremental GC once the external memory reaches this limit.
static constexpr int kExternalAllocationSoftLimit = 64 * 1024 * 1024;

View File

@ -38,6 +38,7 @@
#include <forward_list>
#include "src/deoptimize-reason.h"
#include "src/external-reference.h"
#include "src/flags.h"
#include "src/globals.h"
#include "src/handles.h"

View File

@ -21,6 +21,7 @@
// TODO(mstarzinger): There is one more include to remove in order to no longer
// leak heap internals to users of this interface!
#include "src/heap/spaces-inl.h"
#include "src/isolate-data.h"
#include "src/isolate.h"
#include "src/log.h"
#include "src/msan.h"
@ -55,6 +56,32 @@ HeapObject* AllocationResult::ToObjectChecked() {
return HeapObject::cast(object_);
}
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(
reinterpret_cast<intptr_t>(this) -
reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
}
int64_t Heap::external_memory() {
return isolate()->isolate_data()->external_memory_;
}
void Heap::update_external_memory(int64_t delta) {
isolate()->isolate_data()->external_memory_ += delta;
}
void Heap::update_external_memory_concurrently_freed(intptr_t freed) {
external_memory_concurrently_freed_ += freed;
}
void Heap::account_external_memory_concurrently_freed() {
isolate()->isolate_data()->external_memory_ -=
external_memory_concurrently_freed_;
external_memory_concurrently_freed_ = 0;
}
RootsTable& Heap::roots_table() { return isolate()->roots_table(); }
// TODO(jkummerow): Drop std::remove_pointer after the migration to ObjectPtr.
#define ROOT_ACCESSOR(Type, name, CamelName) \
Type Heap::name() { \
@ -564,12 +591,6 @@ void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
(*pretenuring_feedback)[reinterpret_cast<AllocationSite*>(key)]++;
}
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(
reinterpret_cast<intptr_t>(this) -
reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
}
void Heap::ExternalStringTable::AddString(String* string) {
DCHECK(string->IsExternalString());
DCHECK(!Contains(string));

View File

@ -377,7 +377,7 @@ void Heap::PrintShortHeapStatistics() {
memory_allocator()->unmapper()->NumberOfChunks(),
CommittedMemoryOfHeapAndUnmapper() / KB);
PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
external_memory_ / KB);
isolate()->isolate_data()->external_memory_ / KB);
PrintIsolate(isolate_, "Backing store memory: %6" PRIuS " KB\n",
backing_store_bytes_ / KB);
PrintIsolate(isolate_, "External memory global %zu KB\n",
@ -1186,8 +1186,9 @@ void Heap::ReportExternalMemoryPressure() {
static_cast<GCCallbackFlags>(
kGCCallbackFlagSynchronousPhantomCallbackProcessing |
kGCCallbackFlagCollectAllExternalMemory);
if (external_memory_ >
(external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
if (isolate()->isolate_data()->external_memory_ >
(isolate()->isolate_data()->external_memory_at_last_mark_compact_ +
external_memory_hard_limit())) {
CollectAllGarbage(
kReduceMemoryFootprintMask,
GarbageCollectionReason::kExternalMemoryPressure,
@ -1209,10 +1210,12 @@ void Heap::ReportExternalMemoryPressure() {
// Incremental marking is turned on and has already been started.
const double kMinStepSize = 5;
const double kMaxStepSize = 10;
const double ms_step =
Min(kMaxStepSize,
Max(kMinStepSize, static_cast<double>(external_memory_) /
external_memory_limit_ * kMinStepSize));
const double ms_step = Min(
kMaxStepSize,
Max(kMinStepSize,
static_cast<double>(isolate()->isolate_data()->external_memory_) /
isolate()->isolate_data()->external_memory_limit_ *
kMinStepSize));
const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
// Extend the gc callback flags with external memory flags.
current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
@ -1704,8 +1707,11 @@ bool Heap::PerformGarbageCollection(
size_t old_gen_size = OldGenerationSizeOfObjects();
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
external_memory_at_last_mark_compact_ = external_memory_;
external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
isolate()->isolate_data()->external_memory_at_last_mark_compact_ =
isolate()->isolate_data()->external_memory_;
isolate()->isolate_data()->external_memory_limit_ =
isolate()->isolate_data()->external_memory_ +
kExternalAllocationSoftLimit;
double max_factor =
heap_controller()->MaxGrowingFactor(max_old_generation_size_);
@ -3189,8 +3195,8 @@ void Heap::CollectGarbageOnMemoryPressure() {
double end = MonotonicallyIncreasingTimeInMs();
// Estimate how much memory we can free.
int64_t potential_garbage =
(CommittedMemory() - SizeOfObjects()) + external_memory_;
int64_t potential_garbage = (CommittedMemory() - SizeOfObjects()) +
isolate()->isolate_data()->external_memory_;
// If we can potentially free large amount of memory, then start GC right
// away instead of waiting for memory reducer.
if (potential_garbage >= kGarbageThresholdInBytes &&
@ -3655,12 +3661,12 @@ Code* Heap::builtin(int index) {
DCHECK(Builtins::IsBuiltinId(index));
// Code::cast cannot be used here since we access builtins
// during the marking phase of mark sweep. See IC::Clear.
return reinterpret_cast<Code*>(builtins_table()[index]);
return reinterpret_cast<Code*>(isolate()->builtins_table()[index]);
}
Address Heap::builtin_address(int index) {
DCHECK(Builtins::IsBuiltinId(index) || index == Builtins::builtin_count);
return reinterpret_cast<Address>(&builtins_table()[index]);
return reinterpret_cast<Address>(&isolate()->builtins_table()[index]);
}
void Heap::set_builtin(int index, HeapObject* builtin) {
@ -3668,7 +3674,7 @@ void Heap::set_builtin(int index, HeapObject* builtin) {
DCHECK(Internals::HasHeapObjectTag(reinterpret_cast<Address>(builtin)));
// The given builtin may be completely uninitialized thus we cannot check its
// type here.
builtins_table()[index] = builtin;
isolate()->builtins_table()[index] = builtin;
}
void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
@ -4031,9 +4037,14 @@ size_t Heap::OldGenerationSizeOfObjects() {
}
uint64_t Heap::PromotedExternalMemorySize() {
if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
return static_cast<uint64_t>(external_memory_ -
external_memory_at_last_mark_compact_);
IsolateData* isolate_data = isolate()->isolate_data();
if (isolate_data->external_memory_ <=
isolate_data->external_memory_at_last_mark_compact_) {
return 0;
}
return static_cast<uint64_t>(
isolate_data->external_memory_ -
isolate_data->external_memory_at_last_mark_compact_);
}
bool Heap::ShouldOptimizeForLoadTime() {
@ -4114,7 +4125,8 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (FLAG_stress_marking > 0) {
double gained_since_last_gc =
PromotedSinceLastGC() +
(external_memory_ - external_memory_at_last_mark_compact_);
(isolate()->isolate_data()->external_memory_ -
isolate()->isolate_data()->external_memory_at_last_mark_compact_);
double size_before_gc =
OldGenerationObjectsAndPromotedExternalMemorySize() -
gained_since_last_gc;
@ -4399,8 +4411,6 @@ void Heap::SetUp() {
}
write_protect_code_memory_ = FLAG_write_protect_code_memory;
isolate_data_.external_reference_table()->Init(isolate_);
}
void Heap::InitializeHashSeed() {

View File

@ -21,7 +21,6 @@
#include "src/base/atomic-utils.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
#include "src/isolate-data.h"
#include "src/objects.h"
#include "src/objects/fixed-array.h"
#include "src/objects/string-table.h"
@ -508,17 +507,10 @@ class Heap {
int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }
int64_t external_memory() { return external_memory_; }
void update_external_memory(int64_t delta) { external_memory_ += delta; }
void update_external_memory_concurrently_freed(intptr_t freed) {
external_memory_concurrently_freed_ += freed;
}
void account_external_memory_concurrently_freed() {
external_memory_ -= external_memory_concurrently_freed_;
external_memory_concurrently_freed_ = 0;
}
V8_INLINE int64_t external_memory();
V8_INLINE void update_external_memory(int64_t delta);
V8_INLINE void update_external_memory_concurrently_freed(intptr_t freed);
V8_INLINE void account_external_memory_concurrently_freed();
size_t backing_store_bytes() const { return backing_store_bytes_; }
@ -636,18 +628,12 @@ class Heap {
return array_buffer_collector_;
}
const IsolateData* isolate_data() const { return &isolate_data_; }
IsolateData* isolate_data() { return &isolate_data_; }
// ===========================================================================
// Root set access. ==========================================================
// ===========================================================================
// Shortcut to the roots table stored in |isolate_data_|.
V8_INLINE const RootsTable& roots_table() const {
return isolate_data_.roots();
}
V8_INLINE RootsTable& roots_table() { return isolate_data_.roots(); }
// Shortcut to the roots table stored in the Isolate.
V8_INLINE RootsTable& roots_table();
// Heap root getters.
#define ROOT_ACCESSOR(type, name, CamelName) inline type name();
@ -732,9 +718,6 @@ class Heap {
// Builtins. =================================================================
// ===========================================================================
// Shortcut to the builtins table stored in |isolate_data_|.
V8_INLINE Object** builtins_table() { return isolate_data_.builtins(); }
Code* builtin(int index);
Address builtin_address(int index);
void set_builtin(int index, HeapObject* builtin);
@ -1746,15 +1729,6 @@ class Heap {
bool IsRetainingPathTarget(HeapObject* object, RetainingPathOption* option);
void PrintRetainingPath(HeapObject* object, RetainingPathOption option);
// The amount of external memory registered through the API.
int64_t external_memory_ = 0;
// The limit when to trigger memory pressure from the API.
int64_t external_memory_limit_ = kExternalAllocationSoftLimit;
// Caches the amount of external memory registered at the last MC.
int64_t external_memory_at_last_mark_compact_ = 0;
// The amount of memory that has been freed concurrently.
std::atomic<intptr_t> external_memory_concurrently_freed_{0};
@ -1762,8 +1736,6 @@ class Heap {
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_ = nullptr;
IsolateData isolate_data_;
size_t code_range_size_ = 0;
size_t max_semi_space_size_ = 8 * (kPointerSize / 4) * MB;
size_t initial_semispace_size_ = kMinSemiSpaceSizeInKB * KB;

View File

@ -101,22 +101,46 @@ class IsolateData final {
// Static layout definition.
#define FIELDS(V) \
V(kEmbedderDataOffset, Internals::kNumIsolateDataSlots* kPointerSize) \
V(kExternalMemoryOffset, kInt64Size) \
V(kExternalMemoryLlimitOffset, kInt64Size) \
V(kExternalMemoryAtLastMarkCompactOffset, kInt64Size) \
V(kRootsTableOffset, RootsTable::kEntriesCount* kPointerSize) \
V(kExternalReferenceTableOffset, ExternalReferenceTable::SizeInBytes()) \
V(kBuiltinsTableOffset, Builtins::builtin_count* kPointerSize) \
V(kMagicNumberOffset, kIntptrSize) \
V(kVirtualCallTargetRegisterOffset, kPointerSize) \
/* This padding aligns IsolateData size by 8 bytes. */ \
V(kPaddingOffset, \
8 + RoundUp<8>(static_cast<int>(kPaddingOffset)) - kPaddingOffset) \
/* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(0, FIELDS)
#undef FIELDS
// These fields are accessed through the API, offsets must be kept in sync
// with v8::internal::Internals (in include/v8-internal.h) constants.
// The layout consistency is verified in Isolate::CheckIsolateLayout() using
// runtime checks.
void* embedder_data_[Internals::kNumIsolateDataSlots] = {};
// TODO(ishell): Move these external memory counters back to Heap once the
// Node JS bot issue is solved.
// The amount of external memory registered through the API.
int64_t external_memory_ = 0;
// The limit when to trigger memory pressure from the API.
int64_t external_memory_limit_ = kExternalAllocationSoftLimit;
// Caches the amount of external memory registered at the last MC.
int64_t external_memory_at_last_mark_compact_ = 0;
RootsTable roots_;
ExternalReferenceTable external_reference_table_;
Object* builtins_[Builtins::builtin_count];
Object* builtins_[Builtins::builtin_count] = {};
// For root register verification.
// TODO(v8:6666): Remove once the root register is fully supported on ia32.
@ -127,9 +151,21 @@ class IsolateData final {
// ia32 (otherwise the arguments adaptor call runs out of registers).
void* virtual_call_target_register_ = nullptr;
// Ensure the size is 8-byte aligned in order to make alignment of the field
// following the IsolateData field predictable. This solves the issue with
// C++ compilers for 32-bit platforms which are not consistent at aligning
// int64_t fields.
// In order to avoid dealing with zero-size arrays the padding size is always
// in the range [8, 15).
STATIC_ASSERT(kPaddingOffsetEnd + 1 - kPaddingOffset >= 8);
char padding_[kPaddingOffsetEnd + 1 - kPaddingOffset];
V8_INLINE static void AssertPredictableLayout();
friend class Isolate;
friend class Heap;
FRIEND_TEST(HeapTest, ExternalLimitDefault);
FRIEND_TEST(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling);
DISALLOW_COPY_AND_ASSIGN(IsolateData);
};
@ -139,12 +175,22 @@ class IsolateData final {
// issues because of different compilers used for snapshot generator and
// actual V8 code.
void IsolateData::AssertPredictableLayout() {
STATIC_ASSERT(offsetof(IsolateData, roots_) ==
IsolateData::kRootsTableOffset);
STATIC_ASSERT(std::is_standard_layout<RootsTable>::value);
STATIC_ASSERT(std::is_standard_layout<ExternalReferenceTable>::value);
STATIC_ASSERT(std::is_standard_layout<IsolateData>::value);
STATIC_ASSERT(offsetof(IsolateData, roots_) == kRootsTableOffset);
STATIC_ASSERT(offsetof(IsolateData, external_reference_table_) ==
IsolateData::kExternalReferenceTableOffset);
STATIC_ASSERT(offsetof(IsolateData, builtins_) ==
IsolateData::kBuiltinsTableOffset);
kExternalReferenceTableOffset);
STATIC_ASSERT(offsetof(IsolateData, builtins_) == kBuiltinsTableOffset);
STATIC_ASSERT(offsetof(IsolateData, magic_number_) == kMagicNumberOffset);
STATIC_ASSERT(offsetof(IsolateData, virtual_call_target_register_) ==
kVirtualCallTargetRegisterOffset);
STATIC_ASSERT(offsetof(IsolateData, external_memory_) ==
kExternalMemoryOffset);
STATIC_ASSERT(offsetof(IsolateData, external_memory_limit_) ==
kExternalMemoryLlimitOffset);
STATIC_ASSERT(offsetof(IsolateData, external_memory_at_last_mark_compact_) ==
kExternalMemoryAtLastMarkCompactOffset);
STATIC_ASSERT(sizeof(IsolateData) == IsolateData::kSize);
}

View File

@ -12,15 +12,6 @@
namespace v8 {
namespace internal {
base::AddressRegion Isolate::root_register_addressable_region() {
// TODO(ishell): limit this region to the IsolateData object once all the
// data is moved there.
Address start = reinterpret_cast<Address>(this);
Address end =
reinterpret_cast<Address>(heap_.isolate_data()) + sizeof(IsolateData);
return base::AddressRegion(start, end - start);
}
bool Isolate::FromWritableHeapObject(HeapObject* obj, Isolate** isolate) {
i::MemoryChunk* chunk = i::MemoryChunk::FromHeapObject(obj);
if (chunk->owner()->identity() == i::RO_SPACE) {

View File

@ -2646,8 +2646,7 @@ std::atomic<size_t> Isolate::non_disposed_isolates_;
#endif // DEBUG
Isolate::Isolate()
: embedder_data_(),
entry_stack_(nullptr),
: entry_stack_(nullptr),
stack_trace_nesting_level_(0),
incomplete_message_(nullptr),
bootstrapper_(nullptr),
@ -2718,6 +2717,7 @@ Isolate::Isolate()
cancelable_task_manager_(new CancelableTaskManager()),
abort_on_uncaught_exception_callback_(nullptr),
total_regexp_code_generated_(0) {
CheckIsolateLayout();
id_ = base::Relaxed_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
@ -2766,6 +2766,24 @@ Isolate::Isolate()
}
}
void Isolate::CheckIsolateLayout() {
CHECK_EQ(OFFSET_OF(Isolate, isolate_data_), 0);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.embedder_data_)),
Internals::kIsolateEmbedderDataOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.roots_)),
Internals::kIsolateRootsOffset);
CHECK_EQ(Internals::kExternalMemoryOffset % 8, 0);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.external_memory_)),
Internals::kExternalMemoryOffset);
CHECK_EQ(Internals::kExternalMemoryLimitOffset % 8, 0);
CHECK_EQ(static_cast<int>(
OFFSET_OF(Isolate, isolate_data_.external_memory_limit_)),
Internals::kExternalMemoryLimitOffset);
CHECK_EQ(Internals::kExternalMemoryAtLastMarkCompactOffset % 8, 0);
CHECK_EQ(static_cast<int>(OFFSET_OF(
Isolate, isolate_data_.external_memory_at_last_mark_compact_)),
Internals::kExternalMemoryAtLastMarkCompactOffset);
}
void Isolate::TearDown() {
TRACE_ISOLATE(tear_down);
@ -3186,6 +3204,8 @@ bool Isolate::Init(StartupDeserializer* des) {
DCHECK(!heap_.HasBeenSetUp());
heap_.SetUp();
isolate_data_.external_reference_table()->Init(this);
// Setup the wasm engine.
if (wasm_engine_ == nullptr) {
SetWasmEngine(wasm::WasmEngine::GetWasmEngine());
@ -3282,24 +3302,6 @@ bool Isolate::Init(StartupDeserializer* des) {
std::ofstream(GetTurboCfgFileName(this).c_str(), std::ios_base::trunc);
}
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
Internals::kIsolateEmbedderDataOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.isolate_data_.roots_)),
Internals::kIsolateRootsOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.external_memory_)),
Internals::kExternalMemoryOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.external_memory_limit_)),
Internals::kExternalMemoryLimitOffset);
CHECK_EQ(static_cast<int>(
OFFSET_OF(Isolate, heap_.external_memory_at_last_mark_compact_)),
Internals::kExternalMemoryAtLastMarkCompactOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(
Isolate, heap_.isolate_data_.external_reference_table_)),
Internals::kIsolateRootsOffset +
IsolateData::kExternalReferenceTableOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.isolate_data_.builtins_)),
Internals::kIsolateRootsOffset + IsolateData::kBuiltinsTableOffset);
{
HandleScope scope(this);
ast_string_constants_ = new AstStringConstants(this, heap()->HashSeed());

View File

@ -27,6 +27,7 @@
#include "src/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/isolate-data.h"
#include "src/messages.h"
#include "src/objects/code.h"
#include "src/objects/debug-objects.h"
@ -999,8 +1000,8 @@ class Isolate : private HiddenFactory {
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
const IsolateData* isolate_data() const { return heap_.isolate_data(); }
IsolateData* isolate_data() { return heap_.isolate_data(); }
const IsolateData* isolate_data() const { return &isolate_data_; }
IsolateData* isolate_data() { return &isolate_data_; }
// Generated code can embed this address to get access to the isolate-specific
// data (for example, roots, external references, builtins, etc.).
@ -1009,10 +1010,17 @@ class Isolate : private HiddenFactory {
RootsTable& roots_table() { return isolate_data()->roots(); }
// kRootRegister may be used to address any location that falls into this
// region. Fields outside this region are not guaranteed to live at a static
// offset from kRootRegister.
inline base::AddressRegion root_register_addressable_region();
// A sub-region of the Isolate object that has "predictable" layout which
// depends only on the pointer size and therefore it's guaranteed that there
// will be no compatibility issues because of different compilers used for
// snapshot generator and actual V8 code.
// Thus, kRootRegister may be used to address any location that falls into
// this region.
// See IsolateData::AssertPredictableLayout() for details.
base::AddressRegion root_register_addressable_region() const {
return base::AddressRegion(reinterpret_cast<Address>(&isolate_data_),
sizeof(IsolateData));
}
Object* root(RootIndex index) { return roots_table()[index]; }
@ -1025,6 +1033,8 @@ class Isolate : private HiddenFactory {
return isolate_data()->external_reference_table();
}
V8_INLINE Object** builtins_table() { return isolate_data_.builtins(); }
StubCache* load_stub_cache() { return load_stub_cache_; }
StubCache* store_stub_cache() { return store_stub_cache_; }
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
@ -1124,11 +1134,11 @@ class Isolate : private HiddenFactory {
void SetData(uint32_t slot, void* data) {
DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
embedder_data_[slot] = data;
isolate_data_.embedder_data_[slot] = data;
}
void* GetData(uint32_t slot) {
DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
return embedder_data_[slot];
return isolate_data_.embedder_data_[slot];
}
bool serializer_enabled() const { return serializer_enabled_; }
@ -1617,18 +1627,14 @@ class Isolate : private HiddenFactory {
protected:
Isolate();
void CheckIsolateLayout();
bool IsArrayOrObjectOrStringPrototype(Object* object);
private:
friend struct GlobalState;
friend struct InitializeGlobalState;
// These fields are accessed through the API, offsets must be kept in sync
// with v8::internal::Internals (in include/v8.h) constants. This is also
// verified in Isolate::Init() using runtime checks.
void* embedder_data_[Internals::kNumIsolateDataSlots];
Heap heap_;
class ThreadDataTable {
public:
ThreadDataTable() = default;
@ -1737,6 +1743,12 @@ class Isolate : private HiddenFactory {
return "";
}
// This class contains a collection of data accessible from both C++ runtime
// and compiled code (including assembly stubs, builtins, interpreter bytecode
// handlers and optimized code).
IsolateData isolate_data_;
Heap heap_;
base::Atomic32 id_;
EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;

View File

@ -7,7 +7,7 @@
#include "src/assembler-arch.h"
#include "src/base/template-utils.h"
#include "src/heap/heap.h"
#include "src/builtins/builtins.h"
namespace v8 {
namespace internal {

View File

@ -61,14 +61,16 @@ TEST_F(HeapTest, ASLR) {
TEST_F(HeapTest, ExternalLimitDefault) {
Heap* heap = i_isolate()->heap();
EXPECT_EQ(kExternalAllocationSoftLimit, heap->external_memory_limit_);
EXPECT_EQ(kExternalAllocationSoftLimit,
heap->isolate()->isolate_data()->external_memory_limit_);
}
TEST_F(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling) {
v8_isolate()->AdjustAmountOfExternalAllocatedMemory(+10 * MB);
v8_isolate()->AdjustAmountOfExternalAllocatedMemory(-10 * MB);
Heap* heap = i_isolate()->heap();
EXPECT_GE(heap->external_memory_limit_, kExternalAllocationSoftLimit);
EXPECT_GE(heap->isolate()->isolate_data()->external_memory_limit_,
kExternalAllocationSoftLimit);
}
} // namespace internal