[roheap] Add BasicMemoryChunk
BasicMemoryChunk sits above MemoryChunk in the chunk hierarchy and is responsible for storing the bare minimum of data needed to identify a chunk of memory, without worrying about GC etc. This change also completes the MemoryChunk offset asserts, which were previously missing for a few key properties.

Bug: v8:7464
Change-Id: Id4c7716c4ed5722ceca3cbc66d668aed016c74b0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1688843
Commit-Queue: Maciej Goszczycki <goszczycki@google.com>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62652}
This commit is contained in: parent b86a506d2d · commit 01db8ede94
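The offset asserts this change completes rely on BasicMemoryChunk being a standard-layout class, so that offsetof() is well defined and hand-maintained constants can be checked against the layout the compiler actually produces. A minimal self-contained sketch of that pattern, using stand-in types rather than V8's (ChunkHeader and its fields are hypothetical, and the flags offset assumes common ABIs where uintptr_t packs directly after size_t):

#include <cstddef>
#include <cstdint>

struct ChunkHeader {    // stand-in for BasicMemoryChunk
  size_t size;          // at offset 0
  uintptr_t flags;      // at offset sizeof(size_t) on common ABIs
  void* marking_bitmap;
};

// Hand-computed offsets must match the compiler-generated ones, because
// generated code (e.g. the write barrier) loads fields at these fixed
// constants rather than through the C++ types.
static_assert(offsetof(ChunkHeader, size) == 0, "size offset");
static_assert(offsetof(ChunkHeader, flags) == sizeof(size_t), "flags offset");
static_assert(offsetof(ChunkHeader, marking_bitmap) ==
                  sizeof(size_t) + sizeof(uintptr_t),
              "bitmap offset");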
BUILD.gn | 2 ++
@@ -2230,6 +2230,8 @@ v8_source_set("v8_base_without_compiler") {
     "src/heap/array-buffer-tracker.cc",
     "src/heap/array-buffer-tracker.h",
     "src/heap/barrier.h",
+    "src/heap/basic-memory-chunk.cc",
+    "src/heap/basic-memory-chunk.h",
     "src/heap/code-stats.cc",
     "src/heap/code-stats.h",
     "src/heap/combined-heap.cc",
src/heap/basic-memory-chunk.cc | 54 (new file)
@@ -0,0 +1,54 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/basic-memory-chunk.h"
+
+#include <cstdlib>
+
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/slots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Verify write barrier offsets match the real offsets.
+STATIC_ASSERT(BasicMemoryChunk::Flag::INCREMENTAL_MARKING ==
+              heap_internals::MemoryChunk::kMarkingBit);
+STATIC_ASSERT(BasicMemoryChunk::Flag::FROM_PAGE ==
+              heap_internals::MemoryChunk::kFromPageBit);
+STATIC_ASSERT(BasicMemoryChunk::Flag::TO_PAGE ==
+              heap_internals::MemoryChunk::kToPageBit);
+STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
+              heap_internals::MemoryChunk::kFlagsOffset);
+STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
+              heap_internals::MemoryChunk::kHeapOffset);
+
+BasicMemoryChunk::BasicMemoryChunk(size_t size, Address area_start,
+                                   Address area_end) {
+  const Address base = reinterpret_cast<Address>(this);
+  size_ = size;
+  marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
+  header_sentinel_ = HeapObject::FromAddress(base).ptr();
+  DCHECK(HasHeaderSentinel(area_start));
+  area_start_ = area_start;
+  area_end_ = area_end;
+}
+
+// static
+bool BasicMemoryChunk::HasHeaderSentinel(Address slot_addr) {
+  Address base = BaseAddress(slot_addr);
+  if (slot_addr < base + kHeaderSize) return false;
+  return HeapObject::FromAddress(base) ==
+         ObjectSlot(base + kHeaderSentinelOffset).Relaxed_Load();
+}
+
+void BasicMemoryChunk::ReleaseMarkingBitmap() {
+  DCHECK_NOT_NULL(marking_bitmap_);
+  free(marking_bitmap_);
+  marking_bitmap_ = nullptr;
+}
+
+}  // namespace internal
+}  // namespace v8
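The constructor and HasHeaderSentinel above encode the page-walking trick used by MemoryChunk::FromAnyPointerAddress: a chunk header stores, at a fixed slot, the tagged pointer of its own page start — a value no ordinary object field may contain — so scanning backwards one page at a time stops exactly at the owning header. An illustrative stand-alone model (the constants, tag value, and function names here are assumptions for the sketch, not V8's real layout):

#include <cstdint>

constexpr uintptr_t kPageSize = uintptr_t{1} << 19;  // assumed chunk alignment
constexpr uintptr_t kAlignmentMask = kPageSize - 1;
constexpr uintptr_t kSentinelOffset = 32;            // assumed header slot
constexpr uintptr_t kHeapObjectTag = 1;              // assumed pointer tag

uintptr_t BaseAddress(uintptr_t a) { return a & ~kAlignmentMask; }

bool HasHeaderSentinel(uintptr_t addr) {
  uintptr_t base = BaseAddress(addr);
  uintptr_t sentinel = *reinterpret_cast<uintptr_t*>(base + kSentinelOffset);
  return sentinel == base + kHeapObjectTag;  // impossible for a real field
}

// Interior pages of a large object fail the sentinel test; the object's
// first page passes, so the loop terminates at the chunk header.
uintptr_t ChunkFromAnyPointer(uintptr_t addr) {
  while (!HasHeaderSentinel(addr)) addr = BaseAddress(addr) - 1;
  return BaseAddress(addr);
}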
src/heap/basic-memory-chunk.h | 229 (new file)
@@ -0,0 +1,229 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_BASIC_MEMORY_CHUNK_H_
+#define V8_HEAP_BASIC_MEMORY_CHUNK_H_
+
+#include <type_traits>
+
+#include "src/base/atomic-utils.h"
+#include "src/common/globals.h"
+#include "src/heap/marking.h"
+
+namespace v8 {
+namespace internal {
+
+class MemoryChunk;
+
+class BasicMemoryChunk {
+ public:
+  enum Flag {
+    NO_FLAGS = 0u,
+    IS_EXECUTABLE = 1u << 0,
+    POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
+    POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
+    // A page in the from-space or a young large page that was not scavenged
+    // yet.
+    FROM_PAGE = 1u << 3,
+    // A page in the to-space or a young large page that was scavenged.
+    TO_PAGE = 1u << 4,
+    LARGE_PAGE = 1u << 5,
+    EVACUATION_CANDIDATE = 1u << 6,
+    NEVER_EVACUATE = 1u << 7,
+
+    // Large objects can have a progress bar in their page header. These
+    // objects are scanned in increments and will be kept black while being
+    // scanned. Even if the mutator writes to them they will be kept black
+    // and a white to grey transition is performed in the value.
+    HAS_PROGRESS_BAR = 1u << 8,
+
+    // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been
+    // promoted from new to old space during evacuation.
+    PAGE_NEW_OLD_PROMOTION = 1u << 9,
+
+    // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
+    // within the new space during evacuation.
+    PAGE_NEW_NEW_PROMOTION = 1u << 10,
+
+    // This flag is intended to be used for testing. Works only when both
+    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
+    // are set. It forces the page to become an evacuation candidate at the
+    // next candidate selection cycle.
+    FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
+
+    // This flag is intended to be used for testing.
+    NEVER_ALLOCATE_ON_PAGE = 1u << 12,
+
+    // The memory chunk is already logically freed, however the actual freeing
+    // still has to be performed.
+    PRE_FREED = 1u << 13,
+
+    // |POOLED|: When actually freeing this chunk, only uncommit and do not
+    // give up the reservation as we still reuse the chunk at some point.
+    POOLED = 1u << 14,
+
+    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
+    // has been aborted and needs special handling by the sweeper.
+    COMPACTION_WAS_ABORTED = 1u << 15,
+
+    // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
+    // on pages is sometimes aborted. The flag is used to avoid repeatedly
+    // triggering on the same page.
+    COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
+
+    // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
+    // to iterate the page.
+    SWEEP_TO_ITERATE = 1u << 17,
+
+    // |INCREMENTAL_MARKING|: Indicates whether incremental marking is
+    // currently enabled.
+    INCREMENTAL_MARKING = 1u << 18,
+    NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
+
+    // The memory chunk freeing bookkeeping has been performed but the chunk
+    // has not yet been freed.
+    UNREGISTERED = 1u << 20,
+
+    // The memory chunk belongs to the read-only heap and does not participate
+    // in garbage collection. This is used instead of owner for identity
+    // checking since read-only chunks have no owner once they are detached.
+    READ_ONLY_HEAP = 1u << 21,
+  };
+
+  static const intptr_t kAlignment =
+      (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+  static const intptr_t kAlignmentMask = kAlignment - 1;
+
+  BasicMemoryChunk(size_t size, Address area_start, Address area_end);
+
+  static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
+
+  Address address() const { return reinterpret_cast<Address>(this); }
+
+  size_t size() const { return size_; }
+  void set_size(size_t size) { size_ = size; }
+
+  Address area_start() const { return area_start_; }
+
+  Address area_end() const { return area_end_; }
+  void set_area_end(Address area_end) { area_end_ = area_end; }
+
+  size_t area_size() const {
+    return static_cast<size_t>(area_end() - area_start());
+  }
+
+  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+  void SetFlag(Flag flag) {
+    if (access_mode == AccessMode::NON_ATOMIC) {
+      flags_ |= flag;
+    } else {
+      base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
+    }
+  }
+
+  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+  bool IsFlagSet(Flag flag) const {
+    return (GetFlags<access_mode>() & flag) != 0;
+  }
+
+  void ClearFlag(Flag flag) { flags_ &= ~flag; }
+
+  // Set or clear multiple flags at a time. The flags in the mask are set to
+  // the value in "flags", the rest retain the current value in |flags_|.
+  void SetFlags(uintptr_t flags, uintptr_t mask) {
+    flags_ = (flags_ & ~mask) | (flags & mask);
+  }
+
+  // Return all current flags.
+  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+  uintptr_t GetFlags() const {
+    if (access_mode == AccessMode::NON_ATOMIC) {
+      return flags_;
+    } else {
+      return base::AsAtomicWord::Relaxed_Load(&flags_);
+    }
+  }
+
+  bool InReadOnlySpace() const { return IsFlagSet(READ_ONLY_HEAP); }
+
+  // TODO(v8:7464): Add methods for down casting to MemoryChunk.
+
+  bool Contains(Address addr) const {
+    return addr >= area_start() && addr < area_end();
+  }
+
+  // Checks whether |addr| can be a limit of addresses in this page. It's a
+  // limit if it's in the page, or if it's just after the last byte of the
+  // page.
+  bool ContainsLimit(Address addr) const {
+    return addr >= area_start() && addr <= area_end();
+  }
+
+  V8_EXPORT_PRIVATE static bool HasHeaderSentinel(Address slot_addr);
+
+  void ReleaseMarkingBitmap();
+
+  static const intptr_t kSizeOffset = 0;
+  static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
+  static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
+  static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize;
+  static const intptr_t kHeaderSentinelOffset =
+      kHeapOffset + kSystemPointerSize;
+
+  static const size_t kHeaderSize =
+      kSizeOffset + kSizetSize  // size_t size
+      + kUIntptrSize            // uintptr_t flags_
+      + kSystemPointerSize      // Bitmap* marking_bitmap_
+      + kSystemPointerSize      // Heap* heap_
+      + kSystemPointerSize      // Address header_sentinel_
+      + kSystemPointerSize      // Address area_start_
+      + kSystemPointerSize;     // Address area_end_
+
+ protected:
+  // Overall size of the chunk, including the header and guards.
+  size_t size_;
+
+  uintptr_t flags_ = NO_FLAGS;
+
+  Bitmap* marking_bitmap_ = nullptr;
+
+  // TODO(v8:7464): Find a way to remove this.
+  // This goes against the spirit of BasicMemoryChunk, but until C++14/17
+  // is the default it needs to live here because MemoryChunk is not standard
+  // layout under C++11.
+  Heap* heap_;
+
+  // This is used to distinguish the memory chunk header from the interior of
+  // a large page. The memory chunk header stores here an impossible tagged
+  // pointer: the tagged pointer of the page start. A field in a large object
+  // is guaranteed to not contain such a pointer.
+  Address header_sentinel_;
+
+  // Start and end of allocatable memory on this chunk.
+  Address area_start_;
+  Address area_end_;
+
+  friend class BasicMemoryChunkValidator;
+};
+
+STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);
+
+class BasicMemoryChunkValidator {
+  // Computed offsets should match the compiler generated ones.
+  STATIC_ASSERT(BasicMemoryChunk::kSizeOffset ==
+                offsetof(BasicMemoryChunk, size_));
+  STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
+                offsetof(BasicMemoryChunk, flags_));
+  STATIC_ASSERT(BasicMemoryChunk::kMarkBitmapOffset ==
+                offsetof(BasicMemoryChunk, marking_bitmap_));
+  STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
+                offsetof(BasicMemoryChunk, heap_));
+  STATIC_ASSERT(BasicMemoryChunk::kHeaderSentinelOffset ==
+                offsetof(BasicMemoryChunk, header_sentinel_));
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_BASIC_MEMORY_CHUNK_H_
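A worked example of the SetFlags(flags, mask) update rule declared above: bits inside the mask take their value from the flags argument, bits outside it are preserved (the values here are arbitrary):

#include <cassert>
#include <cstdint>

int main() {
  uintptr_t flags_ = 0xA;  // current state: binary 1010, bits 3 and 1 set
  uintptr_t mask = 0x6;    // binary 0110: only bits 2 and 1 may change
  uintptr_t flags = 0x4;   // binary 0100: request bit 2 on, bit 1 off
  flags_ = (flags_ & ~mask) | (flags & mask);  // the SetFlags body
  assert(flags_ == 0xC);   // binary 1100: bit 3 kept, bit 2 set, bit 1 cleared
  return 0;
}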
src/heap/heap-write-barrier-inl.h
@@ -47,9 +47,9 @@ V8_EXPORT_PRIVATE void Heap_GenerationalEphemeronKeyBarrierSlow(
 namespace heap_internals {
 
 struct MemoryChunk {
-  static constexpr uintptr_t kFlagsOffset = sizeof(size_t);
+  static constexpr uintptr_t kFlagsOffset = kSizetSize;
   static constexpr uintptr_t kHeapOffset =
-      kFlagsOffset + kUIntptrSize + 4 * kSystemPointerSize;
+      kSizetSize + kUIntptrSize + kSystemPointerSize;
   static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
   static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3;
   static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4;
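The mirrored kHeapOffset shrinks by three words because the three-pointer-wide VirtualMemory reservation_ no longer sits between the marking bitmap and heap_: it now lives in MemoryChunk, after the BasicMemoryChunk header. Spelled out in a sketch (the word arithmetic is the point; the exact types are illustrative):

#include <cstddef>
#include <cstdint>

constexpr uintptr_t kWord = sizeof(void*);
// Old prefix before heap_: size_, flags_, marking_bitmap_, 3-word reservation_
constexpr uintptr_t kOldHeapOffset =
    sizeof(size_t) + sizeof(uintptr_t) + 4 * kWord;
// New prefix before heap_: size_, flags_, marking_bitmap_
constexpr uintptr_t kNewHeapOffset =
    sizeof(size_t) + sizeof(uintptr_t) + 1 * kWord;
static_assert(kOldHeapOffset - kNewHeapOffset == 3 * kWord,
              "reservation_ moved out of the shared prefix");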
src/heap/heap.cc
@@ -6349,22 +6349,6 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
   return true;
 }
 
-static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
-                  heap_internals::MemoryChunk::kMarkingBit,
-              "Incremental marking flag inconsistent");
-static_assert(MemoryChunk::Flag::FROM_PAGE ==
-                  heap_internals::MemoryChunk::kFromPageBit,
-              "From page flag inconsistent");
-static_assert(MemoryChunk::Flag::TO_PAGE ==
-                  heap_internals::MemoryChunk::kToPageBit,
-              "To page flag inconsistent");
-static_assert(MemoryChunk::kFlagsOffset ==
-                  heap_internals::MemoryChunk::kFlagsOffset,
-              "Flag offset inconsistent");
-static_assert(MemoryChunk::kHeapOffset ==
-                  heap_internals::MemoryChunk::kHeapOffset,
-              "Heap offset inconsistent");
-
 void Heap::SetEmbedderStackStateForNextFinalizaton(
     EmbedderHeapTracer::EmbedderStackState stack_state) {
   local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
src/heap/spaces-inl.h
@@ -204,13 +204,6 @@ bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
   return false;
 }
 
-bool MemoryChunk::HasHeaderSentinel(Address slot_addr) {
-  Address base = BaseAddress(slot_addr);
-  if (slot_addr < base + kHeaderSize) return false;
-  return HeapObject::FromAddress(base) ==
-         ObjectSlot(base + kHeaderSentinelOffset).Relaxed_Load();
-}
-
 MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
   while (!HasHeaderSentinel(addr)) {
     addr = BaseAddress(addr) - 1;
src/heap/spaces.cc
@@ -690,16 +690,11 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                      Executability executable, Space* owner,
                                      VirtualMemory reservation) {
   MemoryChunk* chunk = FromAddress(base);
-
   DCHECK_EQ(base, chunk->address());
-
-  chunk->heap_ = heap;
-  chunk->size_ = size;
-  chunk->header_sentinel_ = HeapObject::FromAddress(base).ptr();
-  DCHECK(HasHeaderSentinel(area_start));
-  chunk->area_start_ = area_start;
-  chunk->area_end_ = area_end;
-  chunk->flags_ = Flags(NO_FLAGS);
+  new (chunk) BasicMemoryChunk(size, area_start, area_end);
+  DCHECK(HasHeaderSentinel(area_start));
+
+  chunk->heap_ = heap;
   chunk->set_owner(owner);
   chunk->InitializeReservedMemory();
   base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
@@ -718,7 +713,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->allocated_bytes_ = chunk->area_size();
   chunk->wasted_memory_ = 0;
   chunk->young_generation_bitmap_ = nullptr;
-  chunk->marking_bitmap_ = nullptr;
   chunk->local_tracker_ = nullptr;
 
   chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
@@ -728,21 +722,16 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
 
   chunk->categories_ = nullptr;
 
-  chunk->AllocateMarkingBitmap();
-  heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
-                                                                        0);
   if (owner->identity() == RO_SPACE) {
     heap->incremental_marking()
         ->non_atomic_marking_state()
         ->bitmap(chunk)
         ->MarkAllBits();
     chunk->SetFlag(READ_ONLY_HEAP);
+  } else {
+    heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
+                                                                          0);
   }
 
-  DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
-  DCHECK_EQ(kHeapOffset, OFFSET_OF(MemoryChunk, heap_));
-
   if (executable == EXECUTABLE) {
     chunk->SetFlag(IS_EXECUTABLE);
     if (heap->write_protect_code_memory()) {
@@ -1135,15 +1124,15 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
                                         Address new_area_end) {
   VirtualMemory* reservation = chunk->reserved_memory();
   DCHECK(reservation->IsReserved());
-  chunk->size_ -= bytes_to_free;
-  chunk->area_end_ = new_area_end;
+  chunk->set_size(chunk->size() - bytes_to_free);
+  chunk->set_area_end(new_area_end);
   if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
     // Add guard page at the end.
     size_t page_size = GetCommitPageSize();
-    DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
+    DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size));
     DCHECK_EQ(chunk->address() + chunk->size(),
               chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
-    reservation->SetPermissions(chunk->area_end_, page_size,
+    reservation->SetPermissions(chunk->area_end(), page_size,
                                 PageAllocator::kNoAccess);
   }
   // On e.g. Windows, a reservation may be larger than a page and releasing
@@ -1421,7 +1410,7 @@ template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
 
 template <RememberedSetType type>
 SlotSet* MemoryChunk::AllocateSlotSet() {
-  SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
+  SlotSet* slot_set = AllocateAndInitializeSlotSet(size(), address());
   SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
       &slot_set_[type], nullptr, slot_set);
   if (old_slot_set != nullptr) {
@@ -1540,17 +1529,6 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
   young_generation_bitmap_ = nullptr;
 }
 
-void MemoryChunk::AllocateMarkingBitmap() {
-  DCHECK_NULL(marking_bitmap_);
-  marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
-}
-
-void MemoryChunk::ReleaseMarkingBitmap() {
-  DCHECK_NOT_NULL(marking_bitmap_);
-  free(marking_bitmap_);
-  marking_bitmap_ = nullptr;
-}
-
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
 
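Note that MemoryChunk::Initialize now constructs the inherited header with placement new directly inside the reserved region, instead of assigning the base fields one by one. A sketch of that pattern with stand-in types (Header and InitializeChunk are hypothetical; V8's version runs on memory obtained from its own allocator):

#include <cstddef>
#include <new>

struct Header {
  std::size_t size;
  explicit Header(std::size_t s) : size(s) {}
};

// Construct the header fields in place in already-reserved raw memory;
// placement new runs the constructor without allocating anything itself.
Header* InitializeChunk(void* reserved_base, std::size_t size) {
  return new (reserved_base) Header(size);
}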
src/heap/spaces.h
@@ -20,6 +20,7 @@
 #include "src/base/platform/mutex.h"
 #include "src/common/globals.h"
 #include "src/flags/flags.h"
+#include "src/heap/basic-memory-chunk.h"
 #include "src/heap/heap.h"
 #include "src/heap/invalidated-slots.h"
 #include "src/heap/marking.h"
@@ -541,7 +542,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
 // It is divided into the header and the body. Chunk start is always
 // 1MB aligned. Start of the body is aligned so it can accommodate
 // any heap object.
-class MemoryChunk {
+class MemoryChunk : public BasicMemoryChunk {
  public:
   // Use with std data structures.
   struct Hasher {
@@ -550,79 +551,6 @@ class MemoryChunk {
     }
   };
 
-  enum Flag {
-    NO_FLAGS = 0u,
-    IS_EXECUTABLE = 1u << 0,
-    POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
-    POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
-    // A page in the from-space or a young large page that was not scavenged
-    // yet.
-    FROM_PAGE = 1u << 3,
-    // A page in the to-space or a young large page that was scavenged.
-    TO_PAGE = 1u << 4,
-    LARGE_PAGE = 1u << 5,
-    EVACUATION_CANDIDATE = 1u << 6,
-    NEVER_EVACUATE = 1u << 7,
-
-    // Large objects can have a progress bar in their page header. These
-    // objects are scanned in increments and will be kept black while being
-    // scanned. Even if the mutator writes to them they will be kept black
-    // and a white to grey transition is performed in the value.
-    HAS_PROGRESS_BAR = 1u << 8,
-
-    // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been
-    // promoted from new to old space during evacuation.
-    PAGE_NEW_OLD_PROMOTION = 1u << 9,
-
-    // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
-    // within the new space during evacuation.
-    PAGE_NEW_NEW_PROMOTION = 1u << 10,
-
-    // This flag is intended to be used for testing. Works only when both
-    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
-    // are set. It forces the page to become an evacuation candidate at the
-    // next candidate selection cycle.
-    FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
-
-    // This flag is intended to be used for testing.
-    NEVER_ALLOCATE_ON_PAGE = 1u << 12,
-
-    // The memory chunk is already logically freed, however the actual freeing
-    // still has to be performed.
-    PRE_FREED = 1u << 13,
-
-    // |POOLED|: When actually freeing this chunk, only uncommit and do not
-    // give up the reservation as we still reuse the chunk at some point.
-    POOLED = 1u << 14,
-
-    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
-    // has been aborted and needs special handling by the sweeper.
-    COMPACTION_WAS_ABORTED = 1u << 15,
-
-    // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
-    // on pages is sometimes aborted. The flag is used to avoid repeatedly
-    // triggering on the same page.
-    COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
-
-    // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
-    // to iterate the page.
-    SWEEP_TO_ITERATE = 1u << 17,
-
-    // |INCREMENTAL_MARKING|: Indicates whether incremental marking is
-    // currently enabled.
-    INCREMENTAL_MARKING = 1u << 18,
-    NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
-
-    // The memory chunk freeing bookkeeping has been performed but the chunk
-    // has not yet been freed.
-    UNREGISTERED = 1u << 20,
-
-    // The memory chunk belongs to the read-only heap and does not participate
-    // in garbage collection. This is used instead of owner for identity
-    // checking since read-only chunks have no owner once they are detached.
-    READ_ONLY_HEAP = 1u << 21,
-  };
-
   using Flags = uintptr_t;
 
   static const Flags kPointersToHereAreInterestingMask =
@@ -651,34 +579,12 @@ class MemoryChunk {
     kSweepingInProgress,
   };
 
-  static const intptr_t kAlignment =
-      (static_cast<uintptr_t>(1) << kPageSizeBits);
-
-  static const intptr_t kAlignmentMask = kAlignment - 1;
-
-  static const intptr_t kSizeOffset = 0;
-  static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
-  static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
-  static const intptr_t kReservationOffset =
-      kMarkBitmapOffset + kSystemPointerSize;
-  static const intptr_t kHeapOffset =
-      kReservationOffset + 3 * kSystemPointerSize;
-  static const intptr_t kHeaderSentinelOffset =
-      kHeapOffset + kSystemPointerSize;
-
   static const size_t kHeaderSize =
-      kSizeOffset               // NOLINT
-      + kSizetSize              // size_t size
-      + kUIntptrSize            // uintptr_t flags_
-      + kSystemPointerSize      // Bitmap* marking_bitmap_
-      + 3 * kSystemPointerSize  // VirtualMemory reservation_
-      + kSystemPointerSize      // Heap* heap_
-      + kSystemPointerSize      // Address header_sentinel_
-      + kSystemPointerSize      // Address area_start_
-      + kSystemPointerSize      // Address area_end_
-      + kSystemPointerSize      // Address owner_
-      + kSizetSize              // size_t progress_bar_
-      + kIntptrSize             // intptr_t live_byte_count_
+      BasicMemoryChunk::kHeaderSize  // Parent size.
+      + 3 * kSystemPointerSize       // VirtualMemory reservation_
+      + kSystemPointerSize           // Address owner_
+      + kSizetSize                   // size_t progress_bar_
+      + kIntptrSize                  // intptr_t live_byte_count_
       + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // SlotSet* array
       + kSystemPointerSize *
             NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
@@ -706,8 +612,6 @@ class MemoryChunk {
   // Maximum number of nested code memory modification scopes.
   static const int kMaxWriteUnprotectCounter = 3;
 
-  static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
-
   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromAddress(Address a) {
     return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
@@ -743,22 +647,8 @@ class MemoryChunk {
 
   void DiscardUnusedMemory(Address addr, size_t size);
 
-  Address address() const {
-    return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
-  }
-
   base::Mutex* mutex() { return mutex_; }
 
-  bool Contains(Address addr) {
-    return addr >= area_start() && addr < area_end();
-  }
-
-  // Checks whether |addr| can be a limit of addresses in this page. It's a
-  // limit if it's in the page, or if it's just after the last byte of the page.
-  bool ContainsLimit(Address addr) {
-    return addr >= area_start() && addr <= area_end();
-  }
-
   void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
     concurrent_sweeping_ = state;
   }
@@ -769,9 +659,6 @@ class MemoryChunk {
 
   bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
 
-  size_t size() const { return size_; }
-  void set_size(size_t size) { size_ = size; }
-
   inline Heap* heap() const {
     DCHECK_NOT_NULL(heap_);
     return heap_;
@@ -830,13 +717,6 @@ class MemoryChunk {
   void AllocateYoungGenerationBitmap();
   void ReleaseYoungGenerationBitmap();
 
-  void AllocateMarkingBitmap();
-  void ReleaseMarkingBitmap();
-
-  Address area_start() { return area_start_; }
-  Address area_end() { return area_end_; }
-  size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
-
   int FreeListsLength();
 
   // Approximate amount of physical memory committed for this chunk.
@@ -881,36 +761,6 @@ class MemoryChunk {
     return this->address() + (index << kTaggedSizeLog2);
   }
 
-  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
-  void SetFlag(Flag flag) {
-    if (access_mode == AccessMode::NON_ATOMIC) {
-      flags_ |= flag;
-    } else {
-      base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
-    }
-  }
-
-  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
-  bool IsFlagSet(Flag flag) const {
-    return (GetFlags<access_mode>() & flag) != 0;
-  }
-
-  void ClearFlag(Flag flag) { flags_ &= ~flag; }
-  // Set or clear multiple flags at a time. The flags in the mask are set to
-  // the value in "flags", the rest retain the current value in |flags_|.
-  void SetFlags(uintptr_t flags, uintptr_t mask) {
-    flags_ = (flags_ & ~mask) | (flags & mask);
-  }
-
-  // Return all current flags.
-  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
-  uintptr_t GetFlags() const {
-    if (access_mode == AccessMode::NON_ATOMIC) {
-      return flags_;
-    } else {
-      return base::AsAtomicWord::Relaxed_Load(&flags_);
-    }
-  }
-
   bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
 
@@ -938,12 +788,11 @@ class MemoryChunk {
     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
   }
 
-  bool IsFromPage() const { return (flags_ & FROM_PAGE) != 0; }
-  bool IsToPage() const { return (flags_ & TO_PAGE) != 0; }
-  bool IsLargePage() const { return (flags_ & LARGE_PAGE) != 0; }
-
+  bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
+  bool IsToPage() const { return IsFlagSet(TO_PAGE); }
+  bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
   bool InYoungGeneration() const {
-    return (flags_ & kIsInYoungGenerationMask) != 0;
+    return (GetFlags() & kIsInYoungGenerationMask) != 0;
   }
   bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
   bool InNewLargeObjectSpace() const {
@@ -957,9 +806,6 @@ class MemoryChunk {
 
   void set_owner(Space* space) { owner_ = space; }
 
-  bool InReadOnlySpace() const {
-    return IsFlagSet(MemoryChunk::READ_ONLY_HEAP);
-  }
   bool IsWritable() const {
     // If this is a read-only space chunk but heap_ is non-null, it has not yet
     // been sealed and can be written to.
@@ -970,8 +816,6 @@ class MemoryChunk {
   // (like read-only chunks have).
   inline AllocationSpace owner_identity() const;
 
-  static inline bool HasHeaderSentinel(Address slot_addr);
-
   // Emits a memory barrier. For TSAN builds the other thread needs to perform
   // MemoryChunk::synchronized_heap() to simulate the barrier.
   void InitializationMemoryFence();
@@ -1024,29 +868,12 @@ class MemoryChunk {
     return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
   }
 
-  size_t size_;
-  uintptr_t flags_;
-
-  Bitmap* marking_bitmap_;
-
   // If the chunk needs to remember its memory reservation, it is stored here.
   VirtualMemory reservation_;
 
-  Heap* heap_;
-
-  // This is used to distinguish the memory chunk header from the interior of
-  // a large page. The memory chunk header stores here an impossible tagged
-  // pointer: the tagged pointer of the page start. A field in a large object
-  // is guaranteed to not contain such a pointer.
-  Address header_sentinel_;
-
   // The space owning this memory chunk.
   std::atomic<Space*> owner_;
 
-  // Start and end of allocatable memory on this chunk.
-  Address area_start_;
-  Address area_end_;
-
   // Used by the incremental marker to keep track of the scanning progress in
   // large objects that have a progress bar and are scanned in increments.
   std::atomic<size_t> progress_bar_;
@@ -1112,10 +939,8 @@ class MemoryChunk {
   friend class ConcurrentMarkingState;
   friend class IncrementalMarkingState;
   friend class MajorAtomicMarkingState;
   friend class MajorMarkingState;
   friend class MajorNonAtomicMarkingState;
   friend class MemoryAllocator;
-  friend class MemoryChunkValidator;
   friend class MinorMarkingState;
   friend class MinorNonAtomicMarkingState;
   friend class PagedSpace;
@@ -1189,8 +1014,8 @@ class Page : public MemoryChunk {
   // Returns the address for a given offset in this page.
   Address OffsetToAddress(size_t offset) {
     Address address_in_page = address() + offset;
-    DCHECK_GE(address_in_page, area_start_);
-    DCHECK_LT(address_in_page, area_end_);
+    DCHECK_GE(address_in_page, area_start());
+    DCHECK_LT(address_in_page, area_end());
     return address_in_page;
   }
 
@@ -1291,16 +1116,11 @@ class LargePage : public MemoryChunk {
   friend class MemoryAllocator;
 };
 
-class MemoryChunkValidator {
-  // Computed offsets should match the compiler generated ones.
-  STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
-
-  // Validate our estimates on the header size.
-  STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
-  STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
-  STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
-};
-
+// Validate our estimates on the header size.
+STATIC_ASSERT(sizeof(BasicMemoryChunk) <= BasicMemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
 
 // The process-wide singleton that keeps track of code range regions with the
 // intention to reuse free code range regions as a workaround for CFG memory