[heap] Split out memory-chunk.h etc from spaces.h

Also makes memory-chunk.h accessible from outside heap/, which allows the
removal of some heap-inl.h includes.

Bug: v8:10473, v8:10496
Change-Id: Iec4fc5ce8ad201f6ee5fd924cc3cd935324429fc
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2172088
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Auto-Submit: Dan Elphick <delphick@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67551}
Author: Dan Elphick <delphick@chromium.org>
Date: 2020-05-04 13:24:44 +01:00 (committed by Commit Bot)
Parent: 5bebdaef42
Commit: 3795f5bbfc
59 changed files with 773 additions and 669 deletions
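The practical effect at call sites: code that only needs MemoryChunk no longer
has to pull in heap-inl.h. A minimal sketch of the new include pattern, using a
hypothetical consumer function (FromHeapObject and InYoungGeneration are
declared in the new header added below):

// Before: MemoryChunk was only reachable through the heavyweight include.
// #include "src/heap/heap-inl.h"  // For MemoryChunk.
// After: the dedicated header suffices.
#include "src/heap/memory-chunk.h"

namespace v8 {
namespace internal {

// Hypothetical helper, for illustration only.
bool ObjectIsYoung(HeapObject object) {
  return MemoryChunk::FromHeapObject(object)->InYoungGeneration();
}

}  // namespace internal
}  // namespace v8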


@@ -2444,6 +2444,9 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/marking-worklist.h",
"src/heap/marking.cc",
"src/heap/marking.h",
"src/heap/memory-chunk-inl.h",
"src/heap/memory-chunk.cc",
"src/heap/memory-chunk.h",
"src/heap/memory-measurement-inl.h",
"src/heap/memory-measurement.cc",
"src/heap/memory-measurement.h",


@@ -12,11 +12,16 @@ include_rules = [
"+src/heap/embedder-tracing.h",
"+src/heap/factory.h",
"+src/heap/factory-inl.h",
# TODO(v8:10496): Don't expose so much (through transitive includes) outside
# of heap/.
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
"+src/heap/heap-write-barrier-inl.h",
"+src/heap/heap-write-barrier.h",
"+src/heap/local-heap.h",
# TODO(v8:10496): Don't expose memory chunk outside of heap/.
"+src/heap/memory-chunk.h",
"+src/heap/memory-chunk-inl.h",
"+src/heap/off-thread-factory-inl.h",
"+src/heap/off-thread-factory.h",
"+src/heap/off-thread-heap.h",


@@ -7,7 +7,8 @@
#include "src/builtins/builtins.h"
#include "src/codegen/code-stub-assembler.h"
#include "src/codegen/macro-assembler.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/execution/frame-constants.h"
#include "src/heap/memory-chunk.h"
#include "src/ic/accessor-assembler.h"
#include "src/ic/keyed-store-generic.h"
#include "src/logging/counters.h"


@@ -17,7 +17,7 @@
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/numbers/double.h"


@@ -16,7 +16,7 @@
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/runtime/runtime.h"


@@ -12,7 +12,8 @@
#include "src/execution/frames-inl.h"
#include "src/execution/frames.h"
#include "src/execution/protectors.h"
#include "src/heap/heap-inl.h" // For Page/MemoryChunk. TODO(jkummerow): Drop.
#include "src/heap/heap-inl.h" // For MemoryChunk. TODO(jkummerow): Drop.
#include "src/heap/memory-chunk.h"
#include "src/logging/counters.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell.h"


@@ -15,7 +15,7 @@
#include "src/debug/debug.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/runtime/runtime.h"


@@ -16,7 +16,7 @@
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"


@@ -16,7 +16,7 @@
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"


@@ -16,7 +16,7 @@
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/runtime/runtime.h"


@@ -16,7 +16,7 @@
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/smi.h"


@@ -18,7 +18,7 @@
#include "src/common/globals.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/objects-inl.h"


@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/backend/code-generator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/heap/memory-chunk.h"
#include "src/numbers/double.h"
#include "src/utils/boxed-float.h"
#include "src/wasm/wasm-code-manager.h"


@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/backend/code-generator.h"
#include "src/codegen/arm64/assembler-arm64-inl.h"
#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/execution/frame-constants.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/heap/memory-chunk.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"


@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/backend/code-generator.h"
#include "src/base/overflowing-math.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
@@ -11,12 +9,13 @@
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/heap/memory-chunk.h"
#include "src/objects/smi.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"


@@ -11,7 +11,7 @@
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/heap/memory-chunk.h"
#include "src/wasm/wasm-code-manager.h"
namespace v8 {


@@ -12,7 +12,7 @@
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/heap/memory-chunk.h"
#include "src/wasm/wasm-code-manager.h"
namespace v8 {


@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/backend/code-generator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/heap/memory-chunk.h"
#include "src/numbers/double.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"


@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/backend/code-generator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/heap/memory-chunk.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"


@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/backend/code-generator.h"
#include <limits>
#include "src/base/overflowing-math.h"
@@ -11,10 +9,11 @@
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/x64/assembler-x64.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/heap/memory-chunk.h"
#include "src/objects/smi.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"


@@ -9,6 +9,7 @@
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/spaces.h"
#define TRACE_BS(...) \


@@ -17,6 +17,7 @@
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/objects-visiting-inl.h"


@@ -28,6 +28,7 @@ namespace internal {
class Heap;
class Isolate;
class MajorNonAtomicMarkingState;
class MemoryChunk;
struct WeakObjects;
struct MemoryChunkData {


@@ -10,6 +10,7 @@
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/off-thread-factory-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/logging/log.h"


@@ -20,6 +20,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/init/bootstrapper.h"


@@ -23,6 +23,7 @@
// leak heap internals to users of this interface!
#include "src/execution/isolate-data.h"
#include "src/execution/isolate.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/allocation-site-inl.h"


@@ -45,6 +45,7 @@
#include "src/heap/local-heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/object-stats.h"


@@ -76,6 +76,7 @@ class JSFinalizationRegistry;
class LocalEmbedderHeapTracer;
class LocalHeap;
class MemoryAllocator;
class MemoryChunk;
class MemoryMeasurement;
class MemoryReducer;
class MinorMarkCompactCollector;


@@ -16,6 +16,7 @@
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"


@@ -3,7 +3,9 @@
// found in the LICENSE file.
#include "src/heap/invalidated-slots.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
#include "src/objects/objects-inl.h"


@@ -9,6 +9,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces-inl.h"


@@ -13,6 +13,7 @@
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"


@@ -8,6 +8,7 @@
#include "src/common/globals.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"

src/heap/memory-chunk-inl.h (new file, 50 lines)

@@ -0,0 +1,50 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_MEMORY_CHUNK_INL_H_
#define V8_HEAP_MEMORY_CHUNK_INL_H_
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces-inl.h"
namespace v8 {
namespace internal {
void MemoryChunk::IncrementExternalBackingStoreBytes(
ExternalBackingStoreType type, size_t amount) {
#ifndef V8_ENABLE_THIRD_PARTY_HEAP
base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
owner()->IncrementExternalBackingStoreBytes(type, amount);
#endif
}
void MemoryChunk::DecrementExternalBackingStoreBytes(
ExternalBackingStoreType type, size_t amount) {
#ifndef V8_ENABLE_THIRD_PARTY_HEAP
base::CheckedDecrement(&external_backing_store_bytes_[type], amount);
owner()->DecrementExternalBackingStoreBytes(type, amount);
#endif
}
void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
MemoryChunk* from,
MemoryChunk* to,
size_t amount) {
DCHECK_NOT_NULL(from->owner());
DCHECK_NOT_NULL(to->owner());
base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
Space::MoveExternalBackingStoreBytes(type, from->owner(), to->owner(),
amount);
}
AllocationSpace MemoryChunk::owner_identity() const {
if (InReadOnlySpace()) return RO_SPACE;
return owner()->identity();
}
} // namespace internal
} // namespace v8
#endif // V8_HEAP_MEMORY_CHUNK_INL_H_
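The three inline functions above keep two counters in step: every chunk-level
increment or decrement of external (off-heap) bytes is mirrored on the owning
space, and a move between chunks leaves the overall total unchanged. A
standalone model of that invariant, with the chunk/space coupling reduced to
plain atomics (all names here are illustrative, not V8's):

#include <atomic>
#include <cassert>
#include <cstddef>

// Model: a "chunk" tracks its own external bytes and forwards every delta to
// its owning "space", mirroring Increment/DecrementExternalBackingStoreBytes.
struct SpaceModel {
  std::atomic<size_t> external_bytes{0};
};

struct ChunkModel {
  std::atomic<size_t> external_bytes{0};
  SpaceModel* owner = nullptr;

  void Increment(size_t amount) {
    external_bytes.fetch_add(amount);
    owner->external_bytes.fetch_add(amount);
  }
  void Decrement(size_t amount) {
    external_bytes.fetch_sub(amount);
    owner->external_bytes.fetch_sub(amount);
  }
  // A move adjusts both chunk counters; the space aggregates only change when
  // the owners differ, so the grand total is always preserved.
  static void Move(ChunkModel* from, ChunkModel* to, size_t amount) {
    from->external_bytes.fetch_sub(amount);
    to->external_bytes.fetch_add(amount);
    if (from->owner != to->owner) {
      from->owner->external_bytes.fetch_sub(amount);
      to->owner->external_bytes.fetch_add(amount);
    }
  }
};

int main() {
  SpaceModel space;
  ChunkModel a, b;
  a.owner = b.owner = &space;
  a.Increment(100);
  ChunkModel::Move(&a, &b, 40);
  assert(a.external_bytes == 60 && b.external_bytes == 40);
  assert(space.external_bytes == 100);  // Unchanged by the intra-space move.
  return 0;
}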

src/heap/memory-chunk.cc (new file, 157 lines)

@@ -0,0 +1,157 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/memory-chunk.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/spaces.h"
namespace v8 {
namespace internal {
void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
base::AddressRegion memory_area =
MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
if (memory_area.size() != 0) {
MemoryAllocator* memory_allocator = heap_->memory_allocator();
v8::PageAllocator* page_allocator =
memory_allocator->page_allocator(executable());
CHECK(page_allocator->DiscardSystemPages(
reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
}
}
size_t MemoryChunkLayout::CodePageGuardStartOffset() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
}
size_t MemoryChunkLayout::CodePageGuardSize() {
return MemoryAllocator::GetCommitPageSize();
}
intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
return CodePageGuardStartOffset() + CodePageGuardSize();
}
intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
// We are guarding code pages: the last OS page will be protected as
// non-writable.
return Page::kPageSize -
static_cast<int>(MemoryAllocator::GetCommitPageSize());
}
size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
DCHECK_LE(kMaxRegularHeapObjectSize, memory);
return memory;
}
intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
return RoundUp(MemoryChunk::kHeaderSize, kTaggedSize);
}
size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
AllocationSpace space) {
if (space == CODE_SPACE) {
return ObjectStartOffsetInCodePage();
}
return ObjectStartOffsetInDataPage();
}
size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
DCHECK_LE(kMaxRegularHeapObjectSize, memory);
return memory;
}
size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
AllocationSpace space) {
if (space == CODE_SPACE) {
return AllocatableMemoryInCodePage();
}
return AllocatableMemoryInDataPage();
}
#ifdef THREAD_SANITIZER
void MemoryChunk::SynchronizedHeapLoad() {
CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
InReadOnlySpace());
}
#endif
void MemoryChunk::InitializationMemoryFence() {
base::SeqCst_MemoryFence();
#ifdef THREAD_SANITIZER
// Since TSAN does not process memory fences, we use the following annotation
// to tell TSAN that there is no data race when emitting a
// InitializationMemoryFence. Note that the other thread still needs to
// perform MemoryChunk::synchronized_heap().
base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
reinterpret_cast<base::AtomicWord>(heap_));
#endif
}
void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
PageAllocator::Permission permission) {
DCHECK(permission == PageAllocator::kRead ||
permission == PageAllocator::kReadExecute);
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
// Decrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::MutexGuard guard(page_protection_change_mutex_);
if (write_unprotect_counter_ == 0) {
// This is a corner case that may happen when we have a
// CodeSpaceMemoryModificationScope open and this page was newly
// added.
return;
}
write_unprotect_counter_--;
DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
if (write_unprotect_counter_ == 0) {
Address protect_start =
address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAligned(protect_start, page_size));
size_t protect_size = RoundUp(area_size(), page_size);
CHECK(reservation_.SetPermissions(protect_start, protect_size, permission));
}
}
void MemoryChunk::SetReadable() {
DecrementWriteUnprotectCounterAndMaybeSetPermissions(PageAllocator::kRead);
}
void MemoryChunk::SetReadAndExecutable() {
DCHECK(!FLAG_jitless);
DecrementWriteUnprotectCounterAndMaybeSetPermissions(
PageAllocator::kReadExecute);
}
void MemoryChunk::SetReadAndWritable() {
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
// Incrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::MutexGuard guard(page_protection_change_mutex_);
write_unprotect_counter_++;
DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
if (write_unprotect_counter_ == 1) {
Address unprotect_start =
address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAligned(unprotect_start, page_size));
size_t unprotect_size = RoundUp(area_size(), page_size);
CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
PageAllocator::kReadWrite));
}
}
} // namespace internal
} // namespace v8
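SetReadAndWritable and DecrementWriteUnprotectCounterAndMaybeSetPermissions
above implement a nesting protocol: page permissions are only switched on the
0 -> 1 and 1 -> 0 counter transitions, so nested code-modification scopes are
cheap, and kMaxWriteUnprotectCounter bounds the nesting depth. A standalone
sketch of the protocol, with the real SetPermissions call replaced by a stub
(names are illustrative):

#include <cassert>
#include <cstdio>
#include <mutex>

// Model of a code page's write-unprotect counter. Only the outermost scope
// actually flips the (stubbed) page permissions.
class CodePageModel {
 public:
  static const int kMaxWriteUnprotectCounter = 3;  // As in MemoryChunk.

  void SetReadAndWritable() {
    std::lock_guard<std::mutex> guard(mutex_);
    ++counter_;
    assert(counter_ <= kMaxWriteUnprotectCounter);
    if (counter_ == 1) SetPermissions("read+write");  // First scope opens.
  }

  void SetReadAndExecutable() {
    std::lock_guard<std::mutex> guard(mutex_);
    // Corner case from the code above: the page was added while a
    // modification scope was already open.
    if (counter_ == 0) return;
    --counter_;
    if (counter_ == 0) SetPermissions("read+execute");  // Last scope closes.
  }

 private:
  void SetPermissions(const char* perms) {
    std::printf("page permissions -> %s\n", perms);
  }

  std::mutex mutex_;
  int counter_ = 0;
};

int main() {
  CodePageModel page;
  page.SetReadAndWritable();    // -> read+write
  page.SetReadAndWritable();    // Nested: no permission change.
  page.SetReadAndExecutable();  // Still nested: no change.
  page.SetReadAndExecutable();  // -> read+execute
  return 0;
}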

src/heap/memory-chunk.h (new file, 471 lines)

@@ -0,0 +1,471 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_MEMORY_CHUNK_H_
#define V8_HEAP_MEMORY_CHUNK_H_
#include <set>
#include <vector>
#include "src/base/macros.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk.h"
namespace v8 {
namespace internal {
class CodeObjectRegistry;
class FreeListCategory;
class LocalArrayBufferTracker;
class V8_EXPORT_PRIVATE MemoryChunkLayout {
public:
static size_t CodePageGuardStartOffset();
static size_t CodePageGuardSize();
static intptr_t ObjectStartOffsetInCodePage();
static intptr_t ObjectEndOffsetInCodePage();
static size_t AllocatableMemoryInCodePage();
static intptr_t ObjectStartOffsetInDataPage();
static size_t AllocatableMemoryInDataPage();
static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
};
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk : public BasicMemoryChunk {
public:
// Use with std data structures.
struct Hasher {
size_t operator()(MemoryChunk* const chunk) const {
return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
}
};
using Flags = uintptr_t;
static const Flags kPointersToHereAreInterestingMask =
POINTERS_TO_HERE_ARE_INTERESTING;
static const Flags kPointersFromHereAreInterestingMask =
POINTERS_FROM_HERE_ARE_INTERESTING;
static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
static const Flags kIsLargePageMask = LARGE_PAGE;
static const Flags kSkipEvacuationSlotsRecordingMask =
kEvacuationCandidateMask | kIsInYoungGenerationMask;
// |kDone|: The page state when sweeping is complete or sweeping must not be
// performed on that page. Sweeper threads that are done with their work
// will set this value and not touch the page anymore.
// |kPending|: This page is ready for parallel sweeping.
// |kInProgress|: This page is currently swept by a sweeper thread.
enum class ConcurrentSweepingState : intptr_t {
kDone,
kPending,
kInProgress,
};
static const size_t kHeaderSize =
BasicMemoryChunk::kHeaderSize // Parent size.
+ 3 * kSystemPointerSize // VirtualMemory reservation_
+ kSystemPointerSize // Address owner_
+ kSizetSize // size_t progress_bar_
+ kIntptrSize // intptr_t live_byte_count_
+ kSystemPointerSize // SlotSet* sweeping_slot_set_
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // InvalidatedSlots* array
+ kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ kSystemPointerSize // base::Mutex* mutex_
+ kSystemPointerSize // std::atomic<ConcurrentSweepingState>
// concurrent_sweeping_
+ kSystemPointerSize // base::Mutex* page_protection_change_mutex_
+ kSystemPointerSize // uintptr_t write_unprotect_counter_
+ kSizetSize * ExternalBackingStoreType::kNumTypes
// std::atomic<size_t> external_backing_store_bytes_
+ kSizetSize // size_t allocated_bytes_
+ kSizetSize // size_t wasted_memory_
+ kSystemPointerSize * 2 // heap::ListNode
+ kSystemPointerSize // FreeListCategory** categories_
+ kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
+ kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
+ kSystemPointerSize // Bitmap* young_generation_bitmap_
+ kSystemPointerSize // CodeObjectRegistry* code_object_registry_
+ kSystemPointerSize; // PossiblyEmptyBuckets possibly_empty_buckets_
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
// Maximum number of nested code memory modification scopes.
static const int kMaxWriteUnprotectCounter = 3;
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromHeapObject(HeapObject o) {
DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
}
void SetOldGenerationPageFlags(bool is_marking);
void SetYoungGenerationPageFlags(bool is_marking);
static inline void UpdateHighWaterMark(Address mark) {
if (mark == kNullAddress) return;
// Need to subtract one from the mark because when a chunk is full the
// top points to the next address after the chunk, which effectively belongs
// to another chunk. See the comment to Page::FromAllocationAreaAddress.
MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
while ((new_mark > old_mark) &&
!chunk->high_water_mark_.compare_exchange_weak(
old_mark, new_mark, std::memory_order_acq_rel)) {
}
}
static inline void MoveExternalBackingStoreBytes(
ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
size_t amount);
void DiscardUnusedMemory(Address addr, size_t size);
base::Mutex* mutex() { return mutex_; }
void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
concurrent_sweeping_ = state;
}
ConcurrentSweepingState concurrent_sweeping_state() {
return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
}
bool SweepingDone() {
return concurrent_sweeping_ == ConcurrentSweepingState::kDone;
}
inline Heap* heap() const {
DCHECK_NOT_NULL(heap_);
return heap_;
}
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race in
// mark-bit initialization. See MemoryChunk::Initialize for the corresponding
// release store.
void SynchronizedHeapLoad();
#endif
template <RememberedSetType type>
bool ContainsSlots() {
return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
invalidated_slots<type>() != nullptr;
}
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
SlotSet* slot_set() {
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
return slot_set_[type];
}
template <AccessMode access_mode = AccessMode::ATOMIC>
SlotSet* sweeping_slot_set() {
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicPointer::Acquire_Load(&sweeping_slot_set_);
return sweeping_slot_set_;
}
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
TypedSlotSet* typed_slot_set() {
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
return typed_slot_set_[type];
}
template <RememberedSetType type>
V8_EXPORT_PRIVATE SlotSet* AllocateSlotSet();
SlotSet* AllocateSweepingSlotSet();
SlotSet* AllocateSlotSet(SlotSet** slot_set);
// Not safe to be called concurrently.
template <RememberedSetType type>
void ReleaseSlotSet();
void ReleaseSlotSet(SlotSet** slot_set);
void ReleaseSweepingSlotSet();
template <RememberedSetType type>
TypedSlotSet* AllocateTypedSlotSet();
// Not safe to be called concurrently.
template <RememberedSetType type>
void ReleaseTypedSlotSet();
template <RememberedSetType type>
InvalidatedSlots* AllocateInvalidatedSlots();
template <RememberedSetType type>
void ReleaseInvalidatedSlots();
template <RememberedSetType type>
V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
void InvalidateRecordedSlots(HeapObject object);
template <RememberedSetType type>
bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
template <RememberedSetType type>
InvalidatedSlots* invalidated_slots() {
return invalidated_slots_[type];
}
void ReleaseLocalTracker();
void AllocateYoungGenerationBitmap();
void ReleaseYoungGenerationBitmap();
int FreeListsLength();
// Approximate amount of physical memory committed for this chunk.
V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
Address HighWaterMark() { return address() + high_water_mark_; }
size_t ProgressBar() {
DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
return progress_bar_.load(std::memory_order_acquire);
}
bool TrySetProgressBar(size_t old_value, size_t new_value) {
DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
return progress_bar_.compare_exchange_strong(old_value, new_value,
std::memory_order_acq_rel);
}
void ResetProgressBar() {
if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
progress_bar_.store(0, std::memory_order_release);
}
}
inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount);
inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount);
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
return external_backing_store_bytes_[type];
}
// Some callers rely on the fact that this can operate on both
// tagged and aligned object addresses.
inline uint32_t AddressToMarkbitIndex(Address addr) const {
return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
}
inline Address MarkbitIndexToAddress(uint32_t index) const {
return this->address() + (index << kTaggedSizeLog2);
}
bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
bool CanAllocate() {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
}
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
bool IsEvacuationCandidate() {
DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
}
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
bool ShouldSkipEvacuationSlotRecording() {
uintptr_t flags = GetFlags<access_mode>();
return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
((flags & COMPACTION_WAS_ABORTED) == 0);
}
Executability executable() {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
bool IsToPage() const { return IsFlagSet(TO_PAGE); }
bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
bool InYoungGeneration() const {
return (GetFlags() & kIsInYoungGenerationMask) != 0;
}
bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
bool InNewLargeObjectSpace() const {
return InYoungGeneration() && IsLargePage();
}
bool InOldSpace() const;
V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
// Gets the chunk's owner or null if the space has been detached.
Space* owner() const { return owner_; }
void set_owner(Space* space) { owner_ = space; }
bool IsWritable() const {
// If this is a read-only space chunk but heap_ is non-null, it has not yet
// been sealed and can be written to.
return !InReadOnlySpace() || heap_ != nullptr;
}
// Gets the chunk's allocation space, potentially dealing with a null owner_
// (like read-only chunks have).
inline AllocationSpace owner_identity() const;
// Emits a memory barrier. For TSAN builds the other thread needs to perform
// MemoryChunk::synchronized_heap() to simulate the barrier.
void InitializationMemoryFence();
V8_EXPORT_PRIVATE void SetReadable();
V8_EXPORT_PRIVATE void SetReadAndExecutable();
V8_EXPORT_PRIVATE void SetReadAndWritable();
void SetDefaultCodePermissions() {
if (FLAG_jitless) {
SetReadable();
} else {
SetReadAndExecutable();
}
}
heap::ListNode<MemoryChunk>& list_node() { return list_node_; }
CodeObjectRegistry* GetCodeObjectRegistry() { return code_object_registry_; }
PossiblyEmptyBuckets* possibly_empty_buckets() {
return &possibly_empty_buckets_;
}
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
VirtualMemory reservation);
// Release all memory allocated by the chunk. Should be called when memory
// chunk is about to be freed.
void ReleaseAllAllocatedMemory();
// Release memory allocated by the chunk, except that which is needed by
// read-only space chunks.
void ReleaseAllocatedMemoryNeededForWritableChunk();
// Sets the requested page permissions only if the write unprotect counter
// has reached 0.
void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
PageAllocator::Permission permission);
VirtualMemory* reserved_memory() { return &reservation_; }
template <AccessMode mode>
ConcurrentBitmap<mode>* marking_bitmap() const {
return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
}
template <AccessMode mode>
ConcurrentBitmap<mode>* young_generation_bitmap() const {
return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
}
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
// The space owning this memory chunk.
std::atomic<Space*> owner_;
// Used by the incremental marker to keep track of the scanning progress in
// large objects that have a progress bar and are scanned in increments.
std::atomic<size_t> progress_bar_;
// Count of bytes marked black on page.
intptr_t live_byte_count_;
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* sweeping_slot_set_;
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
std::atomic<intptr_t> high_water_mark_;
base::Mutex* mutex_;
std::atomic<ConcurrentSweepingState> concurrent_sweeping_;
base::Mutex* page_protection_change_mutex_;
// This field is only relevant for code pages. It depicts the number of
// times a component requested this page to be read+writeable. The
// counter is decremented when a component resets to read+executable.
// If Value() == 0 => The memory is read and executable.
// If Value() >= 1 => The memory is read and writable (and maybe executable).
// The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
// excessive nesting of scopes.
// All executable MemoryChunks are allocated rw based on the assumption that
// they will be used immediately for an allocation. They are initialized
// with the number of open CodeSpaceMemoryModificationScopes. The caller
// that triggers the page allocation is responsible for decrementing the
// counter.
uintptr_t write_unprotect_counter_;
// Bytes allocated on the page, which includes all objects on the page
// and the linear allocation area.
size_t allocated_bytes_;
// Tracks off-heap memory used by this memory chunk.
std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
// Freed memory that was not added to the free list.
size_t wasted_memory_;
heap::ListNode<MemoryChunk> list_node_;
FreeListCategory** categories_;
LocalArrayBufferTracker* local_tracker_;
std::atomic<intptr_t> young_generation_live_byte_count_;
Bitmap* young_generation_bitmap_;
CodeObjectRegistry* code_object_registry_;
PossiblyEmptyBuckets possibly_empty_buckets_;
private:
void InitializeReservedMemory() { reservation_.Reset(); }
friend class ConcurrentMarkingState;
friend class MajorMarkingState;
friend class MajorAtomicMarkingState;
friend class MajorNonAtomicMarkingState;
friend class MemoryAllocator;
friend class MinorMarkingState;
friend class MinorNonAtomicMarkingState;
friend class PagedSpace;
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_MEMORY_CHUNK_H_
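FromAddress and FromHeapObject depend on the alignment guarantee stated in the
class comment: masking the low bits of any interior address recovers the chunk
header, and Hasher exploits the same fact by hashing the aligned base shifted
right by kPageSizeBits. A standalone model of both (the 1 MB alignment follows
the class comment; the constants are illustrative, V8 derives them from
kPageSizeBits):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Illustrative constants: chunks aligned to 1 MB boundaries.
constexpr int kPageSizeBitsModel = 20;
constexpr uintptr_t kAlignmentModel = uintptr_t{1} << kPageSizeBitsModel;

// Any address inside a chunk maps back to the chunk base by masking, which is
// what BaseAddress() does for FromAddress/FromHeapObject.
constexpr uintptr_t BaseAddressModel(uintptr_t addr) {
  return addr & ~(kAlignmentModel - 1);
}

// Hasher model: aligned bases differ only above the low bits, so shifting
// those bits out yields a well-distributed hash (cf. MemoryChunk::Hasher).
constexpr size_t HashChunkModel(uintptr_t chunk_base) {
  return static_cast<size_t>(chunk_base >> kPageSizeBitsModel);
}

int main() {
  uintptr_t chunk = 7 * kAlignmentModel;  // Some aligned chunk base.
  uintptr_t interior = chunk + 0x1234;    // Address of an object within it.
  assert(BaseAddressModel(interior) == chunk);
  assert(HashChunkModel(chunk) == 7);
  return 0;
}

In V8 itself the Hasher is meant for std containers, e.g.
std::unordered_set<MemoryChunk*, MemoryChunk::Hasher>.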


@@ -10,9 +10,10 @@
#include "src/base/lazy-instance.h"
#include "src/base/lsan.h"
#include "src/base/platform/mutex.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/third-party/heap-api.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"


@@ -8,6 +8,7 @@
#include "src/execution/isolate.h"
#include "src/heap/combined-heap.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/objects-inl.h"
#include "src/objects/string.h"
@@ -73,7 +74,8 @@ void ReadOnlyPage::MakeHeaderRelocatable() {
// Detached read-only space needs to have a valid marking bitmap and free list
// categories. Instruct Lsan to ignore them if required.
LSAN_IGNORE_OBJECT(categories_);
for (int i = kFirstCategory; i < free_list()->number_of_categories(); i++) {
for (int i = kFirstCategory; i < owner()->free_list()->number_of_categories();
i++) {
LSAN_IGNORE_OBJECT(categories_[i]);
}
LSAN_IGNORE_OBJECT(marking_bitmap_);


@@ -11,6 +11,7 @@
#include "include/v8-platform.h"
#include "src/base/macros.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
namespace v8 {


@@ -12,6 +12,7 @@
#include "src/codegen/reloc-info.h"
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"


@@ -5,10 +5,10 @@
#ifndef V8_HEAP_SCAVENGER_INL_H_
#define V8_HEAP_SCAVENGER_INL_H_
#include "src/heap/scavenger.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/local-allocator-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/scavenger.h"
#include "src/objects/map.h"
#include "src/objects/objects-inl.h"
#include "src/objects/slots-inl.h"


@@ -12,6 +12,7 @@
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"


@@ -5,14 +5,14 @@
#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_
#include "src/common/globals.h"
#include "src/heap/spaces.h"
#include "src/base/atomic-utils.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/v8-fallthrough.h"
#include "src/common/globals.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/code-inl.h"
#include "src/sanitizer/msan.h"
@@ -207,39 +207,6 @@ bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
return false;
}
void MemoryChunk::IncrementExternalBackingStoreBytes(
ExternalBackingStoreType type, size_t amount) {
#ifndef V8_ENABLE_THIRD_PARTY_HEAP
base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
owner()->IncrementExternalBackingStoreBytes(type, amount);
#endif
}
void MemoryChunk::DecrementExternalBackingStoreBytes(
ExternalBackingStoreType type, size_t amount) {
#ifndef V8_ENABLE_THIRD_PARTY_HEAP
base::CheckedDecrement(&external_backing_store_bytes_[type], amount);
owner()->DecrementExternalBackingStoreBytes(type, amount);
#endif
}
void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
MemoryChunk* from,
MemoryChunk* to,
size_t amount) {
DCHECK_NOT_NULL(from->owner());
DCHECK_NOT_NULL(to->owner());
base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
Space::MoveExternalBackingStoreBytes(type, from->owner(), to->owner(),
amount);
}
AllocationSpace MemoryChunk::owner_identity() const {
if (InReadOnlySpace()) return RO_SPACE;
return owner()->identity();
}
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner_identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));


@@ -468,149 +468,6 @@ Address MemoryAllocator::AllocateAlignedMemory(
return base;
}
void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
base::AddressRegion memory_area =
MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
if (memory_area.size() != 0) {
MemoryAllocator* memory_allocator = heap_->memory_allocator();
v8::PageAllocator* page_allocator =
memory_allocator->page_allocator(executable());
CHECK(page_allocator->DiscardSystemPages(
reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
}
}
size_t MemoryChunkLayout::CodePageGuardStartOffset() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
}
size_t MemoryChunkLayout::CodePageGuardSize() {
return MemoryAllocator::GetCommitPageSize();
}
intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
return CodePageGuardStartOffset() + CodePageGuardSize();
}
intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
// We are guarding code pages: the last OS page will be protected as
// non-writable.
return Page::kPageSize -
static_cast<int>(MemoryAllocator::GetCommitPageSize());
}
size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
DCHECK_LE(kMaxRegularHeapObjectSize, memory);
return memory;
}
intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
return RoundUp(MemoryChunk::kHeaderSize, kTaggedSize);
}
size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
AllocationSpace space) {
if (space == CODE_SPACE) {
return ObjectStartOffsetInCodePage();
}
return ObjectStartOffsetInDataPage();
}
size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
DCHECK_LE(kMaxRegularHeapObjectSize, memory);
return memory;
}
size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
AllocationSpace space) {
if (space == CODE_SPACE) {
return AllocatableMemoryInCodePage();
}
return AllocatableMemoryInDataPage();
}
#ifdef THREAD_SANITIZER
void MemoryChunk::SynchronizedHeapLoad() {
CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
InReadOnlySpace());
}
#endif
void MemoryChunk::InitializationMemoryFence() {
base::SeqCst_MemoryFence();
#ifdef THREAD_SANITIZER
// Since TSAN does not process memory fences, we use the following annotation
// to tell TSAN that there is no data race when emitting a
// InitializationMemoryFence. Note that the other thread still needs to
// perform MemoryChunk::synchronized_heap().
base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
reinterpret_cast<base::AtomicWord>(heap_));
#endif
}
void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
PageAllocator::Permission permission) {
DCHECK(permission == PageAllocator::kRead ||
permission == PageAllocator::kReadExecute);
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
// Decrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::MutexGuard guard(page_protection_change_mutex_);
if (write_unprotect_counter_ == 0) {
// This is a corner case that may happen when we have a
// CodeSpaceMemoryModificationScope open and this page was newly
// added.
return;
}
write_unprotect_counter_--;
DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
if (write_unprotect_counter_ == 0) {
Address protect_start =
address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAligned(protect_start, page_size));
size_t protect_size = RoundUp(area_size(), page_size);
CHECK(reservation_.SetPermissions(protect_start, protect_size, permission));
}
}
void MemoryChunk::SetReadable() {
DecrementWriteUnprotectCounterAndMaybeSetPermissions(PageAllocator::kRead);
}
void MemoryChunk::SetReadAndExecutable() {
DCHECK(!FLAG_jitless);
DecrementWriteUnprotectCounterAndMaybeSetPermissions(
PageAllocator::kReadExecute);
}
void MemoryChunk::SetReadAndWritable() {
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
// Incrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::MutexGuard guard(page_protection_change_mutex_);
write_unprotect_counter_++;
DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
if (write_unprotect_counter_ == 1) {
Address unprotect_start =
address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAligned(unprotect_start, page_size));
size_t unprotect_size = RoundUp(area_size(), page_size);
CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
PageAllocator::kReadWrite));
}
}
void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
auto result = code_object_registry_newly_allocated_.insert(code);
USE(result);
@@ -793,22 +650,26 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
void Page::AllocateFreeListCategories() {
DCHECK_NULL(categories_);
categories_ = new FreeListCategory*[free_list()->number_of_categories()]();
for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
categories_ =
new FreeListCategory*[owner()->free_list()->number_of_categories()]();
for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
i++) {
DCHECK_NULL(categories_[i]);
categories_[i] = new FreeListCategory();
}
}
void Page::InitializeFreeListCategories() {
for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
i++) {
categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
}
}
void Page::ReleaseFreeListCategories() {
if (categories_ != nullptr) {
for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
i++) {
if (categories_[i] != nullptr) {
delete categories_[i];
categories_[i] = nullptr;
@@ -3743,7 +3604,8 @@ void FreeList::PrintCategories(FreeListCategoryType type) {
int MemoryChunk::FreeListsLength() {
int length = 0;
for (int cat = kFirstCategory; cat <= free_list()->last_category(); cat++) {
for (int cat = kFirstCategory; cat <= owner()->free_list()->last_category();
cat++) {
if (categories_[cat] != nullptr) {
length += categories_[cat]->FreeListLength();
}
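Note that the free_list() forwarder kept by the old spaces.h copy of
MemoryChunk (visible below) was not carried over to memory-chunk.h, which is
why the loops above now spell out owner()->free_list(). A toy model of that
indirection (all names illustrative):

#include <cassert>

// Toy model: a page no longer has its own free_list() accessor; it reaches
// the free list through its owning space, as in
// Page::AllocateFreeListCategories.
struct FreeListModel {
  int number_of_categories() const { return 6; }
  int last_category() const { return number_of_categories() - 1; }
};

struct SpaceModel {
  FreeListModel* free_list() { return &free_list_; }
  FreeListModel free_list_;
};

struct PageModel {
  SpaceModel* owner() { return owner_; }

  void AllocateFreeListCategories() {
    // Sized via the owning space's free list, matching the rewritten loop.
    categories_ = new int[owner()->free_list()->number_of_categories()]();
  }

  ~PageModel() { delete[] categories_; }

  SpaceModel* owner_ = nullptr;
  int* categories_ = nullptr;
};

int main() {
  SpaceModel space;
  PageModel page;
  page.owner_ = &space;
  page.AllocateFreeListCategories();
  assert(space.free_list()->last_category() == 5);
  return 0;
}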


@@ -27,6 +27,7 @@
#include "src/heap/invalidated-slots.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/slot-set.h"
#include "src/objects/free-space.h"
#include "src/objects/heap-object.h"
@@ -553,451 +554,6 @@ class V8_EXPORT_PRIVATE CodeObjectRegistry {
std::set<Address> code_object_registry_newly_allocated_;
};
class V8_EXPORT_PRIVATE MemoryChunkLayout {
public:
static size_t CodePageGuardStartOffset();
static size_t CodePageGuardSize();
static intptr_t ObjectStartOffsetInCodePage();
static intptr_t ObjectEndOffsetInCodePage();
static size_t AllocatableMemoryInCodePage();
static intptr_t ObjectStartOffsetInDataPage();
static size_t AllocatableMemoryInDataPage();
static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
};
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk : public BasicMemoryChunk {
public:
// Use with std data structures.
struct Hasher {
size_t operator()(MemoryChunk* const chunk) const {
return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
}
};
using Flags = uintptr_t;
static const Flags kPointersToHereAreInterestingMask =
POINTERS_TO_HERE_ARE_INTERESTING;
static const Flags kPointersFromHereAreInterestingMask =
POINTERS_FROM_HERE_ARE_INTERESTING;
static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
static const Flags kIsLargePageMask = LARGE_PAGE;
static const Flags kSkipEvacuationSlotsRecordingMask =
kEvacuationCandidateMask | kIsInYoungGenerationMask;
// |kDone|: The page state when sweeping is complete or sweeping must not be
// performed on that page. Sweeper threads that are done with their work
// will set this value and not touch the page anymore.
// |kPending|: This page is ready for parallel sweeping.
// |kInProgress|: This page is currently swept by a sweeper thread.
enum class ConcurrentSweepingState : intptr_t {
kDone,
kPending,
kInProgress,
};
static const size_t kHeaderSize =
BasicMemoryChunk::kHeaderSize // Parent size.
+ 3 * kSystemPointerSize // VirtualMemory reservation_
+ kSystemPointerSize // Address owner_
+ kSizetSize // size_t progress_bar_
+ kIntptrSize // intptr_t live_byte_count_
+ kSystemPointerSize // SlotSet* sweeping_slot_set_
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // InvalidatedSlots* array
+ kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ kSystemPointerSize // base::Mutex* mutex_
+ kSystemPointerSize // std::atomic<ConcurrentSweepingState>
// concurrent_sweeping_
+ kSystemPointerSize // base::Mutex* page_protection_change_mutex_
+ kSystemPointerSize // uintptr_t write_unprotect_counter_
+ kSizetSize * ExternalBackingStoreType::kNumTypes
// std::atomic<size_t> external_backing_store_bytes_
+ kSizetSize // size_t allocated_bytes_
+ kSizetSize // size_t wasted_memory_
+ kSystemPointerSize * 2 // heap::ListNode
+ kSystemPointerSize // FreeListCategory** categories_
+ kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
+ kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
+ kSystemPointerSize // Bitmap* young_generation_bitmap_
+ kSystemPointerSize // CodeObjectRegistry* code_object_registry_
+ kSystemPointerSize; // PossiblyEmptyBuckets possibly_empty_buckets_
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
// Maximum number of nested code memory modification scopes.
static const int kMaxWriteUnprotectCounter = 3;
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromHeapObject(HeapObject o) {
DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
}
void SetOldGenerationPageFlags(bool is_marking);
void SetYoungGenerationPageFlags(bool is_marking);
static inline void UpdateHighWaterMark(Address mark) {
if (mark == kNullAddress) return;
// Need to subtract one from the mark because when a chunk is full the
// top points to the next address after the chunk, which effectively belongs
// to another chunk. See the comment to Page::FromAllocationAreaAddress.
MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
while ((new_mark > old_mark) &&
!chunk->high_water_mark_.compare_exchange_weak(
old_mark, new_mark, std::memory_order_acq_rel)) {
}
}
static inline void MoveExternalBackingStoreBytes(
ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
size_t amount);
void DiscardUnusedMemory(Address addr, size_t size);
base::Mutex* mutex() { return mutex_; }
void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
concurrent_sweeping_ = state;
}
ConcurrentSweepingState concurrent_sweeping_state() {
return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
}
bool SweepingDone() {
return concurrent_sweeping_ == ConcurrentSweepingState::kDone;
}
inline Heap* heap() const {
DCHECK_NOT_NULL(heap_);
return heap_;
}
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race in
// mark-bit initialization. See MemoryChunk::Initialize for the corresponding
// release store.
void SynchronizedHeapLoad();
#endif
template <RememberedSetType type>
bool ContainsSlots() {
return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
invalidated_slots<type>() != nullptr;
}
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
SlotSet* slot_set() {
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
return slot_set_[type];
}
template <AccessMode access_mode = AccessMode::ATOMIC>
SlotSet* sweeping_slot_set() {
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicPointer::Acquire_Load(&sweeping_slot_set_);
return sweeping_slot_set_;
}
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
TypedSlotSet* typed_slot_set() {
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
return typed_slot_set_[type];
}
template <RememberedSetType type>
V8_EXPORT_PRIVATE SlotSet* AllocateSlotSet();
SlotSet* AllocateSweepingSlotSet();
SlotSet* AllocateSlotSet(SlotSet** slot_set);
// Not safe to be called concurrently.
template <RememberedSetType type>
void ReleaseSlotSet();
void ReleaseSlotSet(SlotSet** slot_set);
void ReleaseSweepingSlotSet();
template <RememberedSetType type>
TypedSlotSet* AllocateTypedSlotSet();
// Not safe to be called concurrently.
template <RememberedSetType type>
void ReleaseTypedSlotSet();
template <RememberedSetType type>
InvalidatedSlots* AllocateInvalidatedSlots();
template <RememberedSetType type>
void ReleaseInvalidatedSlots();
template <RememberedSetType type>
V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
void InvalidateRecordedSlots(HeapObject object);
template <RememberedSetType type>
bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
template <RememberedSetType type>
InvalidatedSlots* invalidated_slots() {
return invalidated_slots_[type];
}
void ReleaseLocalTracker();
void AllocateYoungGenerationBitmap();
void ReleaseYoungGenerationBitmap();
int FreeListsLength();
// Approximate amount of physical memory committed for this chunk.
V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
Address HighWaterMark() { return address() + high_water_mark_; }
size_t ProgressBar() {
DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
return progress_bar_.load(std::memory_order_acquire);
}
bool TrySetProgressBar(size_t old_value, size_t new_value) {
DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
return progress_bar_.compare_exchange_strong(old_value, new_value,
std::memory_order_acq_rel);
}
void ResetProgressBar() {
if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
progress_bar_.store(0, std::memory_order_release);
}
}
inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount);
inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount);
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
return external_backing_store_bytes_[type];
}
// Some callers rely on the fact that this can operate on both
// tagged and aligned object addresses.
inline uint32_t AddressToMarkbitIndex(Address addr) const {
return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
}
inline Address MarkbitIndexToAddress(uint32_t index) const {
return this->address() + (index << kTaggedSizeLog2);
}
bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
bool CanAllocate() {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
}
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
bool IsEvacuationCandidate() {
DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
}
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
bool ShouldSkipEvacuationSlotRecording() {
uintptr_t flags = GetFlags<access_mode>();
return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
((flags & COMPACTION_WAS_ABORTED) == 0);
}
Executability executable() {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
bool IsToPage() const { return IsFlagSet(TO_PAGE); }
bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
bool InYoungGeneration() const {
return (GetFlags() & kIsInYoungGenerationMask) != 0;
}
bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
bool InNewLargeObjectSpace() const {
return InYoungGeneration() && IsLargePage();
}
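// Editor's note (hypothetical sanity check): the predicates above partition
// young-generation chunks into regular new-space pages and new large-object
// pages, never both.
bool YoungGenerationPartitionHolds() const {
return InYoungGeneration() == (InNewSpace() || InNewLargeObjectSpace());
}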
bool InOldSpace() const;
V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
// Gets the chunk's owner or null if the space has been detached.
Space* owner() const { return owner_; }
void set_owner(Space* space) { owner_ = space; }
bool IsWritable() const {
// If this is a read-only space chunk but heap_ is non-null, it has not yet
// been sealed and can be written to.
return !InReadOnlySpace() || heap_ != nullptr;
}
// Gets the chunk's allocation space, potentially dealing with a null owner_
// (as read-only chunks have).
inline AllocationSpace owner_identity() const;
// Emits a memory barrier. For TSAN builds the other thread needs to perform
// MemoryChunk::synchronized_heap() to simulate the barrier.
void InitializationMemoryFence();
V8_EXPORT_PRIVATE void SetReadable();
V8_EXPORT_PRIVATE void SetReadAndExecutable();
V8_EXPORT_PRIVATE void SetReadAndWritable();
void SetDefaultCodePermissions() {
if (FLAG_jitless) {
SetReadable();
} else {
SetReadAndExecutable();
}
}
heap::ListNode<MemoryChunk>& list_node() { return list_node_; }
CodeObjectRegistry* GetCodeObjectRegistry() { return code_object_registry_; }
FreeList* free_list() { return owner()->free_list(); }
PossiblyEmptyBuckets* possibly_empty_buckets() {
return &possibly_empty_buckets_;
}
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
VirtualMemory reservation);
// Release all memory allocated by the chunk. Should be called when the
// memory chunk is about to be freed.
void ReleaseAllAllocatedMemory();
// Release memory allocated by the chunk, except that which is needed by
// read-only space chunks.
void ReleaseAllocatedMemoryNeededForWritableChunk();
// Sets the requested page permissions only if the write unprotect counter
// has reached 0.
void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
PageAllocator::Permission permission);
VirtualMemory* reserved_memory() { return &reservation_; }
template <AccessMode mode>
ConcurrentBitmap<mode>* marking_bitmap() const {
return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
}
template <AccessMode mode>
ConcurrentBitmap<mode>* young_generation_bitmap() const {
return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
}
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
// The space owning this memory chunk.
std::atomic<Space*> owner_;
// Used by the incremental marker to keep track of the scanning progress in
// large objects that have a progress bar and are scanned in increments.
std::atomic<size_t> progress_bar_;
// Count of bytes marked black on page.
intptr_t live_byte_count_;
// A single slot set for small pages (of size kPageSize) or an array of slot
// sets for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
SlotSet* sweeping_slot_set_;
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
// Assuming the initial allocation on a page is sequential, this counts the
// highest number of bytes ever allocated on the page.
std::atomic<intptr_t> high_water_mark_;
base::Mutex* mutex_;
std::atomic<ConcurrentSweepingState> concurrent_sweeping_;
base::Mutex* page_protection_change_mutex_;
// This field is only relevant for code pages. It depicts the number of
// times a component requested this page to be read+writable. The
// counter is decremented when a component resets to read+executable.
// If Value() == 0 => The memory is read and executable.
// If Value() >= 1 => The memory is read and writable (and maybe executable).
// The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
// excessive nesting of scopes.
// All executable MemoryChunks are allocated rw based on the assumption that
// they will be used immediately for an allocation. They are initialized
// with the number of open CodeSpaceMemoryModificationScopes. The caller
// that triggers the page allocation is responsible for decrementing the
// counter. (A usage sketch follows after this class.)
uintptr_t write_unprotect_counter_;
// Bytes allocated on the page, including all objects on the page
// and the linear allocation area.
size_t allocated_bytes_;
// Tracks off-heap memory used by this memory chunk.
std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
// Freed memory that was not added to the free list.
size_t wasted_memory_;
heap::ListNode<MemoryChunk> list_node_;
FreeListCategory** categories_;
LocalArrayBufferTracker* local_tracker_;
std::atomic<intptr_t> young_generation_live_byte_count_;
Bitmap* young_generation_bitmap_;
CodeObjectRegistry* code_object_registry_;
PossiblyEmptyBuckets possibly_empty_buckets_;
private:
void InitializeReservedMemory() { reservation_.Reset(); }
friend class ConcurrentMarkingState;
friend class MajorMarkingState;
friend class MajorAtomicMarkingState;
friend class MajorNonAtomicMarkingState;
friend class MemoryAllocator;
friend class MinorMarkingState;
friend class MinorNonAtomicMarkingState;
friend class PagedSpace;
};
STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
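// Editor's sketch of the write unprotect counter documented above
// (hypothetical RAII helper, not part of this change): nested scopes make a
// code page read+writable on entry and restore read+executable when the
// matching exit decrements the counter back toward zero.
class CodePageWritableScopeSketch {
public:
explicit CodePageWritableScopeSketch(MemoryChunk* chunk) : chunk_(chunk) {
chunk_->SetReadAndWritable();  // Increments the write unprotect counter.
}
~CodePageWritableScopeSketch() {
// Decrements the counter; permissions actually change only at zero.
chunk_->SetReadAndExecutable();
}
private:
MemoryChunk* chunk_;
};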
// -----------------------------------------------------------------------------
@ -1055,7 +611,8 @@ class Page : public MemoryChunk {
template <typename Callback>
inline void ForAllFreeListCategories(Callback callback) {
for (int i = kFirstCategory; i < free_list()->number_of_categories(); i++) {
for (int i = kFirstCategory;
i < owner()->free_list()->number_of_categories(); i++) {
callback(categories_[i]);
}
}

View File

@ -394,7 +394,7 @@ int Sweeper::RawSweep(
if (free_list_mode == IGNORE_FREE_LIST) return 0;
return static_cast<int>(
p->free_list()->GuaranteedAllocatable(max_freed_bytes));
p->owner()->free_list()->GuaranteedAllocatable(max_freed_bytes));
}
void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {

View File

@ -14,6 +14,7 @@
#include "src/handles/maybe-handles.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/ic/ic.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"

View File

@ -6,7 +6,8 @@
#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-inl.h" // For LooksValid implementation.
#include "src/heap/heap-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/numbers/conversions.h"
#include "src/objects/map.h"

View File

@ -10,6 +10,7 @@
#include "src/execution/messages.h"
#include "src/handles/maybe-handles.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/hash-table-inl.h"

View File

@ -5,6 +5,7 @@
#include "src/snapshot/deserializer-allocator.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/heap/memory-chunk.h"
namespace v8 {
namespace internal {

View File

@ -6,6 +6,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/heap/heap-inl.h" // For Space::identity().
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/code.h"

View File

@ -9,6 +9,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk.h"
#include "test/cctest/cctest.h"
namespace v8 {

View File

@ -6,6 +6,7 @@
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/remembered-set.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"

View File

@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include <utility>
#include "src/api/api-inl.h"
@ -42,6 +43,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/remembered-set.h"
#include "src/ic/ic.h"

View File

@ -4,12 +4,12 @@
#include <stdlib.h>
#include "src/init/v8.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/memory-chunk.h"
#include "src/init/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"

View File

@ -31,6 +31,7 @@
#include "src/base/platform/platform.h"
#include "src/heap/factory.h"
#include "src/heap/large-spaces.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/free-space.h"

View File

@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/heap/heap-inl.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/objects/cell.h"
#include "src/objects/feedback-cell.h"
#include "src/objects/script.h"

View File

@ -2,12 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/heap.h"
#include <cmath>
#include <iostream>
#include <limits>
#include "src/handles/handles-inl.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/test-utils.h"

View File

@ -3,13 +3,16 @@
// found in the LICENSE file.
#include "src/heap/spaces.h"
#include <memory>
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/heap.h"
#include "src/heap/large-spaces.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces-inl.h"
#include "test/unittests/test-utils.h"