[heap] Split out paged-spaces.h

Splits out all of PagedSpace and subclasses into paged-spaces.h. Also
moves CodeObjectRegistry to code-object-registry.h.

Bug: v8:10473, v8:10506
Change-Id: I35fab1e545e958eb32f3e39a5e2ce8fb087c2a53
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2201763
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67811}
Author: Dan Elphick, 2020-05-14 17:26:48 +01:00 (committed by Commit Bot)
Parent: b079058b12
Commit: 8686ea8121
21 changed files with 1938 additions and 1819 deletions
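The practical effect for the rest of the heap code is an include shuffle: files that previously picked up PagedSpace, its inline allocation fast paths, or CodeObjectRegistry transitively through spaces.h / spaces-inl.h now include the new headers directly, as the diffs below show. A minimal sketch of that pattern for a hypothetical consumer file (the file name is illustrative; the headers are the ones added by this change):

// hypothetical-heap-consumer.cc, after this split:
#include "src/heap/code-object-registry.h"  // CodeObjectRegistry moved here
#include "src/heap/paged-spaces.h"          // PagedSpace, OldSpace, CodeSpace, MapSpace
#include "src/heap/paged-spaces-inl.h"      // inline allocation fast paths and
                                            // PagedSpaceObjectIterator::Next()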


@@ -2401,6 +2401,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/barrier.h",
"src/heap/basic-memory-chunk.cc",
"src/heap/basic-memory-chunk.h",
"src/heap/code-object-registry.cc",
"src/heap/code-object-registry.h",
"src/heap/code-stats.cc",
"src/heap/code-stats.h",
"src/heap/combined-heap.cc",
@@ -2473,6 +2475,9 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/off-thread-factory.h",
"src/heap/off-thread-heap.cc",
"src/heap/off-thread-heap.h",
"src/heap/paged-spaces-inl.h",
"src/heap/paged-spaces.cc",
"src/heap/paged-spaces.h",
"src/heap/read-only-heap-inl.h",
"src/heap/read-only-heap.cc",
"src/heap/read-only-heap.h",


@@ -0,0 +1,75 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/code-object-registry.h"
#include <algorithm>
#include "src/base/logging.h"
namespace v8 {
namespace internal {
void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
auto result = code_object_registry_newly_allocated_.insert(code);
USE(result);
DCHECK(result.second);
}
void CodeObjectRegistry::RegisterAlreadyExistingCodeObject(Address code) {
code_object_registry_already_existing_.push_back(code);
}
void CodeObjectRegistry::Clear() {
code_object_registry_already_existing_.clear();
code_object_registry_newly_allocated_.clear();
}
void CodeObjectRegistry::Finalize() {
code_object_registry_already_existing_.shrink_to_fit();
}
bool CodeObjectRegistry::Contains(Address object) const {
return (code_object_registry_newly_allocated_.find(object) !=
code_object_registry_newly_allocated_.end()) ||
(std::binary_search(code_object_registry_already_existing_.begin(),
code_object_registry_already_existing_.end(),
object));
}
Address CodeObjectRegistry::GetCodeObjectStartFromInnerAddress(
Address address) const {
// Let's first find the object which comes right before address in the vector
// of already existing code objects.
Address already_existing_set_ = 0;
Address newly_allocated_set_ = 0;
if (!code_object_registry_already_existing_.empty()) {
auto it =
std::upper_bound(code_object_registry_already_existing_.begin(),
code_object_registry_already_existing_.end(), address);
if (it != code_object_registry_already_existing_.begin()) {
already_existing_set_ = *(--it);
}
}
// Next, let's find the object which comes right before address in the set
// of newly allocated code objects.
if (!code_object_registry_newly_allocated_.empty()) {
auto it = code_object_registry_newly_allocated_.upper_bound(address);
if (it != code_object_registry_newly_allocated_.begin()) {
newly_allocated_set_ = *(--it);
}
}
// The code object which contains the address has to be in one of the two
// data structures.
DCHECK(already_existing_set_ != 0 || newly_allocated_set_ != 0);
// The address which is closest to the given address is the code object.
return already_existing_set_ > newly_allocated_set_ ? already_existing_set_
: newly_allocated_set_;
}
} // namespace internal
} // namespace v8
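To make the lookup above concrete, here is a small standalone sketch of the same "greatest start address <= inner pointer" search, using plain standard containers in place of the two registry members; all addresses are hypothetical and chosen only for illustration:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <set>
#include <vector>

int main() {
  using Address = uintptr_t;
  // Hypothetical code-object start addresses.
  std::vector<Address> already_existing = {0x1000, 0x3000};  // kept sorted
  std::set<Address> newly_allocated = {0x2000};
  Address inner = 0x2500;  // points into the object that starts at 0x2000

  // Greatest start address <= inner in the sorted vector.
  Address from_vector = 0;
  auto vit = std::upper_bound(already_existing.begin(),
                              already_existing.end(), inner);
  if (vit != already_existing.begin()) from_vector = *(--vit);

  // Greatest start address <= inner in the set.
  Address from_set = 0;
  auto sit = newly_allocated.upper_bound(inner);
  if (sit != newly_allocated.begin()) from_set = *(--sit);

  // The larger (closer) of the two candidates is the object start.
  assert(std::max(from_vector, from_set) == Address{0x2000});
  return 0;
}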


@@ -0,0 +1,38 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CODE_OBJECT_REGISTRY_H_
#define V8_HEAP_CODE_OBJECT_REGISTRY_H_
#include <set>
#include <vector>
#include "src/base/macros.h"
#include "src/common/globals.h"
namespace v8 {
namespace internal {
// The CodeObjectRegistry holds all start addresses of code objects of a given
// MemoryChunk. Each MemoryChunk owns a separate CodeObjectRegistry. The
// CodeObjectRegistry allows fast lookup from an inner pointer of a code object
// to the actual code object.
class V8_EXPORT_PRIVATE CodeObjectRegistry {
public:
void RegisterNewlyAllocatedCodeObject(Address code);
void RegisterAlreadyExistingCodeObject(Address code);
void Clear();
void Finalize();
bool Contains(Address code) const;
Address GetCodeObjectStartFromInnerAddress(Address address) const;
private:
std::vector<Address> code_object_registry_already_existing_;
std::set<Address> code_object_registry_newly_allocated_;
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_CODE_OBJECT_REGISTRY_H_
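A hedged usage sketch of the registry interface declared above (in V8 each MemoryChunk owns one registry; here it is exercised standalone, and the addresses as well as the wrapping function are illustrative assumptions):

#include "src/base/logging.h"
#include "src/heap/code-object-registry.h"

// Hypothetical standalone exercise of the registry; addresses are made up.
void ExerciseRegistry() {
  v8::internal::CodeObjectRegistry registry;
  // Code objects known up front go into the sorted vector; register them in
  // ascending address order so the binary_search in Contains() works.
  registry.RegisterAlreadyExistingCodeObject(0x1000);
  registry.RegisterAlreadyExistingCodeObject(0x3000);
  registry.Finalize();  // shrink_to_fit() the already-existing vector
  // Objects allocated afterwards go into the std::set.
  registry.RegisterNewlyAllocatedCodeObject(0x2000);
  CHECK(registry.Contains(0x2000));
  CHECK_EQ(0x2000u, registry.GetCodeObjectStartFromInnerAddress(0x2500));
}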


@@ -7,7 +7,7 @@
#include "src/codegen/code-comments.h"
#include "src/codegen/reloc-info.h"
#include "src/heap/large-spaces.h"
#include "src/heap/spaces-inl.h" // For PagedSpaceObjectIterator.
#include "src/heap/paged-spaces-inl.h" // For PagedSpaceObjectIterator.
#include "src/objects/objects-inl.h"
namespace v8 {


@@ -23,7 +23,9 @@
// leak heap internals to users of this interface!
#include "src/execution/isolate-data.h"
#include "src/execution/isolate.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/allocation-site-inl.h"


@@ -30,6 +30,7 @@
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/barrier.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/code-stats.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
@@ -51,6 +52,7 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/safepoint.h"


@@ -7,6 +7,7 @@
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
namespace v8 {


@@ -16,6 +16,7 @@
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"


@@ -6,6 +6,7 @@
#include "src/base/platform/platform.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"


@@ -5,8 +5,8 @@
#include "src/heap/off-thread-heap.h"
#include "src/common/globals.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/roots/roots.h"
#include "src/snapshot/references.h"


@@ -9,6 +9,7 @@
#include "src/common/globals.h"
#include "src/heap/large-spaces.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"

src/heap/paged-spaces-inl.h (new file, 216 lines)

@@ -0,0 +1,216 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_PAGED_SPACES_INL_H_
#define V8_HEAP_PAGED_SPACES_INL_H_
#include "src/heap/incremental-marking.h"
#include "src/heap/paged-spaces.h"
#include "src/objects/code-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// PagedSpaceObjectIterator
HeapObject PagedSpaceObjectIterator::Next() {
do {
HeapObject next_obj = FromCurrentPage();
if (!next_obj.is_null()) return next_obj;
} while (AdvanceToNextPage());
return HeapObject();
}
HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
cur_addr_ = space_->limit();
continue;
}
HeapObject obj = HeapObject::FromAddress(cur_addr_);
const int obj_size = obj.Size();
cur_addr_ += obj_size;
DCHECK_LE(cur_addr_, cur_end_);
if (!obj.IsFreeSpaceOrFiller()) {
if (obj.IsCode()) {
DCHECK_IMPLIES(
space_->identity() != CODE_SPACE,
space_->identity() == RO_SPACE && Code::cast(obj).is_builtin());
DCHECK_CODEOBJECT_SIZE(obj_size, space_);
} else {
DCHECK_OBJECT_SIZE(obj_size);
}
return obj;
}
}
return HeapObject();
}
bool PagedSpace::Contains(Address addr) const {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return true;
}
return Page::FromAddress(addr)->owner() == this;
}
bool PagedSpace::Contains(Object o) const {
if (!o.IsHeapObject()) return false;
return Page::FromAddress(o.ptr())->owner() == this;
}
void PagedSpace::UnlinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
page->ForAllFreeListCategories([this](FreeListCategory* category) {
free_list()->RemoveCategory(category);
});
}
size_t PagedSpace::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
size_t added = 0;
page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
added += category->available();
category->Relink(free_list());
});
DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
page->AvailableInFreeList() ==
page->AvailableInFreeListFromAllocatedBytes());
return added;
}
bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
if (allocation_info_.top() != kNullAddress) {
const Address object_address = object.address();
if ((allocation_info_.top() - object_size) == object_address) {
allocation_info_.set_top(object_address);
return true;
}
}
return false;
}
bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin) {
if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
return true;
}
return SlowRefillLinearAllocationArea(size_in_bytes, origin);
}
HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
DCHECK_LE(new_top, allocation_info_.limit());
allocation_info_.set_top(new_top);
return HeapObject::FromAddress(current_top);
}
HeapObject PagedSpace::TryAllocateLinearlyAligned(
int* size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
Address new_top = current_top + filler_size + *size_in_bytes;
if (new_top > allocation_info_.limit()) return HeapObject();
allocation_info_.set_top(new_top);
if (filler_size > 0) {
*size_in_bytes += filler_size;
return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
HeapObject::FromAddress(current_top),
filler_size);
}
return HeapObject::FromAddress(current_top);
}
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
return AllocationResult::Retry(identity());
}
HeapObject object = AllocateLinearly(size_in_bytes);
DCHECK(!object.is_null());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return object;
}
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
int allocation_size = size_in_bytes;
HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
if (object.is_null()) {
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
if (!EnsureLinearAllocationArea(allocation_size, origin)) {
return AllocationResult::Retry(identity());
}
allocation_size = size_in_bytes;
object = TryAllocateLinearlyAligned(&allocation_size, alignment);
DCHECK(!object.is_null());
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return object;
}
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
if (top_on_previous_step_ && top() < top_on_previous_step_ &&
SupportsInlineAllocation()) {
// Generated code decreased the top() pointer to do folded allocations.
// The top_on_previous_step_ can be one byte beyond the current page.
DCHECK_NE(top(), kNullAddress);
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
top_on_previous_step_ = top();
}
size_t bytes_since_last =
top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result =
alignment != kWordAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
#else
AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
heap_obj.address(), size_in_bytes);
StartNextInlineAllocationStep();
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
}
return result;
}
} // namespace internal
} // namespace v8
#endif // V8_HEAP_PAGED_SPACES_INL_H_
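The aligned fast path above hinges on simple filler arithmetic: compute the filler needed to align the current top, and, when refilling the linear allocation area, reserve the worst-case filler because the final top is not known yet. A standalone sketch of that arithmetic with assumed constants (these mirror the role of Heap::GetFillToAlign / GetMaximumFillToAlign but are not the real helpers):

#include <cassert>
#include <cstdint>

// Assumed constants for illustration: 4-byte tagged slots, 8-byte doubles.
constexpr uintptr_t kTaggedSize = 4;
constexpr uintptr_t kDoubleAlignment = 8;

// Filler needed so that (top + filler) is double aligned; this plays the
// role of Heap::GetFillToAlign(top, kDoubleAligned) in the code above.
int FillToDoubleAlign(uintptr_t top) {
  return (top % kDoubleAlignment != 0) ? static_cast<int>(kTaggedSize) : 0;
}

int main() {
  uintptr_t top = 0x1004;               // tagged-aligned, not double-aligned
  const int size_in_bytes = 16;
  int filler = FillToDoubleAlign(top);  // 4: one filler slot is needed
  uintptr_t object_start = top + filler;
  uintptr_t new_top = object_start + size_in_bytes;
  assert(object_start % kDoubleAlignment == 0);
  assert(new_top == 0x1018);
  // Worst case reserved when the fast path fails and the area is refilled,
  // mirroring Heap::GetMaximumFillToAlign(kDoubleAligned).
  const int max_filler = static_cast<int>(kDoubleAlignment - kTaggedSize);
  assert(filler <= max_filler);
  return 0;
}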

src/heap/paged-spaces.cc (new file, 1004 lines)

File diff suppressed because it is too large.

src/heap/paged-spaces.h (new file, 585 lines)

@@ -0,0 +1,585 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_PAGED_SPACES_H_
#define V8_HEAP_PAGED_SPACES_H_
#include <memory>
#include <utility>
#include "src/base/bounds.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
namespace v8 {
namespace internal {
class Heap;
class HeapObject;
class Isolate;
class ObjectVisitor;
// -----------------------------------------------------------------------------
// Heap object iterator in old/map spaces.
//
// A PagedSpaceObjectIterator iterates objects from the bottom of the given
// space to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
public:
// Creates a new object iterator in a given space.
PagedSpaceObjectIterator(Heap* heap, PagedSpace* space);
PagedSpaceObjectIterator(Heap* heap, PagedSpace* space, Page* page);
// Creates a new object iterator in a given off-thread space.
explicit PagedSpaceObjectIterator(OffThreadSpace* space);
// Advance to the next object, skipping free spaces and other fillers and
// skipping the special garbage section of which there is one per space.
// Returns nullptr when the iteration has ended.
inline HeapObject Next() override;
private:
// Fast (inlined) path of next().
inline HeapObject FromCurrentPage();
// Slow path of next(), goes into the next page. Returns false if the
// iteration has ended.
bool AdvanceToNextPage();
Address cur_addr_; // Current iteration point.
Address cur_end_; // End iteration point.
PagedSpace* space_;
PageRange page_range_;
PageRange::iterator current_page_;
};
class V8_EXPORT_PRIVATE PagedSpace
: NON_EXPORTED_BASE(public SpaceWithLinearArea) {
public:
using iterator = PageIterator;
using const_iterator = ConstPageIterator;
static const size_t kCompactionMemoryWanted = 500 * KB;
// Creates a space with an id.
PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
FreeList* free_list,
LocalSpaceKind local_space_kind = LocalSpaceKind::kNone);
~PagedSpace() override { TearDown(); }
// Checks whether an object/address is in this space.
inline bool Contains(Address a) const;
inline bool Contains(Object o) const;
bool ContainsSlow(Address addr) const;
// Does the space need executable memory?
Executability executable() { return executable_; }
// Prepares for a mark-compact GC.
void PrepareForMarkCompact();
// Current capacity without growing (Size() + Available()).
size_t Capacity() { return accounting_stats_.Capacity(); }
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
// Sets the capacity, the available space and the wasted space to zero.
// The stats are rebuilt during sweeping by adding each page to the
// capacity and the size when it is encountered. As free spaces are
// discovered during the sweeping they are subtracted from the size and added
// to the available and wasted totals. The free list is cleared as well.
void ClearAllocatorState() {
accounting_stats_.ClearSize();
free_list_->Reset();
}
// Available bytes without growing. These are the bytes on the free list.
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
// immediately added to the free list so they show up here.
size_t Available() override { return free_list_->Available(); }
// Allocated bytes in this space. Garbage bytes that were not found due to
// concurrent sweeping are counted as being allocated! The bytes in the
// current linear allocation area (between top and limit) are also counted
// here.
size_t Size() override { return accounting_stats_.Size(); }
// As size, but the bytes in lazily swept pages are estimated and the bytes
// in the current linear allocation area are not included.
size_t SizeOfObjects() override;
// Wasted bytes in this space. These are just the bytes that were thrown away
// due to being too small to use for allocation.
virtual size_t Waste() { return free_list_->wasted_bytes(); }
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space double aligned if
// possible, return a failure object if not.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space and consider allocation
// alignment if needed.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space from a background
// thread.
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin);
size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
if (size_in_bytes == 0) return 0;
heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
ClearRecordedSlots::kNo);
if (mode == SpaceAccountingMode::kSpaceAccounted) {
return AccountedFree(start, size_in_bytes);
} else {
return UnaccountedFree(start, size_in_bytes);
}
}
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
// If add_to_freelist is false then just accounting stats are updated and
// no attempt to add area to free list is made.
size_t AccountedFree(Address start, size_t size_in_bytes) {
size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
Page* page = Page::FromAddress(start);
accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
size_t UnaccountedFree(Address start, size_t size_in_bytes) {
size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
inline bool TryFreeLast(HeapObject object, int object_size);
void ResetFreeList();
// Empty space linear allocation area, returning unused area to free list.
void FreeLinearAllocationArea();
void MarkLinearAllocationAreaBlack();
void UnmarkLinearAllocationArea();
void DecreaseAllocatedBytes(size_t bytes, Page* page) {
accounting_stats_.DecreaseAllocatedBytes(bytes, page);
}
void IncreaseAllocatedBytes(size_t bytes, Page* page) {
accounting_stats_.IncreaseAllocatedBytes(bytes, page);
}
void DecreaseCapacity(size_t bytes) {
accounting_stats_.DecreaseCapacity(bytes);
}
void IncreaseCapacity(size_t bytes) {
accounting_stats_.IncreaseCapacity(bytes);
}
void RefineAllocatedBytesAfterSweeping(Page* page);
Page* InitializePage(MemoryChunk* chunk);
void ReleasePage(Page* page);
// Adds the page to this space and returns the number of bytes added to the
// free list of the space.
size_t AddPage(Page* page);
void RemovePage(Page* page);
// Remove a page if it has at least |size_in_bytes| bytes available that can
// be used for allocation.
Page* RemovePageSafe(int size_in_bytes);
void SetReadable();
void SetReadAndExecutable();
void SetReadAndWritable();
void SetDefaultCodePermissions() {
if (FLAG_jitless) {
SetReadable();
} else {
SetReadAndExecutable();
}
}
#ifdef VERIFY_HEAP
// Verify integrity of this space.
virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
void VerifyLiveBytes();
// Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space).
virtual void VerifyObject(HeapObject obj) {}
#endif
#ifdef DEBUG
void VerifyCountersAfterSweeping(Heap* heap);
void VerifyCountersBeforeConcurrentSweeping();
// Print meta info and objects in this space.
void Print() override;
// Report code object related statistics
static void ReportCodeStatistics(Isolate* isolate);
static void ResetCodeStatistics(Isolate* isolate);
#endif
bool CanExpand(size_t size);
// Returns the number of total pages in this space.
int CountTotalPages();
// Return size of allocatable area on a page in this space.
inline int AreaSize() { return static_cast<int>(area_size_); }
bool is_local_space() { return local_space_kind_ != LocalSpaceKind::kNone; }
bool is_off_thread_space() {
return local_space_kind_ == LocalSpaceKind::kOffThreadSpace;
}
bool is_compaction_space() {
return base::IsInRange(local_space_kind_,
LocalSpaceKind::kFirstCompactionSpace,
LocalSpaceKind::kLastCompactionSpace);
}
LocalSpaceKind local_space_kind() { return local_space_kind_; }
// Merges {other} into the current space. Note that this modifies {other},
// e.g., removes its bump pointer area and resets statistics.
void MergeLocalSpace(LocalSpace* other);
// Refills the free list from the corresponding free list filled by the
// sweeper.
virtual void RefillFreeList();
base::Mutex* mutex() { return &space_mutex_; }
inline void UnlinkFreeListCategories(Page* page);
inline size_t RelinkFreeListCategories(Page* page);
Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
const Page* first_page() const {
return reinterpret_cast<const Page*>(Space::first_page());
}
iterator begin() { return iterator(first_page()); }
iterator end() { return iterator(nullptr); }
const_iterator begin() const { return const_iterator(first_page()); }
const_iterator end() const { return const_iterator(nullptr); }
// Shrink immortal immovable pages of the space to be exactly the size needed
// using the high water mark.
void ShrinkImmortalImmovablePages();
size_t ShrinkPageToHighWaterMark(Page* page);
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
void SetLinearAllocationArea(Address top, Address limit);
private:
// Set space linear allocation area.
void SetTopAndLimit(Address top, Address limit) {
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(top, limit);
}
void DecreaseLimit(Address new_limit);
void UpdateInlineAllocationLimit(size_t min_size) override;
bool SupportsInlineAllocation() override {
return identity() == OLD_SPACE && !is_local_space();
}
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
virtual bool snapshotable() { return true; }
bool HasPages() { return first_page() != nullptr; }
// Cleans up the space, frees all pages in this space except those belonging
// to the initial chunk, uncommits addresses in the initial chunk.
void TearDown();
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate requested number of pages from OS, or if the hard heap
// size limit has been hit.
bool Expand();
// Sets up a linear allocation area that fits the given number of bytes.
// Returns false if there is not enough space and the caller has to retry
// after collecting garbage.
inline bool EnsureLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin);
// Allocates an object from the linear allocation area. Assumes that the
// linear allocation area is large enough to fit the object.
inline HeapObject AllocateLinearly(int size_in_bytes);
// Tries to allocate an aligned object from the linear allocation area.
// Returns nullptr if the linear allocation area does not fit the object.
// Otherwise, returns the object pointer and writes the allocation size
// (object size + alignment filler size) to the size_in_bytes.
inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
size_t size_in_bytes, AllocationOrigin origin);
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and retry free-list
// allocation. Returns false if there is not enough space and the caller
// has to retry after collecting garbage.
V8_WARN_UNUSED_RESULT bool EnsureSweptAndRetryAllocation(
int size_in_bytes, AllocationOrigin origin);
V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(int required_freed_bytes,
int max_pages,
int size_in_bytes,
AllocationOrigin origin);
// Slow path of AllocateRaw. This function is space-dependent. Returns false
// if there is not enough space and the caller has to retry after
// collecting garbage.
V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin);
// Implementation of SlowAllocateRaw. Returns false if there is not enough
// space and the caller has to retry after collecting garbage.
V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin);
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin);
Executability executable_;
LocalSpaceKind local_space_kind_;
size_t area_size_;
// Accounting information for this space.
AllocationStats accounting_stats_;
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
// Mutex guarding concurrent allocation.
base::Mutex allocation_mutex_;
friend class IncrementalMarking;
friend class MarkCompactCollector;
// Used in cctest.
friend class heap::HeapTester;
};
// -----------------------------------------------------------------------------
// Base class for compaction space and off-thread space.
class V8_EXPORT_PRIVATE LocalSpace : public PagedSpace {
public:
LocalSpace(Heap* heap, AllocationSpace id, Executability executable,
LocalSpaceKind local_space_kind)
: PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
local_space_kind) {
DCHECK_NE(local_space_kind, LocalSpaceKind::kNone);
}
protected:
// The space is temporary and not included in any snapshots.
bool snapshotable() override { return false; }
};
// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.
class V8_EXPORT_PRIVATE CompactionSpace : public LocalSpace {
public:
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
LocalSpaceKind local_space_kind)
: LocalSpace(heap, id, executable, local_space_kind) {
DCHECK(is_compaction_space());
}
protected:
V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin) override;
};
// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
public:
explicit CompactionSpaceCollection(Heap* heap,
LocalSpaceKind local_space_kind)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
local_space_kind),
code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
local_space_kind) {}
CompactionSpace* Get(AllocationSpace space) {
switch (space) {
case OLD_SPACE:
return &old_space_;
case CODE_SPACE:
return &code_space_;
default:
UNREACHABLE();
}
UNREACHABLE();
}
private:
CompactionSpace old_space_;
CompactionSpace code_space_;
};
// -----------------------------------------------------------------------------
// Old generation regular object space.
class OldSpace : public PagedSpace {
public:
// Creates an old space object. The constructor does not allocate pages
// from OS.
explicit OldSpace(Heap* heap)
: PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
FreeList::CreateFreeList()) {}
static bool IsAtPageStart(Address addr) {
return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
MemoryChunkLayout::ObjectStartOffsetInDataPage();
}
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
type == ExternalBackingStoreType::kArrayBuffer)
return heap()->OldArrayBufferBytes();
return external_backing_store_bytes_[type];
}
};
// -----------------------------------------------------------------------------
// Old generation code object space.
class CodeSpace : public PagedSpace {
public:
// Creates an old space object. The constructor does not allocate pages
// from OS.
explicit CodeSpace(Heap* heap)
: PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
};
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
SLOW_DCHECK((space).page_low() <= (info).top() && \
(info).top() <= (space).page_high() && \
(info).limit() <= (space).page_high())
// -----------------------------------------------------------------------------
// Old space for all map objects
class MapSpace : public PagedSpace {
public:
// Creates a map space object.
explicit MapSpace(Heap* heap)
: PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, new FreeListMap()) {}
int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo(Map::kSize)) {
return RoundDown(size, Map::kSize);
} else {
return (size / Map::kSize) * Map::kSize;
}
}
void SortFreeList();
#ifdef VERIFY_HEAP
void VerifyObject(HeapObject obj) override;
#endif
};
// -----------------------------------------------------------------------------
// Off-thread space that is used for folded allocation on a different thread.
class V8_EXPORT_PRIVATE OffThreadSpace : public LocalSpace {
public:
explicit OffThreadSpace(Heap* heap)
: LocalSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
LocalSpaceKind::kOffThreadSpace) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
// OffThreadSpace doesn't work with third-party heap.
UNREACHABLE();
#endif
}
protected:
V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin) override;
void RefillFreeList() override;
};
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class OldGenerationMemoryChunkIterator {
public:
inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
// Return nullptr when the iterator is done.
inline MemoryChunk* next();
private:
enum State {
kOldSpaceState,
kMapState,
kCodeState,
kLargeObjectState,
kCodeLargeObjectState,
kFinishedState
};
Heap* heap_;
State state_;
PageIterator old_iterator_;
PageIterator code_iterator_;
PageIterator map_iterator_;
LargePageIterator lo_iterator_;
LargePageIterator code_lo_iterator_;
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_PAGED_SPACES_H_


@@ -12,6 +12,7 @@
#include "src/base/macros.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
namespace v8 {


@@ -63,42 +63,6 @@ HeapObject SemiSpaceObjectIterator::Next() {
return HeapObject();
}
// -----------------------------------------------------------------------------
// PagedSpaceObjectIterator
HeapObject PagedSpaceObjectIterator::Next() {
do {
HeapObject next_obj = FromCurrentPage();
if (!next_obj.is_null()) return next_obj;
} while (AdvanceToNextPage());
return HeapObject();
}
HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
cur_addr_ = space_->limit();
continue;
}
HeapObject obj = HeapObject::FromAddress(cur_addr_);
const int obj_size = obj.Size();
cur_addr_ += obj_size;
DCHECK_LE(cur_addr_, cur_end_);
if (!obj.IsFreeSpaceOrFiller()) {
if (obj.IsCode()) {
DCHECK_IMPLIES(
space_->identity() != CODE_SPACE,
space_->identity() == RO_SPACE && Code::cast(obj).is_builtin());
DCHECK_CODEOBJECT_SIZE(obj_size, space_);
} else {
DCHECK_OBJECT_SIZE(obj_size);
}
return obj;
}
}
return HeapObject();
}
void Space::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount) {
base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
@@ -165,50 +129,6 @@ bool NewSpace::FromSpaceContains(Object o) const {
return from_space_.Contains(o);
}
bool PagedSpace::Contains(Address addr) const {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return true;
}
return Page::FromAddress(addr)->owner() == this;
}
bool PagedSpace::Contains(Object o) const {
if (!o.IsHeapObject()) return false;
return Page::FromAddress(o.ptr())->owner() == this;
}
void PagedSpace::UnlinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
page->ForAllFreeListCategories([this](FreeListCategory* category) {
free_list()->RemoveCategory(category);
});
}
size_t PagedSpace::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
size_t added = 0;
page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
added += category->available();
category->Relink(free_list());
});
DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
page->AvailableInFreeList() ==
page->AvailableInFreeListFromAllocatedBytes());
return added;
}
bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
if (allocation_info_.top() != kNullAddress) {
const Address object_address = object.address();
if ((allocation_info_.top() - object_size) == object_address) {
allocation_info_.set_top(object_address);
return true;
}
}
return false;
}
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner_identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
@@ -345,122 +265,6 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
return AllocationResult(HeapObject::FromAddress(current_top));
}
bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin) {
if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
return true;
}
return SlowRefillLinearAllocationArea(size_in_bytes, origin);
}
HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
DCHECK_LE(new_top, allocation_info_.limit());
allocation_info_.set_top(new_top);
return HeapObject::FromAddress(current_top);
}
HeapObject PagedSpace::TryAllocateLinearlyAligned(
int* size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
Address new_top = current_top + filler_size + *size_in_bytes;
if (new_top > allocation_info_.limit()) return HeapObject();
allocation_info_.set_top(new_top);
if (filler_size > 0) {
*size_in_bytes += filler_size;
return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
HeapObject::FromAddress(current_top),
filler_size);
}
return HeapObject::FromAddress(current_top);
}
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
return AllocationResult::Retry(identity());
}
HeapObject object = AllocateLinearly(size_in_bytes);
DCHECK(!object.is_null());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return object;
}
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
int allocation_size = size_in_bytes;
HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
if (object.is_null()) {
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
if (!EnsureLinearAllocationArea(allocation_size, origin)) {
return AllocationResult::Retry(identity());
}
allocation_size = size_in_bytes;
object = TryAllocateLinearlyAligned(&allocation_size, alignment);
DCHECK(!object.is_null());
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return object;
}
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
if (top_on_previous_step_ && top() < top_on_previous_step_ &&
SupportsInlineAllocation()) {
// Generated code decreased the top() pointer to do folded allocations.
// The top_on_previous_step_ can be one byte beyond the current page.
DCHECK_NE(top(), kNullAddress);
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
top_on_previous_step_ = top();
}
size_t bytes_since_last =
top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result =
alignment != kWordAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
#else
AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
heap_obj.address(), size_in_bytes);
StartNextInlineAllocationStep();
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
}
return result;
}
// -----------------------------------------------------------------------------
// NewSpace

File diff suppressed because it is too large.


@@ -18,7 +18,6 @@
#include "src/base/export-template.h"
#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
@@ -539,24 +538,6 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
DISALLOW_COPY_AND_ASSIGN(Space);
};
// The CodeObjectRegistry holds all start addresses of code objects of a given
// MemoryChunk. Each MemoryChunk owns a separate CodeObjectRegistry. The
// CodeObjectRegistry allows fast lookup from an inner pointer of a code object
// to the actual code object.
class V8_EXPORT_PRIVATE CodeObjectRegistry {
public:
void RegisterNewlyAllocatedCodeObject(Address code);
void RegisterAlreadyExistingCodeObject(Address code);
void Clear();
void Finalize();
bool Contains(Address code) const;
Address GetCodeObjectStartFromInnerAddress(Address address) const;
private:
std::vector<Address> code_object_registry_already_existing_;
std::set<Address> code_object_registry_newly_allocated_;
};
STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
// -----------------------------------------------------------------------------
@@ -1135,44 +1116,6 @@ class PageRange {
Page* end_;
};
// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A PagedSpaceObjectIterator iterates objects from the bottom of the given
// space to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
public:
// Creates a new object iterator in a given space.
PagedSpaceObjectIterator(Heap* heap, PagedSpace* space);
PagedSpaceObjectIterator(Heap* heap, PagedSpace* space, Page* page);
// Creates a new object iterator in a given off-thread space.
explicit PagedSpaceObjectIterator(OffThreadSpace* space);
// Advance to the next object, skipping free spaces and other fillers and
// skipping the special garbage section of which there is one per space.
// Returns nullptr when the iteration has ended.
inline HeapObject Next() override;
private:
// Fast (inlined) path of next().
inline HeapObject FromCurrentPage();
// Slow path of next(), goes into the next page. Returns false if the
// iteration has ended.
bool AdvanceToNextPage();
Address cur_addr_; // Current iteration point.
Address cur_end_; // End iteration point.
PagedSpace* space_;
PageRange page_range_;
PageRange::iterator current_page_;
};
// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.
@@ -1822,349 +1765,6 @@ class SpaceWithLinearArea : public Space {
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};
class V8_EXPORT_PRIVATE PagedSpace
: NON_EXPORTED_BASE(public SpaceWithLinearArea) {
public:
using iterator = PageIterator;
using const_iterator = ConstPageIterator;
static const size_t kCompactionMemoryWanted = 500 * KB;
// Creates a space with an id.
PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
FreeList* free_list,
LocalSpaceKind local_space_kind = LocalSpaceKind::kNone);
~PagedSpace() override { TearDown(); }
// Checks whether an object/address is in this space.
inline bool Contains(Address a) const;
inline bool Contains(Object o) const;
bool ContainsSlow(Address addr) const;
// Does the space need executable memory?
Executability executable() { return executable_; }
// Prepares for a mark-compact GC.
void PrepareForMarkCompact();
// Current capacity without growing (Size() + Available()).
size_t Capacity() { return accounting_stats_.Capacity(); }
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
// Sets the capacity, the available space and the wasted space to zero.
// The stats are rebuilt during sweeping by adding each page to the
// capacity and the size when it is encountered. As free spaces are
// discovered during the sweeping they are subtracted from the size and added
// to the available and wasted totals. The free list is cleared as well.
void ClearAllocatorState() {
accounting_stats_.ClearSize();
free_list_->Reset();
}
// Available bytes without growing. These are the bytes on the free list.
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
// immediately added to the free list so they show up here.
size_t Available() override { return free_list_->Available(); }
// Allocated bytes in this space. Garbage bytes that were not found due to
// concurrent sweeping are counted as being allocated! The bytes in the
// current linear allocation area (between top and limit) are also counted
// here.
size_t Size() override { return accounting_stats_.Size(); }
// As size, but the bytes in lazily swept pages are estimated and the bytes
// in the current linear allocation area are not included.
size_t SizeOfObjects() override;
// Wasted bytes in this space. These are just the bytes that were thrown away
// due to being too small to use for allocation.
virtual size_t Waste() { return free_list_->wasted_bytes(); }
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space double aligned if
// possible, return a failure object if not.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space and consider allocation
// alignment if needed.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space from a background
// thread.
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin);
size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
if (size_in_bytes == 0) return 0;
heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
ClearRecordedSlots::kNo);
if (mode == SpaceAccountingMode::kSpaceAccounted) {
return AccountedFree(start, size_in_bytes);
} else {
return UnaccountedFree(start, size_in_bytes);
}
}
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
// If add_to_freelist is false then just accounting stats are updated and
// no attempt to add area to free list is made.
size_t AccountedFree(Address start, size_t size_in_bytes) {
size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
Page* page = Page::FromAddress(start);
accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
size_t UnaccountedFree(Address start, size_t size_in_bytes) {
size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
inline bool TryFreeLast(HeapObject object, int object_size);
void ResetFreeList();
// Empty space linear allocation area, returning unused area to free list.
void FreeLinearAllocationArea();
void MarkLinearAllocationAreaBlack();
void UnmarkLinearAllocationArea();
void DecreaseAllocatedBytes(size_t bytes, Page* page) {
accounting_stats_.DecreaseAllocatedBytes(bytes, page);
}
void IncreaseAllocatedBytes(size_t bytes, Page* page) {
accounting_stats_.IncreaseAllocatedBytes(bytes, page);
}
void DecreaseCapacity(size_t bytes) {
accounting_stats_.DecreaseCapacity(bytes);
}
void IncreaseCapacity(size_t bytes) {
accounting_stats_.IncreaseCapacity(bytes);
}
void RefineAllocatedBytesAfterSweeping(Page* page);
Page* InitializePage(MemoryChunk* chunk);
void ReleasePage(Page* page);
// Adds the page to this space and returns the number of bytes added to the
// free list of the space.
size_t AddPage(Page* page);
void RemovePage(Page* page);
// Remove a page if it has at least |size_in_bytes| bytes available that can
// be used for allocation.
Page* RemovePageSafe(int size_in_bytes);
void SetReadable();
void SetReadAndExecutable();
void SetReadAndWritable();
void SetDefaultCodePermissions() {
if (FLAG_jitless) {
SetReadable();
} else {
SetReadAndExecutable();
}
}
#ifdef VERIFY_HEAP
// Verify integrity of this space.
virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
void VerifyLiveBytes();
// Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space).
virtual void VerifyObject(HeapObject obj) {}
#endif
#ifdef DEBUG
void VerifyCountersAfterSweeping(Heap* heap);
void VerifyCountersBeforeConcurrentSweeping();
// Print meta info and objects in this space.
void Print() override;
// Report code object related statistics
static void ReportCodeStatistics(Isolate* isolate);
static void ResetCodeStatistics(Isolate* isolate);
#endif
bool CanExpand(size_t size);
// Returns the number of total pages in this space.
int CountTotalPages();
// Return size of allocatable area on a page in this space.
inline int AreaSize() { return static_cast<int>(area_size_); }
bool is_local_space() { return local_space_kind_ != LocalSpaceKind::kNone; }
bool is_off_thread_space() {
return local_space_kind_ == LocalSpaceKind::kOffThreadSpace;
}
bool is_compaction_space() {
return base::IsInRange(local_space_kind_,
LocalSpaceKind::kFirstCompactionSpace,
LocalSpaceKind::kLastCompactionSpace);
}
LocalSpaceKind local_space_kind() { return local_space_kind_; }
// Merges {other} into the current space. Note that this modifies {other},
// e.g., removes its bump pointer area and resets statistics.
void MergeLocalSpace(LocalSpace* other);
// Refills the free list from the corresponding free list filled by the
// sweeper.
virtual void RefillFreeList();
base::Mutex* mutex() { return &space_mutex_; }
inline void UnlinkFreeListCategories(Page* page);
inline size_t RelinkFreeListCategories(Page* page);
Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
const Page* first_page() const {
return reinterpret_cast<const Page*>(Space::first_page());
}
iterator begin() { return iterator(first_page()); }
iterator end() { return iterator(nullptr); }
const_iterator begin() const { return const_iterator(first_page()); }
const_iterator end() const { return const_iterator(nullptr); }
// Shrink immortal immovable pages of the space to be exactly the size needed
// using the high water mark.
void ShrinkImmortalImmovablePages();
size_t ShrinkPageToHighWaterMark(Page* page);
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
void SetLinearAllocationArea(Address top, Address limit);
private:
// Set space linear allocation area.
void SetTopAndLimit(Address top, Address limit) {
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(top, limit);
}
void DecreaseLimit(Address new_limit);
void UpdateInlineAllocationLimit(size_t min_size) override;
bool SupportsInlineAllocation() override {
return identity() == OLD_SPACE && !is_local_space();
}
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
virtual bool snapshotable() { return true; }
bool HasPages() { return first_page() != nullptr; }
// Cleans up the space, frees all pages in this space except those belonging
// to the initial chunk, uncommits addresses in the initial chunk.
void TearDown();
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate requested number of pages from OS, or if the hard heap
// size limit has been hit.
bool Expand();
// Sets up a linear allocation area that fits the given number of bytes.
// Returns false if there is not enough space and the caller has to retry
// after collecting garbage.
inline bool EnsureLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin);
// Allocates an object from the linear allocation area. Assumes that the
// linear allocation area is large enough to fit the object.
inline HeapObject AllocateLinearly(int size_in_bytes);
// Tries to allocate an aligned object from the linear allocation area.
// Returns nullptr if the linear allocation area does not fit the object.
// Otherwise, returns the object pointer and writes the allocation size
// (object size + alignment filler size) to the size_in_bytes.
inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
size_t size_in_bytes, AllocationOrigin origin);
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and retry free-list
// allocation. Returns false if there is not enough space and the caller
// has to retry after collecting garbage.
V8_WARN_UNUSED_RESULT bool EnsureSweptAndRetryAllocation(
int size_in_bytes, AllocationOrigin origin);
V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(int required_freed_bytes,
int max_pages,
int size_in_bytes,
AllocationOrigin origin);
// Slow path of AllocateRaw. This function is space-dependent. Returns false
// if there is not enough space and the caller has to retry after
// collecting garbage.
V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin);
// Implementation of SlowAllocateRaw. Returns false if there is not enough
// space and the caller has to retry after collecting garbage.
V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin);
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin);
Executability executable_;
LocalSpaceKind local_space_kind_;
size_t area_size_;
// Accounting information for this space.
AllocationStats accounting_stats_;
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
// Mutex guarding concurrent allocation.
base::Mutex allocation_mutex_;
friend class IncrementalMarking;
friend class MarkCompactCollector;
// Used in cctest.
friend class heap::HeapTester;
};
enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
// -----------------------------------------------------------------------------
@@ -2642,180 +2242,6 @@ class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
};
// -----------------------------------------------------------------------------
// Base class for compaction space and off-thread space.
class V8_EXPORT_PRIVATE LocalSpace : public PagedSpace {
public:
LocalSpace(Heap* heap, AllocationSpace id, Executability executable,
LocalSpaceKind local_space_kind)
: PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
local_space_kind) {
DCHECK_NE(local_space_kind, LocalSpaceKind::kNone);
}
protected:
// The space is temporary and not included in any snapshots.
bool snapshotable() override { return false; }
};
// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.
class V8_EXPORT_PRIVATE CompactionSpace : public LocalSpace {
public:
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
LocalSpaceKind local_space_kind)
: LocalSpace(heap, id, executable, local_space_kind) {
DCHECK(is_compaction_space());
}
protected:
V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin) override;
};
// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
public:
explicit CompactionSpaceCollection(Heap* heap,
LocalSpaceKind local_space_kind)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
local_space_kind),
code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
local_space_kind) {}
CompactionSpace* Get(AllocationSpace space) {
switch (space) {
case OLD_SPACE:
return &old_space_;
case CODE_SPACE:
return &code_space_;
default:
UNREACHABLE();
}
UNREACHABLE();
}
private:
CompactionSpace old_space_;
CompactionSpace code_space_;
};
// -----------------------------------------------------------------------------
// Old generation regular object space.
class OldSpace : public PagedSpace {
public:
// Creates an old space object. The constructor does not allocate pages
// from OS.
explicit OldSpace(Heap* heap)
: PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
FreeList::CreateFreeList()) {}
static bool IsAtPageStart(Address addr) {
return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
MemoryChunkLayout::ObjectStartOffsetInDataPage();
}
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
type == ExternalBackingStoreType::kArrayBuffer)
return heap()->OldArrayBufferBytes();
return external_backing_store_bytes_[type];
}
};
// -----------------------------------------------------------------------------
// Old generation code object space.
class CodeSpace : public PagedSpace {
public:
// Creates an old space object. The constructor does not allocate pages
// from OS.
explicit CodeSpace(Heap* heap)
: PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
};
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
SLOW_DCHECK((space).page_low() <= (info).top() && \
(info).top() <= (space).page_high() && \
(info).limit() <= (space).page_high())
// -----------------------------------------------------------------------------
// Old space for all map objects
class MapSpace : public PagedSpace {
public:
// Creates a map space object.
explicit MapSpace(Heap* heap)
: PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, new FreeListMap()) {}
int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo(Map::kSize)) {
return RoundDown(size, Map::kSize);
} else {
return (size / Map::kSize) * Map::kSize;
}
}
void SortFreeList();
#ifdef VERIFY_HEAP
void VerifyObject(HeapObject obj) override;
#endif
};
// -----------------------------------------------------------------------------
// Off-thread space that is used for folded allocation on a different thread.
class V8_EXPORT_PRIVATE OffThreadSpace : public LocalSpace {
public:
explicit OffThreadSpace(Heap* heap)
: LocalSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
LocalSpaceKind::kOffThreadSpace) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
// OffThreadSpace doesn't work with third-party heap.
UNREACHABLE();
#endif
}
protected:
V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin) override;
void RefillFreeList() override;
};
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class OldGenerationMemoryChunkIterator {
public:
inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
// Return nullptr when the iterator is done.
inline MemoryChunk* next();
private:
enum State {
kOldSpaceState,
kMapState,
kCodeState,
kLargeObjectState,
kCodeLargeObjectState,
kFinishedState
};
Heap* heap_;
State state_;
PageIterator old_iterator_;
PageIterator code_iterator_;
PageIterator map_iterator_;
LargePageIterator lo_iterator_;
LargePageIterator code_lo_iterator_;
};
} // namespace internal
} // namespace v8


@@ -6,6 +6,7 @@
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"


@@ -6,10 +6,10 @@
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
#include "src/execution/frames.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/spaces.h"
#include "src/objects/objects-inl.h"


@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/spaces.h"
#include "src/heap/code-object-registry.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {