Reland "heap: Factor out raw allocation functions into HeapAllocator"
This is a reland of dec62c2d0f
Revert was not necessary as test was independently flaking.
Original change's description:
> heap: Factor out raw allocation functions into HeapAllocator
>
> This CL is mostly mechanical and provides runtime and static
> dispatch for allocation of objects using HeapAllocator.
>
> Future CLs will remove the Heap bottlenecks.
>
> Bug: v8:12615
> Change-Id: Id2becf7da4bd5273f96abc0e1a4ac6c04bddb1cb
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3474674
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#79229}
Bug: v8:12615
Change-Id: I505ebde7afd2b0d03e11ef4cbcf1d4d09c6826a1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3484322
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Auto-Submit: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79236}
This commit is contained in:
parent 22d8d3be5a
commit 09c001a79a
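For orientation before the diff: the "runtime and static dispatch" mentioned above shows up as two AllocateRaw entry points on HeapAllocator. The following is a minimal caller-side sketch based on the interface added in this CL; the helper functions and the `size_in_bytes` parameter are invented for illustration and are not part of the change.

// Runtime dispatch: AllocationType is a regular argument and the per-type
// switch happens inside HeapAllocator::AllocateRaw().
AllocationResult AllocateDynamic(HeapAllocator& allocator, int size_in_bytes,
                                 AllocationType type) {
  return allocator.AllocateRaw(size_in_bytes, type);
}

// Static dispatch: AllocationType is a template argument, so the per-type
// branch is resolved at compile time.
AllocationResult AllocateOld(HeapAllocator& allocator, int size_in_bytes) {
  return allocator.AllocateRaw<AllocationType::kOld>(size_in_bytes);
}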
@@ -1348,6 +1348,9 @@ filegroup(
         "src/heap/gc-idle-time-handler.h",
         "src/heap/gc-tracer.cc",
         "src/heap/gc-tracer.h",
+        "src/heap/heap-allocator-inl.h",
+        "src/heap/heap-allocator.cc",
+        "src/heap/heap-allocator.h",
         "src/heap/heap-controller.cc",
         "src/heap/heap-controller.h",
         "src/heap/heap-inl.h",
3 BUILD.gn
@@ -2979,6 +2979,8 @@ v8_header_set("v8_internal_headers") {
     "src/heap/free-list.h",
     "src/heap/gc-idle-time-handler.h",
     "src/heap/gc-tracer.h",
+    "src/heap/heap-allocator-inl.h",
+    "src/heap/heap-allocator.h",
     "src/heap/heap-controller.h",
     "src/heap/heap-inl.h",
     "src/heap/heap-layout-tracer.h",
@@ -4182,6 +4184,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/heap/free-list.cc",
     "src/heap/gc-idle-time-handler.cc",
     "src/heap/gc-tracer.cc",
+    "src/heap/heap-allocator.cc",
     "src/heap/heap-controller.cc",
     "src/heap/heap-layout-tracer.cc",
     "src/heap/heap-write-barrier.cc",
@@ -13,6 +13,15 @@
 namespace v8 {
 namespace internal {
 
+enum class AllocationOrigin {
+  kGeneratedCode = 0,
+  kRuntime = 1,
+  kGC = 2,
+  kFirstAllocationOrigin = kGeneratedCode,
+  kLastAllocationOrigin = kGC,
+  kNumberOfAllocationOrigins = kLastAllocationOrigin + 1
+};
+
 // The result of an allocation attempt. Either represents a successful
 // allocation that can be turned into an object or a failed attempt.
 class AllocationResult final {
246 src/heap/heap-allocator-inl.h (new file)
@@ -0,0 +1,246 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_ALLOCATOR_INL_H_
#define V8_HEAP_HEAP_ALLOCATOR_INL_H_

#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/heap-allocator.h"
#include "src/heap/large-spaces.h"
#include "src/heap/new-spaces.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/third-party/heap-api.h"

namespace v8 {
namespace internal {

PagedSpace* HeapAllocator::code_space() const {
  return static_cast<PagedSpace*>(spaces_[CODE_SPACE]);
}

CodeLargeObjectSpace* HeapAllocator::code_lo_space() const {
  return static_cast<CodeLargeObjectSpace*>(spaces_[CODE_LO_SPACE]);
}

OldLargeObjectSpace* HeapAllocator::lo_space() const {
  return static_cast<OldLargeObjectSpace*>(spaces_[LO_SPACE]);
}

PagedSpace* HeapAllocator::map_space() const {
  return static_cast<PagedSpace*>(spaces_[MAP_SPACE]);
}

NewSpace* HeapAllocator::new_space() const {
  return static_cast<NewSpace*>(spaces_[NEW_SPACE]);
}

NewLargeObjectSpace* HeapAllocator::new_lo_space() const {
  return static_cast<NewLargeObjectSpace*>(spaces_[NEW_LO_SPACE]);
}

PagedSpace* HeapAllocator::old_space() const {
  return static_cast<PagedSpace*>(spaces_[OLD_SPACE]);
}

ReadOnlySpace* HeapAllocator::read_only_space() const {
  return read_only_space_;
}

template <AllocationType type>
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult HeapAllocator::AllocateRaw(
    int size_in_bytes, AllocationOrigin origin, AllocationAlignment alignment) {
  DCHECK_EQ(heap_->gc_state(), Heap::NOT_IN_GC);
  DCHECK(AllowHandleAllocation::IsAllowed());
  DCHECK(AllowHeapAllocation::IsAllowed());

  if (FLAG_single_generation && type == AllocationType::kYoung) {
    return AllocateRaw(size_in_bytes, AllocationType::kOld, origin, alignment);
  }

#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
    if (!heap_->always_allocate() && allocation_timeout_-- <= 0) {
      return AllocationResult::Failure();
    }
  }
#endif  // V8_ENABLE_ALLOCATION_TIMEOUT

#ifdef DEBUG
  IncrementObjectCounters();
#endif  // DEBUG

  if (heap_->CanSafepoint()) {
    heap_->main_thread_local_heap()->Safepoint();
  }

  const size_t large_object_threshold = heap_->MaxRegularHeapObjectSize(type);
  const bool large_object =
      static_cast<size_t>(size_in_bytes) > large_object_threshold;

  HeapObject object;
  AllocationResult allocation;

  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
    allocation = heap_->tp_heap_->Allocate(size_in_bytes, type, alignment);
  } else {
    if (V8_UNLIKELY(large_object)) {
      allocation =
          AllocateRawLargeInternal(size_in_bytes, type, origin, alignment);
    } else {
      switch (type) {
        case AllocationType::kYoung:
          allocation =
              new_space()->AllocateRaw(size_in_bytes, alignment, origin);
          break;
        case AllocationType::kOld:
          allocation =
              old_space()->AllocateRaw(size_in_bytes, alignment, origin);
          break;
        case AllocationType::kCode:
          DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
          DCHECK(AllowCodeAllocation::IsAllowed());
          allocation = code_space()->AllocateRawUnaligned(size_in_bytes);
          break;
        case AllocationType::kMap:
          DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
          allocation = map_space()->AllocateRawUnaligned(size_in_bytes);
          break;
        case AllocationType::kReadOnly:
          DCHECK(read_only_space()->writable());
          DCHECK_EQ(AllocationOrigin::kRuntime, origin);
          allocation = read_only_space()->AllocateRaw(size_in_bytes, alignment);
          break;
        case AllocationType::kSharedMap:
          allocation = shared_map_allocator_->AllocateRaw(size_in_bytes,
                                                          alignment, origin);
          break;
        case AllocationType::kSharedOld:
          allocation = shared_old_allocator_->AllocateRaw(size_in_bytes,
                                                          alignment, origin);
          break;
      }
    }
  }

  if (allocation.To(&object)) {
    if (AllocationType::kCode == type && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
      // Unprotect the memory chunk of the object if it was not unprotected
      // already.
      heap_->UnprotectAndRegisterMemoryChunk(
          object, UnprotectMemoryOrigin::kMainThread);
      heap_->ZapCodeObject(object.address(), size_in_bytes);
      if (!large_object) {
        MemoryChunk::FromHeapObject(object)
            ->GetCodeObjectRegistry()
            ->RegisterNewlyAllocatedCodeObject(object.address());
      }
    }

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
    if (AllocationType::kReadOnly != type) {
      DCHECK_TAG_ALIGNED(object.address());
      Page::FromHeapObject(object)->object_start_bitmap()->SetBit(
          object.address());
    }
#endif  // V8_ENABLE_CONSERVATIVE_STACK_SCANNING

    for (auto& tracker : heap_->allocation_trackers_) {
      tracker->AllocationEvent(object.address(), size_in_bytes);
    }
  }

  return allocation;
}

AllocationResult HeapAllocator::AllocateRaw(int size_in_bytes,
                                            AllocationType type,
                                            AllocationOrigin origin,
                                            AllocationAlignment alignment) {
  switch (type) {
    case AllocationType::kYoung:
      return AllocateRaw<AllocationType::kYoung>(size_in_bytes, origin,
                                                 alignment);
    case AllocationType::kOld:
      return AllocateRaw<AllocationType::kOld>(size_in_bytes, origin,
                                               alignment);
    case AllocationType::kCode:
      return AllocateRaw<AllocationType::kCode>(size_in_bytes, origin,
                                                alignment);
    case AllocationType::kMap:
      return AllocateRaw<AllocationType::kMap>(size_in_bytes, origin,
                                               alignment);
    case AllocationType::kReadOnly:
      return AllocateRaw<AllocationType::kReadOnly>(size_in_bytes, origin,
                                                    alignment);
    case AllocationType::kSharedMap:
      return AllocateRaw<AllocationType::kSharedMap>(size_in_bytes, origin,
                                                     alignment);
    case AllocationType::kSharedOld:
      return AllocateRaw<AllocationType::kSharedOld>(size_in_bytes, origin,
                                                     alignment);
  }
  UNREACHABLE();
}

AllocationResult HeapAllocator::AllocateRawData(int size_in_bytes,
                                                AllocationType type,
                                                AllocationOrigin origin,
                                                AllocationAlignment alignment) {
  switch (type) {
    case AllocationType::kYoung:
      return AllocateRaw<AllocationType::kYoung>(size_in_bytes, origin,
                                                 alignment);
    case AllocationType::kOld:
      return AllocateRaw<AllocationType::kOld>(size_in_bytes, origin,
                                               alignment);
    case AllocationType::kCode:
    case AllocationType::kMap:
    case AllocationType::kReadOnly:
    case AllocationType::kSharedMap:
    case AllocationType::kSharedOld:
      UNREACHABLE();
  }
  UNREACHABLE();
}

template <HeapAllocator::AllocationRetryMode mode>
V8_WARN_UNUSED_RESULT V8_INLINE HeapObject HeapAllocator::AllocateRawWith(
    int size, AllocationType allocation, AllocationOrigin origin,
    AllocationAlignment alignment) {
  AllocationResult result;
  HeapObject object;
  if (allocation == AllocationType::kYoung) {
    result = AllocateRaw<AllocationType::kYoung>(size, origin, alignment);
    if (result.To(&object)) {
      return object;
    }
  } else if (allocation == AllocationType::kOld) {
    result = AllocateRaw<AllocationType::kOld>(size, origin, alignment);
    if (result.To(&object)) {
      return object;
    }
  }
  switch (mode) {
    case kLightRetry:
      result = AllocateRawWithLightRetrySlowPath(size, allocation, origin,
                                                 alignment);
      break;
    case kRetryOrFail:
      result = AllocateRawWithRetryOrFailSlowPath(size, allocation, origin,
                                                  alignment);
      break;
  }
  if (result.To(&object)) {
    return object;
  }
  return HeapObject();
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_ALLOCATOR_INL_H_
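A hedged usage sketch of the two retry modes wired up above; the helper function is invented for illustration. The failure checks follow the code in this file and in the slow paths below: kLightRetry can hand back an empty HeapObject, while kRetryOrFail's slow path calls FatalProcessOutOfMemory on persistent failure, so a returned object is always valid.

HeapObject AllocateOldWithFallback(HeapAllocator& allocator, int size) {
  // kLightRetry: fast path, then up to two GCs; may return an empty object.
  HeapObject result = allocator.AllocateRawWith<HeapAllocator::kLightRetry>(
      size, AllocationType::kOld);
  if (!result.is_null()) return result;
  // kRetryOrFail: last-resort GCs; on persistent failure the process is
  // terminated instead of returning an empty object.
  return allocator.AllocateRawWith<HeapAllocator::kRetryOrFail>(
      size, AllocationType::kOld);
}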
173 src/heap/heap-allocator.cc (new file)
@@ -0,0 +1,173 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/heap-allocator.h"

#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-allocator-inl.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"

namespace v8 {
namespace internal {

class Heap;

HeapAllocator::HeapAllocator(Heap* heap) : heap_(heap) {}

void HeapAllocator::Setup() {
  for (int i = FIRST_SPACE; i <= LAST_SPACE; ++i) {
    spaces_[i] = heap_->space(i);
  }
  shared_old_allocator_ = heap_->shared_old_allocator_.get();
  shared_map_allocator_ = heap_->shared_map_allocator_.get();
}

void HeapAllocator::SetReadOnlySpace(ReadOnlySpace* read_only_space) {
  read_only_space_ = read_only_space;
}

AllocationResult HeapAllocator::AllocateRawLargeInternal(
    int size_in_bytes, AllocationType allocation, AllocationOrigin origin,
    AllocationAlignment alignment) {
  DCHECK_GT(size_in_bytes, heap_->MaxRegularHeapObjectSize(allocation));
  switch (allocation) {
    case AllocationType::kYoung:
      return new_lo_space()->AllocateRaw(size_in_bytes);
    case AllocationType::kOld:
      return lo_space()->AllocateRaw(size_in_bytes);
    case AllocationType::kCode:
      return code_lo_space()->AllocateRaw(size_in_bytes);
    case AllocationType::kMap:
    case AllocationType::kReadOnly:
    case AllocationType::kSharedMap:
    case AllocationType::kSharedOld:
      UNREACHABLE();
  }
}

namespace {

constexpr AllocationSpace AllocationTypeToGCSpace(AllocationType type) {
  switch (type) {
    case AllocationType::kYoung:
      return NEW_SPACE;
    case AllocationType::kOld:
    case AllocationType::kCode:
    case AllocationType::kMap:
      // OLD_SPACE indicates full GC.
      return OLD_SPACE;
    case AllocationType::kReadOnly:
    case AllocationType::kSharedMap:
    case AllocationType::kSharedOld:
      UNREACHABLE();
  }
}

}  // namespace

AllocationResult HeapAllocator::AllocateRawWithLightRetrySlowPath(
    int size, AllocationType allocation, AllocationOrigin origin,
    AllocationAlignment alignment) {
  AllocationResult result = AllocateRaw(size, allocation, origin, alignment);
  if (!result.IsFailure()) {
    return result;
  }

  // Two GCs before returning failure.
  for (int i = 0; i < 2; i++) {
    if (IsSharedAllocationType(allocation)) {
      heap_->CollectSharedGarbage(GarbageCollectionReason::kAllocationFailure);
    } else {
      heap_->CollectGarbage(AllocationTypeToGCSpace(allocation),
                            GarbageCollectionReason::kAllocationFailure);
    }
    result = AllocateRaw(size, allocation, origin, alignment);
    if (!result.IsFailure()) {
      return result;
    }
  }
  return result;
}

AllocationResult HeapAllocator::AllocateRawWithRetryOrFailSlowPath(
    int size, AllocationType allocation, AllocationOrigin origin,
    AllocationAlignment alignment) {
  AllocationResult result =
      AllocateRawWithLightRetrySlowPath(size, allocation, origin, alignment);
  if (!result.IsFailure()) return result;

  heap_->isolate()->counters()->gc_last_resort_from_handles()->Increment();
  if (IsSharedAllocationType(allocation)) {
    heap_->CollectSharedGarbage(GarbageCollectionReason::kLastResort);

    // We need always_allocate() to be true both on the client- and
    // server-isolate. It is used in both code paths.
    AlwaysAllocateScope shared_scope(
        heap_->isolate()->shared_isolate()->heap());
    AlwaysAllocateScope client_scope(heap_);
    result = AllocateRaw(size, allocation, origin, alignment);
  } else {
    heap_->CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);

    AlwaysAllocateScope scope(heap_);
    result = AllocateRaw(size, allocation, origin, alignment);
  }

  if (!result.IsFailure()) {
    return result;
  }

  v8::internal::V8::FatalProcessOutOfMemory(heap_->isolate(),
                                            "CALL_AND_RETRY_LAST", true);
}

#ifdef DEBUG

void HeapAllocator::IncrementObjectCounters() {
  heap_->isolate()->counters()->objs_since_last_full()->Increment();
  heap_->isolate()->counters()->objs_since_last_young()->Increment();
}

#endif  // DEBUG

#ifdef V8_ENABLE_ALLOCATION_TIMEOUT

void HeapAllocator::SetAllocationTimeout(int allocation_timeout) {
  allocation_timeout_ = allocation_timeout;
}

void HeapAllocator::UpdateAllocationTimeout() {
  if (FLAG_random_gc_interval <= 0 && FLAG_gc_interval < 0) return;

  int new_timeout;
  if (FLAG_random_gc_interval > 0) {
    new_timeout = allocation_timeout_ <= 0
                      ? heap_->isolate()->fuzzer_rng()->NextInt(
                            FLAG_random_gc_interval + 1)
                      : allocation_timeout_;

  } else {
    DCHECK_GE(FLAG_gc_interval, 0);
    new_timeout = FLAG_gc_interval;
  }
  DCHECK_GE(new_timeout, 0);

  // Reset the allocation timeout, but make sure to allow at least a few
  // allocations after a collection. The reason for this is that we have a lot
  // of allocation sequences and we assume that a garbage collection will allow
  // the subsequent allocation attempts to go through.
  //
  // TODO(v8:12615): Move `kFewAllocationsHeadroom` behind
  // `FLAG_random_gc_interval`.
  constexpr int kFewAllocationsHeadroom = 6;
  allocation_timeout_ = std::max(kFewAllocationsHeadroom, new_timeout);
}

#endif  // V8_ENABLE_ALLOCATION_TIMEOUT

}  // namespace internal
}  // namespace v8
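The allocation-timeout fuzzing above is easiest to see in isolation. The following stand-alone model is an assumption-laden sketch, not V8 code: all names are invented, and only the decrement-then-fail behavior and the headroom constant come from the code above.

#include <algorithm>

// Simplified model: one counter that every allocation decrements and that the
// GC prologue re-arms with a small headroom.
struct AllocationTimeoutModel {
  int gc_interval = 0;         // stands in for FLAG_gc_interval
  int allocation_timeout = 0;  // remaining allocations before a forced failure

  // Mirrors the check in HeapAllocator::AllocateRaw(): the allocation that
  // drops the counter to or below zero is turned into a failure.
  bool NextAllocationFails() { return allocation_timeout-- <= 0; }

  // Mirrors UpdateAllocationTimeout(): keep a few allocations of headroom so
  // the allocation sequences right after a GC can still complete.
  void RearmAfterGC() {
    constexpr int kFewAllocationsHeadroom = 6;
    allocation_timeout = std::max(kFewAllocationsHeadroom, gc_interval);
  }
};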
117 src/heap/heap-allocator.h (new file)
@@ -0,0 +1,117 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_ALLOCATOR_H_
#define V8_HEAP_HEAP_ALLOCATOR_H_

#include "include/v8config.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/allocation-result.h"

namespace v8 {
namespace internal {

class CodeLargeObjectSpace;
class ConcurrentAllocator;
class Heap;
class NewSpace;
class NewLargeObjectSpace;
class OldLargeObjectSpace;
class PagedSpace;
class ReadOnlySpace;
class Space;

// Allocator for the main thread. All exposed functions internally call the
// right bottleneck.
class V8_EXPORT_PRIVATE HeapAllocator final {
 public:
  explicit HeapAllocator(Heap*);

  void Setup();
  void SetReadOnlySpace(ReadOnlySpace*);

  // Supports all `AllocationType` types.
  //
  // Returns a failed result on an unsuccessful allocation attempt.
  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
  AllocateRaw(int size_in_bytes, AllocationType allocation,
              AllocationOrigin origin = AllocationOrigin::kRuntime,
              AllocationAlignment alignment = kTaggedAligned);

  // Supports all `AllocationType` types. Use when type is statically known.
  //
  // Returns a failed result on an unsuccessful allocation attempt.
  template <AllocationType type>
  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRaw(
      int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime,
      AllocationAlignment alignment = kTaggedAligned);

  // Supports only `AllocationType::kYoung` and `AllocationType::kOld`.
  //
  // Returns a failed result on an unsuccessful allocation attempt.
  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
  AllocateRawData(int size_in_bytes, AllocationType allocation,
                  AllocationOrigin origin = AllocationOrigin::kRuntime,
                  AllocationAlignment alignment = kTaggedAligned);

  enum AllocationRetryMode { kLightRetry, kRetryOrFail };

  // Supports all `AllocationType` types and allows specifying retry handling.
  template <AllocationRetryMode mode>
  V8_WARN_UNUSED_RESULT V8_INLINE HeapObject
  AllocateRawWith(int size, AllocationType allocation,
                  AllocationOrigin origin = AllocationOrigin::kRuntime,
                  AllocationAlignment alignment = kTaggedAligned);

#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  void UpdateAllocationTimeout();
  void SetAllocationTimeout(int allocation_timeout);
#endif  // V8_ENABLE_ALLOCATION_TIMEOUT

 private:
  V8_INLINE PagedSpace* code_space() const;
  V8_INLINE CodeLargeObjectSpace* code_lo_space() const;
  V8_INLINE PagedSpace* map_space() const;
  V8_INLINE NewSpace* new_space() const;
  V8_INLINE NewLargeObjectSpace* new_lo_space() const;
  V8_INLINE OldLargeObjectSpace* lo_space() const;
  V8_INLINE PagedSpace* old_space() const;
  V8_INLINE ReadOnlySpace* read_only_space() const;

  V8_WARN_UNUSED_RESULT AllocationResult AllocateRawLargeInternal(
      int size_in_bytes, AllocationType allocation, AllocationOrigin origin,
      AllocationAlignment alignment);

  V8_WARN_UNUSED_RESULT AllocationResult AllocateRawWithRetryOrFailSlowPath(
      int size, AllocationType allocation, AllocationOrigin origin,
      AllocationAlignment alignment);

  V8_WARN_UNUSED_RESULT AllocationResult AllocateRawWithLightRetrySlowPath(
      int size, AllocationType allocation, AllocationOrigin origin,
      AllocationAlignment alignment);

#ifdef DEBUG
  void IncrementObjectCounters();
#endif  // DEBUG

  Heap* const heap_;
  Space* spaces_[LAST_SPACE + 1];
  ReadOnlySpace* read_only_space_;

  ConcurrentAllocator* shared_old_allocator_;
  ConcurrentAllocator* shared_map_allocator_;

#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  // If the --gc-interval flag is set to a positive value, this variable
  // holds the value indicating the number of allocations remain until the
  // next failure and garbage collection.
  int allocation_timeout_ = 0;
#endif  // V8_ENABLE_ALLOCATION_TIMEOUT
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_ALLOCATOR_H_
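One point the header documents but does not enforce is that AllocateRawData() supports only kYoung and kOld. A thin illustrative wrapper, not part of the CL, could push that contract to compile time; the helper name and the static_assert are assumptions.

// Illustrative only: documents the AllocateRawData() contract stated above.
template <AllocationType type>
AllocationResult AllocateDataOnly(HeapAllocator& allocator, int size_in_bytes) {
  static_assert(type == AllocationType::kYoung || type == AllocationType::kOld,
                "AllocateRawData() supports only young and old data objects");
  return allocator.AllocateRawData(size_in_bytes, type);
}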
@@ -21,6 +21,7 @@
 #include "src/heap/code-object-registry.h"
 #include "src/heap/concurrent-allocator-inl.h"
 #include "src/heap/concurrent-allocator.h"
+#include "src/heap/heap-allocator-inl.h"
 #include "src/heap/heap-write-barrier.h"
 #include "src/heap/heap.h"
 #include "src/heap/large-spaces.h"
@@ -199,146 +200,26 @@ int Heap::MaxRegularHeapObjectSize(AllocationType allocation) {
 AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
                                    AllocationOrigin origin,
                                    AllocationAlignment alignment) {
-  DCHECK_EQ(gc_state(), NOT_IN_GC);
-  DCHECK(AllowHandleAllocation::IsAllowed());
-  DCHECK(AllowHeapAllocation::IsAllowed());
-#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
-  if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
-    if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
-      return AllocationResult::Failure();
-    }
-  }
-#endif  // V8_ENABLE_ALLOCATION_TIMEOUT
-#ifdef DEBUG
-  IncrementObjectCounters();
-#endif  // DEBUG
-
-  if (CanSafepoint()) {
-    main_thread_local_heap()->Safepoint();
-  }
-
-  const size_t large_object_threshold = MaxRegularHeapObjectSize(type);
-  const bool large_object =
-      static_cast<size_t>(size_in_bytes) > large_object_threshold;
-
-  HeapObject object;
-  AllocationResult allocation;
-
-  if (FLAG_single_generation && type == AllocationType::kYoung) {
-    type = AllocationType::kOld;
-  }
-
-  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
-    allocation = tp_heap_->Allocate(size_in_bytes, type, alignment);
-  } else {
-    if (V8_UNLIKELY(large_object)) {
-      allocation =
-          AllocateRawLargeInternal(size_in_bytes, type, origin, alignment);
-    } else {
-      switch (type) {
-        case AllocationType::kYoung:
-          allocation =
-              new_space_->AllocateRaw(size_in_bytes, alignment, origin);
-          break;
-        case AllocationType::kOld:
-          allocation =
-              old_space_->AllocateRaw(size_in_bytes, alignment, origin);
-          break;
-        case AllocationType::kCode:
-          DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
-          DCHECK(AllowCodeAllocation::IsAllowed());
-          allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
-          break;
-        case AllocationType::kMap: {
-          DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
-          PagedSpace* allocation_space =
-              V8_LIKELY(map_space_) ? static_cast<PagedSpace*>(map_space_)
-                                    : static_cast<PagedSpace*>(old_space_);
-          allocation = allocation_space->AllocateRawUnaligned(size_in_bytes);
-          break;
-        }
-        case AllocationType::kReadOnly:
-          DCHECK(CanAllocateInReadOnlySpace());
-          DCHECK_EQ(AllocationOrigin::kRuntime, origin);
-          allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
-          break;
-        case AllocationType::kSharedMap: {
-          ConcurrentAllocator* allocator = V8_LIKELY(shared_map_allocator_)
-                                               ? shared_map_allocator_.get()
-                                               : shared_old_allocator_.get();
-          allocation = allocator->AllocateRaw(size_in_bytes, alignment, origin);
-          break;
-        }
-        case AllocationType::kSharedOld:
-          allocation = shared_old_allocator_->AllocateRaw(size_in_bytes,
-                                                          alignment, origin);
-          break;
-      }
-    }
-  }
-
-  if (allocation.To(&object)) {
-    if (AllocationType::kCode == type && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
-      // Unprotect the memory chunk of the object if it was not unprotected
-      // already.
-      UnprotectAndRegisterMemoryChunk(object,
-                                      UnprotectMemoryOrigin::kMainThread);
-      ZapCodeObject(object.address(), size_in_bytes);
-      if (!large_object) {
-        MemoryChunk::FromHeapObject(object)
-            ->GetCodeObjectRegistry()
-            ->RegisterNewlyAllocatedCodeObject(object.address());
-      }
-    }
-
-#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
-    if (AllocationType::kReadOnly != type) {
-      DCHECK_TAG_ALIGNED(object.address());
-      Page::FromHeapObject(object)->object_start_bitmap()->SetBit(
-          object.address());
-    }
-#endif  // V8_ENABLE_CONSERVATIVE_STACK_SCANNING
-
-    for (auto& tracker : allocation_trackers_) {
-      tracker->AllocationEvent(object.address(), size_in_bytes);
-    }
-  }
-
-  return allocation;
+  return heap_allocator_.AllocateRaw(size_in_bytes, type, origin, alignment);
 }
 
 template <Heap::AllocationRetryMode mode>
 HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
                                  AllocationOrigin origin,
                                  AllocationAlignment alignment) {
-  DCHECK(AllowHandleAllocation::IsAllowed());
-  DCHECK(AllowHeapAllocation::IsAllowed());
-  DCHECK_EQ(gc_state(), NOT_IN_GC);
-  if (allocation == AllocationType::kYoung) {
-    auto result = AllocateRaw(size, AllocationType::kYoung, origin, alignment);
-    HeapObject object;
-    if (result.To(&object)) return object;
-
-  } else if (allocation == AllocationType::kOld) {
-    auto result = AllocateRaw(size, AllocationType::kOld, origin, alignment);
-    HeapObject object;
-    if (result.To(&object)) return object;
-  }
-  switch (mode) {
-    case kLightRetry:
-      return AllocateRawWithLightRetrySlowPath(size, allocation, origin,
-                                               alignment);
-    case kRetryOrFail:
-      return AllocateRawWithRetryOrFailSlowPath(size, allocation, origin,
-                                                alignment);
-  }
-  UNREACHABLE();
+  return heap_allocator_.AllocateRawWith < mode ==
+                                               AllocationRetryMode::kLightRetry
+             ? HeapAllocator::kLightRetry
+             : HeapAllocator::kRetryOrFail >
+      (size, allocation, origin, alignment);
 }
 
 Address Heap::AllocateRawOrFail(int size, AllocationType allocation,
                                 AllocationOrigin origin,
                                 AllocationAlignment alignment) {
-  return AllocateRawWith<kRetryOrFail>(size, allocation, origin, alignment)
+  return heap_allocator_
+      .AllocateRawWith<HeapAllocator::kRetryOrFail>(size, allocation, origin,
+                                                    alignment)
       .address();
 }
 
143 src/heap/heap.cc
@@ -204,6 +204,7 @@ class ScavengeTaskObserver : public AllocationObserver {
 
 Heap::Heap()
     : isolate_(isolate()),
+      heap_allocator_(this),
       memory_pressure_level_(MemoryPressureLevel::kNone),
       global_pretenuring_feedback_(kInitialFeedbackCapacity),
       safepoint_(std::make_unique<IsolateSafepoint>(this)),
@@ -1022,15 +1023,8 @@ void Heap::GarbageCollectionPrologue(
   if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
 
 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
-  // Reset the allocation timeout, but make sure to allow at least a few
-  // allocations after a collection. The reason for this is that we have a lot
-  // of allocation sequences and we assume that a garbage collection will allow
-  // the subsequent allocation attempts to go through.
-  if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
-    allocation_timeout_ =
-        std::max(6, NextAllocationTimeout(allocation_timeout_));
-  }
-#endif
+  heap_allocator_.UpdateAllocationTimeout();
+#endif  // V8_ENABLE_ALLOCATION_TIMEOUT
 
   // There may be an allocation memento behind objects in new space. Upon
   // evacuation of a non-full new space (or if we are on the last page) there
@@ -5637,118 +5631,13 @@ void Heap::DisableInlineAllocation() {
   }
 }
 
-namespace {
-
-constexpr AllocationSpace AllocationTypeToGCSpace(AllocationType type) {
-  switch (type) {
-    case AllocationType::kYoung:
-      return NEW_SPACE;
-    case AllocationType::kOld:
-    case AllocationType::kCode:
-    case AllocationType::kMap:
-      // OLD_SPACE indicates full GC.
-      return OLD_SPACE;
-    case AllocationType::kReadOnly:
-    case AllocationType::kSharedMap:
-    case AllocationType::kSharedOld:
-      UNREACHABLE();
-  }
-}
-
-}  // namespace
-
-HeapObject Heap::AllocateRawWithLightRetrySlowPath(
-    int size, AllocationType allocation, AllocationOrigin origin,
-    AllocationAlignment alignment) {
-  HeapObject result;
-  AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment);
-  if (alloc.To(&result)) {
-    // DCHECK that the successful allocation is not "exception". The one
-    // exception to this is when allocating the "exception" object itself, in
-    // which case this must be an ROSpace allocation and the exception object
-    // in the roots has to be unset.
-    DCHECK((CanAllocateInReadOnlySpace() &&
-            allocation == AllocationType::kReadOnly &&
-            ReadOnlyRoots(this).unchecked_exception() == Smi::zero()) ||
-           result != ReadOnlyRoots(this).exception());
-    return result;
-  }
-  // Two GCs before panicking. In newspace will almost always succeed.
-  for (int i = 0; i < 2; i++) {
-    if (IsSharedAllocationType(allocation)) {
-      CollectSharedGarbage(GarbageCollectionReason::kAllocationFailure);
-    } else {
-      CollectGarbage(AllocationTypeToGCSpace(allocation),
-                     GarbageCollectionReason::kAllocationFailure);
-    }
-    alloc = AllocateRaw(size, allocation, origin, alignment);
-    if (alloc.To(&result)) {
-      DCHECK(result != ReadOnlyRoots(this).exception());
-      return result;
-    }
-  }
-  return HeapObject();
-}
-
-AllocationResult Heap::AllocateRawLargeInternal(int size_in_bytes,
-                                                AllocationType allocation,
-                                                AllocationOrigin origin,
-                                                AllocationAlignment alignment) {
-  DCHECK_GT(size_in_bytes, MaxRegularHeapObjectSize(allocation));
-  switch (allocation) {
-    case AllocationType::kYoung:
-      return new_lo_space_->AllocateRaw(size_in_bytes);
-    case AllocationType::kOld:
-      return lo_space_->AllocateRaw(size_in_bytes);
-    case AllocationType::kCode:
-      return code_lo_space_->AllocateRaw(size_in_bytes);
-    case AllocationType::kMap:
-    case AllocationType::kReadOnly:
-    case AllocationType::kSharedMap:
-    case AllocationType::kSharedOld:
-      UNREACHABLE();
-  }
-}
-
-HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
-    int size, AllocationType allocation, AllocationOrigin origin,
-    AllocationAlignment alignment) {
-  AllocationResult alloc;
-  HeapObject result =
-      AllocateRawWithLightRetrySlowPath(size, allocation, origin, alignment);
-  if (!result.is_null()) return result;
-
-  isolate()->counters()->gc_last_resort_from_handles()->Increment();
-  if (IsSharedAllocationType(allocation)) {
-    CollectSharedGarbage(GarbageCollectionReason::kLastResort);
-
-    // We need always_allocate() to be true both on the client- and
-    // server-isolate. It is used in both code paths.
-    AlwaysAllocateScope shared_scope(isolate()->shared_isolate()->heap());
-    AlwaysAllocateScope client_scope(isolate()->heap());
-    alloc = AllocateRaw(size, allocation, origin, alignment);
-  } else {
-    CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
-
-    AlwaysAllocateScope scope(this);
-    alloc = AllocateRaw(size, allocation, origin, alignment);
-  }
-
-  if (alloc.To(&result)) {
-    DCHECK(result != ReadOnlyRoots(this).exception());
-    return result;
-  }
-  // TODO(1181417): Fix this.
-  FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
-}
-
 void Heap::SetUp(LocalHeap* main_thread_local_heap) {
   DCHECK_NULL(main_thread_local_heap_);
   main_thread_local_heap_ = main_thread_local_heap;
 
 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
-  allocation_timeout_ = NextAllocationTimeout();
-#endif
+  heap_allocator_.UpdateAllocationTimeout();
+#endif  // V8_ENABLE_ALLOCATION_TIMEOUT
 
 #ifdef V8_ENABLE_THIRD_PARTY_HEAP
   tp_heap_ = third_party_heap::Heap::New(isolate());
@@ -5846,6 +5735,7 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
             read_only_space_ == ro_heap->read_only_space());
   space_[RO_SPACE] = nullptr;
   read_only_space_ = ro_heap->read_only_space();
+  heap_allocator_.SetReadOnlySpace(read_only_space_);
 }
 
 void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
@@ -5856,6 +5746,7 @@ void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
   }
 
   read_only_space_ = space;
+  heap_allocator_.SetReadOnlySpace(read_only_space_);
 }
 
 class StressConcurrentAllocationObserver : public AllocationObserver {
@@ -5966,6 +5857,7 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
   }
 
   main_thread_local_heap()->SetUpMainThread();
+  heap_allocator_.Setup();
 }
 
 void Heap::InitializeHashSeed() {
@@ -5981,19 +5873,6 @@ void Heap::InitializeHashSeed() {
       0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
 }
 
-int Heap::NextAllocationTimeout(int current_timeout) {
-  if (FLAG_random_gc_interval > 0) {
-    // If current timeout hasn't reached 0 the GC was caused by something
-    // different than --stress-atomic-gc flag and we don't update the timeout.
-    if (current_timeout <= 0) {
-      return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
-    } else {
-      return current_timeout;
-    }
-  }
-  return FLAG_gc_interval;
-}
-
 void Heap::PrintMaxMarkingLimitReached() {
   PrintF("\n### Maximum marking limit reached = %.02lf\n",
          max_marking_limit_reached_);
@@ -7577,5 +7456,11 @@ void StrongRootBlockAllocator::deallocate(Address* p, size_t n) noexcept {
   base::Free(block);
 }
 
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+void Heap::set_allocation_timeout(int allocation_timeout) {
+  heap_allocator_.SetAllocationTimeout(allocation_timeout);
+}
+#endif  // V8_ENABLE_ALLOCATION_TIMEOUT
+
 }  // namespace internal
 }  // namespace v8
@@ -27,6 +27,7 @@
 #include "src/common/globals.h"
 #include "src/heap/allocation-observer.h"
 #include "src/heap/allocation-result.h"
+#include "src/heap/heap-allocator.h"
 #include "src/init/heap-symbols.h"
 #include "src/objects/allocation-site.h"
 #include "src/objects/fixed-array.h"
@@ -126,15 +127,6 @@ enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
 
 enum class RetainingPathOption { kDefault, kTrackEphemeronPath };
 
-enum class AllocationOrigin {
-  kGeneratedCode = 0,
-  kRuntime = 1,
-  kGC = 2,
-  kFirstAllocationOrigin = kGeneratedCode,
-  kLastAllocationOrigin = kGC,
-  kNumberOfAllocationOrigins = kLastAllocationOrigin + 1
-};
-
 // These values are persisted to logs. Entries should not be renumbered and
 // numeric values should never be reused. If you add new items here, update
 // src/tools/metrics/histograms/enums.xml in chromium.
@@ -1585,8 +1577,8 @@ class Heap {
 #endif
 
 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
-  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
-#endif
+  void V8_EXPORT_PRIVATE set_allocation_timeout(int allocation_timeout);
+#endif  // V8_ENABLE_ALLOCATION_TIMEOUT
 
 #ifdef DEBUG
   void VerifyCountersAfterSweeping();
@@ -1844,8 +1836,6 @@ class Heap {
       GCIdleTimeHeapState heap_state, double start_ms,
       double deadline_in_ms);
 
-  int NextAllocationTimeout(int current_timeout = 0);
-
   void PrintMaxMarkingLimitReached();
   void PrintMaxNewSpaceSizeReached();
 
@@ -2057,13 +2047,6 @@ class Heap {
       AllocationOrigin origin = AllocationOrigin::kRuntime,
       AllocationAlignment alignment = kTaggedAligned);
 
-  // Allocates an uninitialized large object. Used as dispatch by
-  // `AllocateRaw()` for large objects. Do not call this from anywhere else.
-  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
-  AllocateRawLargeInternal(int size_in_bytes, AllocationType allocation,
-                           AllocationOrigin origin = AllocationOrigin::kRuntime,
-                           AllocationAlignment alignment = kTaggedAligned);
-
   // This method will try to allocate objects quickly (AllocationType::kYoung)
   // otherwise it falls back to a slower path indicated by the mode.
   enum AllocationRetryMode { kLightRetry, kRetryOrFail };
@@ -2079,25 +2062,6 @@ class Heap {
       AllocationOrigin origin = AllocationOrigin::kRuntime,
       AllocationAlignment alignment = kTaggedAligned);
 
-  // This method will try to perform an allocation of a given size of a given
-  // AllocationType. If the allocation fails, a regular full garbage collection
-  // is triggered and the allocation is retried. This is performed multiple
-  // times. If after that retry procedure the allocation still fails nullptr is
-  // returned.
-  V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithLightRetrySlowPath(
-      int size, AllocationType allocation, AllocationOrigin origin,
-      AllocationAlignment alignment = kTaggedAligned);
-
-  // This method will try to perform an allocation of a given size of a given
-  // AllocationType. If the allocation fails, a regular full garbage collection
-  // is triggered and the allocation is retried. This is performed multiple
-  // times. If after that retry procedure the allocation still fails a "hammer"
-  // garbage collection is triggered which tries to significantly reduce memory.
-  // If the allocation still fails after that a fatal error is thrown.
-  V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithRetryOrFailSlowPath(
-      int size, AllocationType allocation, AllocationOrigin origin,
-      AllocationAlignment alignment = kTaggedAligned);
-
   // Allocates a heap object based on the map.
   V8_WARN_UNUSED_RESULT AllocationResult Allocate(Handle<Map> map,
                                                   AllocationType allocation);
@@ -2149,6 +2113,8 @@ class Heap {
   // more expedient to get at the isolate directly from within Heap methods.
   Isolate* isolate_ = nullptr;
 
+  HeapAllocator heap_allocator_;
+
   // These limits are initialized in Heap::ConfigureHeap based on the resource
   // constraints and flags.
   size_t code_range_size_ = 0;
@@ -2442,13 +2408,6 @@ class Heap {
   base::Mutex unprotected_memory_chunks_mutex_;
   std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
 
-#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
-  // If the --gc-interval flag is set to a positive value, this
-  // variable holds the value indicating the number of allocations
-  // remain until the next failure and garbage collection.
-  int allocation_timeout_ = 0;
-#endif  // V8_ENABLE_ALLOCATION_TIMEOUT
-
   std::unordered_map<HeapObject, HeapObject, Object::Hasher> retainer_;
   std::unordered_map<HeapObject, Root, Object::Hasher> retaining_root_;
   // If an object is retained by an ephemeron, then the retaining key of the
@@ -2473,6 +2432,7 @@ class Heap {
   friend class EvacuateVisitorBase;
   friend class GCCallbacksScope;
   friend class GCTracer;
+  friend class HeapAllocator;
   friend class HeapObjectIterator;
   friend class ScavengeTaskObserver;
   friend class IgnoreLocalGCRequests;
@@ -2562,6 +2522,7 @@ class V8_NODISCARD AlwaysAllocateScope {
   friend class AlwaysAllocateScopeForTesting;
   friend class Evacuator;
   friend class Heap;
+  friend class HeapAllocator;
   friend class Isolate;
 
   explicit inline AlwaysAllocateScope(Heap* heap);