heap: Factor out AllocationResult from heap.h
Allows separating out the allocator from Heap without requiring a heap.h include.

Drive-by:
- Rename "Retry" to "Failure".
- Avoid implicit constructors.
- Rename "RetrySpace" to "GarbageCollectionSpace" which is its only use.

Bug: v8:12615
Change-Id: Idac17cded8f0b2b645a2be9045ab31ffd71999b3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3456562
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79063}
Parent: 9ac64a9d98
Commit: d89579b3ce
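For quick reference, the caller-facing renames in this CL, collected from the hunks below (shown as a comment-only C++ sketch rather than standalone code):

// Old API (previously declared in src/heap/heap.h)     ->  New API (src/heap/allocation-result.h)
// AllocationResult::Retry(space)                       ->  AllocationResult::Failure(space)
// result.IsRetry()                                     ->  result.IsFailure()
// alloc.RetrySpace()                                   ->  alloc.ToGarbageCollectionSpace()
// AllocationResult(object)   // implicit, from Object  ->  AllocationResult::FromObject(object)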
@@ -1289,6 +1289,7 @@ filegroup(
         "src/handles/persistent-handles.h",
         "src/heap/allocation-observer.cc",
         "src/heap/allocation-observer.h",
+        "src/heap/allocation-result.h",
         "src/heap/allocation-stats.h",
         "src/heap/array-buffer-sweeper.cc",
         "src/heap/array-buffer-sweeper.h",

BUILD.gn (+1)
@@ -2947,6 +2947,7 @@ v8_header_set("v8_internal_headers") {
     "src/handles/maybe-handles.h",
     "src/handles/persistent-handles.h",
     "src/heap/allocation-observer.h",
+    "src/heap/allocation-result.h",
     "src/heap/allocation-stats.h",
     "src/heap/array-buffer-sweeper.h",
     "src/heap/barrier.h",
src/heap/allocation-result.h (new file, +76)
@@ -0,0 +1,76 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_ALLOCATION_RESULT_H_
+#define V8_HEAP_ALLOCATION_RESULT_H_
+
+#include "src/common/globals.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/objects.h"
+#include "src/objects/smi.h"
+
+namespace v8 {
+namespace internal {
+
+// The result of an allocation attempt. Either represents a successful
+// allocation that can be turned into an object or a failed attempt.
+class AllocationResult final {
+ public:
+  static AllocationResult Failure(AllocationSpace space) {
+    return AllocationResult(space);
+  }
+
+  static AllocationResult FromObject(HeapObject heap_object) {
+    return AllocationResult(heap_object);
+  }
+
+  // Empty constructor creates a failed result that will turn into a full
+  // garbage collection.
+  AllocationResult() : AllocationResult(AllocationSpace::OLD_SPACE) {}
+
+  bool IsFailure() const { return object_.IsSmi(); }
+
+  template <typename T>
+  bool To(T* obj) const {
+    if (IsFailure()) return false;
+    *obj = T::cast(object_);
+    return true;
+  }
+
+  HeapObject ToObjectChecked() const {
+    CHECK(!IsFailure());
+    return HeapObject::cast(object_);
+  }
+
+  HeapObject ToObject() const {
+    DCHECK(!IsFailure());
+    return HeapObject::cast(object_);
+  }
+
+  Address ToAddress() const {
+    DCHECK(!IsFailure());
+    return HeapObject::cast(object_).address();
+  }
+
+  // Returns the space that should be passed to a garbage collection call.
+  AllocationSpace ToGarbageCollectionSpace() const {
+    DCHECK(IsFailure());
+    return static_cast<AllocationSpace>(Smi::ToInt(object_));
+  }
+
+ private:
+  explicit AllocationResult(AllocationSpace space)
+      : object_(Smi::FromInt(static_cast<int>(space))) {}
+
+  explicit AllocationResult(HeapObject heap_object) : object_(heap_object) {}
+
+  Object object_;
+};
+
+STATIC_ASSERT(sizeof(AllocationResult) == kSystemPointerSize);
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_ALLOCATION_RESULT_H_
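For readers outside the V8 tree, a minimal standalone sketch of the design idea behind this header and its STATIC_ASSERT: the whole result fits in one pointer-sized word because a failure is stored as a tagged small integer (a Smi) naming the space to collect, while a success stores the object word directly. ToyAllocationResult, Space, ObjectWord, and the explicit tag handling below are illustrative stand-ins, not V8 API.

#include <cassert>
#include <cstdint>

enum class Space : int { kNew = 0, kOld = 1 };  // stand-in for AllocationSpace
using ObjectWord = std::uintptr_t;              // stand-in for a tagged HeapObject word

class ToyAllocationResult final {
 public:
  // Failure: store the space as a small integer with a 0 tag bit, mirroring
  // how the real class stores a Smi in object_.
  static ToyAllocationResult Failure(Space space) {
    return ToyAllocationResult(static_cast<ObjectWord>(space) << 1);
  }
  // Success: store the object word; in this toy, object words carry a 1 tag bit.
  static ToyAllocationResult FromObject(ObjectWord object) {
    assert((object & 1u) == 1u);
    return ToyAllocationResult(object);
  }

  bool IsFailure() const { return (word_ & 1u) == 0; }  // the "IsSmi()" test
  ObjectWord ToObjectChecked() const {
    assert(!IsFailure());
    return word_;
  }
  Space ToGarbageCollectionSpace() const {
    assert(IsFailure());
    return static_cast<Space>(word_ >> 1);
  }

 private:
  explicit ToyAllocationResult(ObjectWord word) : word_(word) {}
  ObjectWord word_;  // single pointer-sized payload, like Object object_ above
};

static_assert(sizeof(ToyAllocationResult) == sizeof(void*),
              "one word, as the STATIC_ASSERT above checks for the real class");

Usage then mirrors the real class: ToyAllocationResult::Failure(Space::kOld).IsFailure() is true and reports Space::kOld as the space to collect, while FromObject(word).ToObjectChecked() returns the stored word.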
@@ -37,11 +37,9 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
 AllocationResult ConcurrentAllocator::AllocateInLab(
     int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
   AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
-  if (allocation.IsRetry()) {
-    return AllocateInLabSlow(object_size, alignment, origin);
-  } else {
-    return allocation;
-  }
+  return allocation.IsFailure()
+             ? AllocateInLabSlow(object_size, alignment, origin)
+             : allocation;
 }

 }  // namespace internal
@@ -37,7 +37,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
     AllocationResult result = local_heap.AllocateRaw(
         kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
         AllocationAlignment::kTaggedAligned);
-    if (!result.IsRetry()) {
+    if (!result.IsFailure()) {
       heap->CreateFillerObjectAtBackground(
           result.ToAddress(), kSmallObjectSize,
           ClearFreedMemoryMode::kDontClearFreedMemory);
@@ -48,7 +48,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
     result = local_heap.AllocateRaw(kMediumObjectSize, AllocationType::kOld,
                                     AllocationOrigin::kRuntime,
                                     AllocationAlignment::kTaggedAligned);
-    if (!result.IsRetry()) {
+    if (!result.IsFailure()) {
       heap->CreateFillerObjectAtBackground(
           result.ToAddress(), kMediumObjectSize,
           ClearFreedMemoryMode::kDontClearFreedMemory);
@@ -59,7 +59,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
     result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
                                     AllocationOrigin::kRuntime,
                                     AllocationAlignment::kTaggedAligned);
-    if (!result.IsRetry()) {
+    if (!result.IsFailure()) {
       heap->CreateFillerObjectAtBackground(
           result.ToAddress(), kLargeObjectSize,
           ClearFreedMemoryMode::kDontClearFreedMemory);
@@ -122,11 +122,11 @@ void ConcurrentAllocator::UnmarkLinearAllocationArea() {
 AllocationResult ConcurrentAllocator::AllocateInLabSlow(
     int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
   if (!EnsureLab(origin)) {
-    return AllocationResult::Retry(space_->identity());
+    return AllocationResult::Failure(space_->identity());
   }

   AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
-  DCHECK(!allocation.IsRetry());
+  DCHECK(!allocation.IsFailure());

   return allocation;
 }
@@ -145,7 +145,7 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
   HeapObject object = HeapObject::FromAddress(result->first);
   LocalAllocationBuffer saved_lab = std::move(lab_);
   lab_ = LocalAllocationBuffer::FromResult(
-      space_->heap(), AllocationResult(object), result->second);
+      space_->heap(), AllocationResult::FromObject(object), result->second);
   DCHECK(lab_.IsValid());
   if (!lab_.TryMerge(&saved_lab)) {
     saved_lab.CloseAndMakeIterable();
@@ -157,7 +157,7 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
     int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
   auto result = space_->RawRefillLabBackground(local_heap_, object_size,
                                                object_size, alignment, origin);
-  if (!result) return AllocationResult::Retry(space_->identity());
+  if (!result) return AllocationResult::Failure(space_->identity());

   HeapObject object = HeapObject::FromAddress(result->first);

@@ -166,7 +166,7 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
                                  object_size);
   }

-  return AllocationResult(object);
+  return AllocationResult::FromObject(object);
 }

 bool ConcurrentAllocator::IsBlackAllocationEnabled() const {
@@ -66,26 +66,6 @@ T ForwardingAddress(T heap_obj) {
   }
 }

-AllocationSpace AllocationResult::RetrySpace() {
-  DCHECK(IsRetry());
-  return static_cast<AllocationSpace>(Smi::ToInt(object_));
-}
-
-HeapObject AllocationResult::ToObjectChecked() {
-  CHECK(!IsRetry());
-  return HeapObject::cast(object_);
-}
-
-HeapObject AllocationResult::ToObject() {
-  DCHECK(!IsRetry());
-  return HeapObject::cast(object_);
-}
-
-Address AllocationResult::ToAddress() {
-  DCHECK(!IsRetry());
-  return HeapObject::cast(object_).address();
-}
-
 // static
 base::EnumSet<CodeFlushMode> Heap::GetCodeFlushMode(Isolate* isolate) {
   if (isolate->disable_bytecode_flushing()) {
@@ -215,7 +195,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
   if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
     if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
       AllocationSpace space = FLAG_single_generation ? OLD_SPACE : NEW_SPACE;
-      return AllocationResult::Retry(space);
+      return AllocationResult::Failure(space);
     }
   }
 #endif  // V8_ENABLE_ALLOCATION_TIMEOUT
@@ -5636,7 +5636,7 @@ HeapObject Heap::AllocateRawWithLightRetrySlowPath(
     if (IsSharedAllocationType(allocation)) {
       CollectSharedGarbage(GarbageCollectionReason::kAllocationFailure);
     } else {
-      CollectGarbage(alloc.RetrySpace(),
+      CollectGarbage(alloc.ToGarbageCollectionSpace(),
                      GarbageCollectionReason::kAllocationFailure);
     }
     alloc = AllocateRaw(size, allocation, origin, alignment);
@@ -26,6 +26,7 @@
 #include "src/common/assert-scope.h"
 #include "src/common/globals.h"
 #include "src/heap/allocation-observer.h"
+#include "src/heap/allocation-result.h"
 #include "src/init/heap-symbols.h"
 #include "src/objects/allocation-site.h"
 #include "src/objects/fixed-array.h"
@@ -213,44 +214,6 @@ class StrongRootsEntry final {
   friend class Heap;
 };

-class AllocationResult {
- public:
-  static inline AllocationResult Retry(AllocationSpace space) {
-    return AllocationResult(space);
-  }
-
-  // Implicit constructor from Object.
-  AllocationResult(Object object)  // NOLINT
-      : object_(object) {
-    // AllocationResults can't return Smis, which are used to represent
-    // failure and the space to retry in.
-    CHECK(!object.IsSmi());
-  }
-
-  AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
-
-  inline bool IsRetry() { return object_.IsSmi(); }
-  inline HeapObject ToObjectChecked();
-  inline HeapObject ToObject();
-  inline Address ToAddress();
-  inline AllocationSpace RetrySpace();
-
-  template <typename T>
-  bool To(T* obj) {
-    if (IsRetry()) return false;
-    *obj = T::cast(object_);
-    return true;
-  }
-
- private:
-  explicit AllocationResult(AllocationSpace space)
-      : object_(Smi::FromInt(static_cast<int>(space))) {}
-
-  Object object_;
-};
-
-STATIC_ASSERT(sizeof(AllocationResult) == kSystemPointerSize);
-
 #ifdef DEBUG
 struct CommentStatistic {
   const char* comment;
@@ -135,11 +135,11 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
   // If so, fail the allocation.
   if (!heap()->CanExpandOldGeneration(object_size) ||
       !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
-    return AllocationResult::Retry(identity());
+    return AllocationResult::Failure(identity());
   }

   LargePage* page = AllocateLargePage(object_size, executable);
-  if (page == nullptr) return AllocationResult::Retry(identity());
+  if (page == nullptr) return AllocationResult::Failure(identity());
   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
   HeapObject object = page->GetObject();
   UpdatePendingObject(object);
@@ -156,7 +156,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
   heap()->NotifyOldGenerationExpansion(identity(), page);
   AdvanceAndInvokeAllocationObservers(object.address(),
                                       static_cast<size_t>(object_size));
-  return object;
+  return AllocationResult::FromObject(object);
 }

 AllocationResult OldLargeObjectSpace::AllocateRawBackground(
@@ -171,11 +171,11 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
   // If so, fail the allocation.
   if (!heap()->CanExpandOldGenerationBackground(local_heap, object_size) ||
       !heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap)) {
-    return AllocationResult::Retry(identity());
+    return AllocationResult::Failure(identity());
   }

   LargePage* page = AllocateLargePage(object_size, executable);
-  if (page == nullptr) return AllocationResult::Retry(identity());
+  if (page == nullptr) return AllocationResult::Failure(identity());
   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
   HeapObject object = page->GetObject();
   heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
@@ -189,7 +189,7 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
   if (identity() == CODE_LO_SPACE) {
     heap()->isolate()->AddCodeMemoryChunk(page);
   }
-  return object;
+  return AllocationResult::FromObject(object);
 }

 LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
@@ -483,16 +483,16 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
   // Do not allocate more objects if promoting the existing object would exceed
   // the old generation capacity.
   if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
-    return AllocationResult::Retry(identity());
+    return AllocationResult::Failure(identity());
   }

   // Allocation for the first object must succeed independent from the capacity.
   if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
-    return AllocationResult::Retry(identity());
+    return AllocationResult::Failure(identity());
   }

   LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
-  if (page == nullptr) return AllocationResult::Retry(identity());
+  if (page == nullptr) return AllocationResult::Failure(identity());

   // The size of the first object may exceed the capacity.
   capacity_ = std::max(capacity_, SizeOfObjects());
@@ -513,7 +513,7 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
   DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
   AdvanceAndInvokeAllocationObservers(result.address(),
                                       static_cast<size_t>(object_size));
-  return result;
+  return AllocationResult::FromObject(result);
 }

 size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }
@@ -84,15 +84,15 @@ AllocationResult EvacuationAllocator::AllocateInLAB(
     int object_size, AllocationAlignment alignment) {
   AllocationResult allocation;
   if (!new_space_lab_.IsValid() && !NewLocalAllocationBuffer()) {
-    return AllocationResult::Retry(OLD_SPACE);
+    return AllocationResult::Failure(OLD_SPACE);
   }
   allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
-  if (allocation.IsRetry()) {
+  if (allocation.IsFailure()) {
     if (!NewLocalAllocationBuffer()) {
-      return AllocationResult::Retry(OLD_SPACE);
+      return AllocationResult::Failure(OLD_SPACE);
     } else {
       allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
-      CHECK(!allocation.IsRetry());
+      CHECK(!allocation.IsFailure());
     }
   }
   return allocation;
@@ -102,7 +102,7 @@ bool EvacuationAllocator::NewLocalAllocationBuffer() {
   if (lab_allocation_will_fail_) return false;
   AllocationResult result =
       new_space_->AllocateRawSynchronized(kLabSize, kTaggedAligned);
-  if (result.IsRetry()) {
+  if (result.IsFailure()) {
     lab_allocation_will_fail_ = true;
     return false;
   }
@@ -72,7 +72,8 @@ Address LocalHeap::AllocateRawOrFail(int object_size, AllocationType type,
                                      AllocationAlignment alignment) {
   DCHECK(!FLAG_enable_third_party_heap);
   AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
-  if (!result.IsRetry()) return result.ToObject().address();
+  HeapObject object;
+  if (result.To(&object)) return object.address();
   return PerformCollectionAndAllocateAgain(object_size, type, origin,
                                            alignment);
 }
@@ -398,7 +398,7 @@ Address LocalHeap::PerformCollectionAndAllocateAgain(

     AllocationResult result = AllocateRaw(object_size, type, origin, alignment);

-    if (!result.IsRetry()) {
+    if (!result.IsFailure()) {
       allocation_failed_ = false;
       main_thread_parked_ = false;
       return result.ToObjectChecked().address();
@@ -1757,7 +1757,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
     AllocationSpace space_allocated_in = NEW_SPACE;
     AllocationResult allocation = local_allocator_->Allocate(
         NEW_SPACE, size, AllocationOrigin::kGC, alignment);
-    if (allocation.IsRetry()) {
+    if (allocation.IsFailure()) {
       allocation = AllocateInOldSpace(size, alignment);
       space_allocated_in = OLD_SPACE;
     }
@@ -1771,7 +1771,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
                                            AllocationAlignment alignment) {
     AllocationResult allocation = local_allocator_->Allocate(
         OLD_SPACE, size_in_bytes, AllocationOrigin::kGC, alignment);
-    if (allocation.IsRetry()) {
+    if (allocation.IsFailure()) {
       heap_->FatalProcessOutOfMemory(
           "MarkCompactCollector: semi-space copy, fallback in old gen");
     }
@@ -102,17 +102,14 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
     result = AllocateFastUnaligned(size_in_bytes, origin);
   }

-  if (!result.IsRetry()) {
-    return result;
-  } else {
-    return AllocateRawSlow(size_in_bytes, alignment, origin);
-  }
+  return result.IsFailure() ? AllocateRawSlow(size_in_bytes, alignment, origin)
+                            : result;
 }

 AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
                                                  AllocationOrigin origin) {
   if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
-    return AllocationResult::Retry(NEW_SPACE);
+    return AllocationResult::Failure(NEW_SPACE);
   }
   HeapObject obj =
       HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes));
@@ -124,7 +121,7 @@ AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
     UpdateAllocationOrigins(origin);
   }

-  return obj;
+  return AllocationResult::FromObject(obj);
 }

 AllocationResult NewSpace::AllocateFastAligned(
@@ -135,7 +132,7 @@ AllocationResult NewSpace::AllocateFastAligned(
   int aligned_size_in_bytes = size_in_bytes + filler_size;

   if (!allocation_info_->CanIncrementTop(aligned_size_in_bytes)) {
-    return AllocationResult::Retry(NEW_SPACE);
+    return AllocationResult::Failure(NEW_SPACE);
   }
   HeapObject obj = HeapObject::FromAddress(
       allocation_info_->IncrementTop(aligned_size_in_bytes));
@@ -153,7 +150,7 @@ AllocationResult NewSpace::AllocateFastAligned(
     UpdateAllocationOrigins(origin);
   }

-  return obj;
+  return AllocationResult::FromObject(obj);
 }

 V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
@@ -619,13 +619,13 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
                                                 AllocationOrigin origin) {
   DCHECK(!FLAG_enable_third_party_heap);
   if (!EnsureAllocation(size_in_bytes, kTaggedAligned)) {
-    return AllocationResult::Retry(NEW_SPACE);
+    return AllocationResult::Failure(NEW_SPACE);
   }

   DCHECK_EQ(allocation_info_->start(), allocation_info_->top());

   AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
-  DCHECK(!result.IsRetry());
+  DCHECK(!result.IsFailure());

   InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
                             size_in_bytes);
@@ -638,7 +638,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                               AllocationOrigin origin) {
   DCHECK(!FLAG_enable_third_party_heap);
   if (!EnsureAllocation(size_in_bytes, alignment)) {
-    return AllocationResult::Retry(NEW_SPACE);
+    return AllocationResult::Failure(NEW_SPACE);
   }

   DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
@@ -647,7 +647,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,

   AllocationResult result = AllocateFastAligned(
       size_in_bytes, &aligned_size_in_bytes, alignment, origin);
-  DCHECK(!result.IsRetry());
+  DCHECK(!result.IsFailure());

   InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
                             aligned_size_in_bytes, aligned_size_in_bytes);
@@ -95,9 +95,9 @@ bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {

 AllocationResult PagedSpace::AllocateFastUnaligned(int size_in_bytes) {
   if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
-    return AllocationResult::Retry(identity());
+    return AllocationResult::Failure(identity());
   }
-  return AllocationResult(
+  return AllocationResult::FromObject(
       HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes)));
 }

@@ -108,7 +108,7 @@ AllocationResult PagedSpace::AllocateFastAligned(
   int filler_size = Heap::GetFillToAlign(current_top, alignment);
   int aligned_size = filler_size + size_in_bytes;
   if (!allocation_info_->CanIncrementTop(aligned_size)) {
-    return AllocationResult::Retry(identity());
+    return AllocationResult::Failure(identity());
   }
   HeapObject obj =
       HeapObject::FromAddress(allocation_info_->IncrementTop(aligned_size));
@@ -116,18 +116,18 @@ AllocationResult PagedSpace::AllocateFastAligned(
   if (filler_size > 0) {
     obj = heap()->PrecedeWithFiller(obj, filler_size);
   }
-  return AllocationResult(obj);
+  return AllocationResult::FromObject(obj);
 }

 AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
                                                   AllocationOrigin origin) {
   DCHECK(!FLAG_enable_third_party_heap);
   if (!EnsureLabMain(size_in_bytes, origin)) {
-    return AllocationResult::Retry(identity());
+    return AllocationResult::Failure(identity());
   }

   AllocationResult result = AllocateFastUnaligned(size_in_bytes);
-  DCHECK(!result.IsRetry());
+  DCHECK(!result.IsFailure());
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
                                       size_in_bytes);

@@ -152,12 +152,12 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
   int filler_size = Heap::GetMaximumFillToAlign(alignment);
   allocation_size += filler_size;
   if (!EnsureLabMain(allocation_size, origin)) {
-    return AllocationResult::Retry(identity());
+    return AllocationResult::Failure(identity());
   }
   int aligned_size_in_bytes;
   AllocationResult result =
       AllocateFastAligned(size_in_bytes, &aligned_size_in_bytes, alignment);
-  DCHECK(!result.IsRetry());
+  DCHECK(!result.IsFailure());
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
                                       size_in_bytes);

@@ -183,11 +183,8 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
     result = AllocateFastUnaligned(size_in_bytes);
   }

-  if (!result.IsRetry()) {
-    return result;
-  } else {
-    return AllocateRawSlow(size_in_bytes, alignment, origin);
-  }
+  return result.IsFailure() ? AllocateRawSlow(size_in_bytes, alignment, origin)
+                            : result;
 }

 }  // namespace internal
@@ -667,7 +667,7 @@ AllocationResult ReadOnlySpace::AllocateRawAligned(
   }
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);

-  return object;
+  return AllocationResult::FromObject(object);
 }

 AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
@@ -687,7 +687,7 @@ AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
   accounting_stats_.IncreaseAllocatedBytes(size_in_bytes, chunk);
   chunk->IncreaseAllocatedBytes(size_in_bytes);

-  return object;
+  return AllocationResult::FromObject(object);
 }

 AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
@@ -697,7 +697,7 @@ AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
                           ? AllocateRawAligned(size_in_bytes, alignment)
                           : AllocateRawUnaligned(size_in_bytes);
   HeapObject heap_obj;
-  if (!result.IsRetry() && result.To(&heap_obj)) {
+  if (result.To(&heap_obj)) {
     DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
   }
   return result;
@@ -153,7 +153,7 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
       Map::cast(result), instance_type, instance_size, elements_kind,
       inobject_properties);

-  return map;
+  return AllocationResult::FromObject(map);
 }

 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
@@ -184,7 +184,7 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
   DCHECK(!map.is_in_retained_map_list());
   map.clear_padding();
   map.set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
-  return map;
+  return AllocationResult::FromObject(map);
 }

 void Heap::FinalizePartialMap(Map map) {
@@ -208,7 +208,7 @@ AllocationResult Heap::Allocate(Handle<Map> map,
       allocation_type == AllocationType::kYoung ? SKIP_WRITE_BARRIER
                                                 : UPDATE_WRITE_BARRIER;
   result.set_map_after_allocation(*map, write_barrier_mode);
-  return result;
+  return AllocationResult::FromObject(result);
 }

 bool Heap::CreateInitialMaps() {
@@ -140,21 +140,19 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
   int filler_size = Heap::GetFillToAlign(current_top, alignment);
   int aligned_size = filler_size + size_in_bytes;
   if (!allocation_info_.CanIncrementTop(aligned_size)) {
-    return AllocationResult::Retry(NEW_SPACE);
+    return AllocationResult::Failure(NEW_SPACE);
   }
   HeapObject object =
       HeapObject::FromAddress(allocation_info_.IncrementTop(aligned_size));
-  if (filler_size > 0) {
-    return heap_->PrecedeWithFiller(object, filler_size);
-  }
-
-  return AllocationResult(object);
+  return filler_size > 0 ? AllocationResult::FromObject(
+                               heap_->PrecedeWithFiller(object, filler_size))
+                         : AllocationResult::FromObject(object);
 }

 LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
                                                         AllocationResult result,
                                                         intptr_t size) {
-  if (result.IsRetry()) return InvalidBuffer();
+  if (result.IsFailure()) return InvalidBuffer();
   HeapObject obj;
   bool ok = result.To(&obj);
   USE(ok);
@@ -248,7 +248,7 @@ class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
       AllocationResult result = local_heap.AllocateRaw(
           kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
           AllocationAlignment::kTaggedAligned);
-      if (result.IsRetry()) {
+      if (result.IsFailure()) {
         local_heap.TryPerformCollection();
       } else {
         Address address = result.ToAddress();
@@ -1831,7 +1831,7 @@ TEST(TestAlignedOverAllocation) {
   heap::AbandonCurrentlyFreeMemory(heap->old_space());
   // Allocate a dummy object to properly set up the linear allocation info.
   AllocationResult dummy = heap->old_space()->AllocateRawUnaligned(kTaggedSize);
-  CHECK(!dummy.IsRetry());
+  CHECK(!dummy.IsFailure());
   heap->CreateFillerObjectAt(dummy.ToObjectChecked().address(), kTaggedSize,
                              ClearRecordedSlots::kNo);

@@ -5387,7 +5387,7 @@ AllocationResult HeapTester::AllocateByteArrayForTest(
                                    SKIP_WRITE_BARRIER);
   ByteArray::cast(result).set_length(length);
   ByteArray::cast(result).clear_padding();
-  return result;
+  return AllocationResult::FromObject(result);
 }

 bool HeapTester::CodeEnsureLinearAllocationArea(Heap* heap, int size_in_bytes) {
@@ -68,7 +68,8 @@ TEST(UnusedLabImplicitClose) {
   std::vector<intptr_t> expected_sizes(expected_sizes_raw,
                                        expected_sizes_raw + 1);
   {
-    AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+    AllocationResult lab_backing_store =
+        AllocationResult::FromObject(HeapObject::FromAddress(base));
     LocalAllocationBuffer lab =
         LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
     CHECK(lab.IsValid());
@@ -89,7 +90,8 @@ TEST(SimpleAllocate) {
   std::vector<intptr_t> expected_sizes(expected_sizes_raw,
                                        expected_sizes_raw + 2);
   {
-    AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+    AllocationResult lab_backing_store =
+        AllocationResult::FromObject(HeapObject::FromAddress(base));
     LocalAllocationBuffer lab =
         LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
     CHECK(lab.IsValid());
@@ -115,7 +117,8 @@ TEST(AllocateUntilLabOOM) {
                                        expected_sizes_raw + 5);
   intptr_t sum = 0;
   {
-    AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+    AllocationResult lab_backing_store =
+        AllocationResult::FromObject(HeapObject::FromAddress(base));
     LocalAllocationBuffer lab =
         LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
     CHECK(lab.IsValid());
@@ -142,7 +145,8 @@ TEST(AllocateExactlyUntilLimit) {
   std::vector<intptr_t> expected_sizes(expected_sizes_raw,
                                        expected_sizes_raw + 5);
   {
-    AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+    AllocationResult lab_backing_store =
+        AllocationResult::FromObject(HeapObject::FromAddress(base));
     LocalAllocationBuffer lab =
         LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
     CHECK(lab.IsValid());
@@ -183,7 +187,8 @@ TEST(MergeSuccessful) {
                                         expected_sizes2_raw + 10);

   {
-    AllocationResult lab_backing_store1(HeapObject::FromAddress(base1));
+    AllocationResult lab_backing_store1 =
+        AllocationResult::FromObject(HeapObject::FromAddress(base1));
     LocalAllocationBuffer lab1 =
         LocalAllocationBuffer::FromResult(heap, lab_backing_store1, kLabSize);
     CHECK(lab1.IsValid());
@@ -196,7 +201,8 @@ TEST(MergeSuccessful) {
       }
     }

-    AllocationResult lab_backing_store2(HeapObject::FromAddress(base2));
+    AllocationResult lab_backing_store2 =
+        AllocationResult::FromObject(HeapObject::FromAddress(base2));
     LocalAllocationBuffer lab2 =
         LocalAllocationBuffer::FromResult(heap, lab_backing_store2, kLabSize);
     CHECK(lab2.IsValid());
@@ -225,17 +231,20 @@ TEST(MergeFailed) {
   Address base3 = base2 + kLabSize;

   {
-    AllocationResult lab_backing_store1(HeapObject::FromAddress(base1));
+    AllocationResult lab_backing_store1 =
+        AllocationResult::FromObject(HeapObject::FromAddress(base1));
     LocalAllocationBuffer lab1 =
         LocalAllocationBuffer::FromResult(heap, lab_backing_store1, kLabSize);
     CHECK(lab1.IsValid());

-    AllocationResult lab_backing_store2(HeapObject::FromAddress(base2));
+    AllocationResult lab_backing_store2 =
+        AllocationResult::FromObject(HeapObject::FromAddress(base2));
     LocalAllocationBuffer lab2 =
         LocalAllocationBuffer::FromResult(heap, lab_backing_store2, kLabSize);
     CHECK(lab2.IsValid());

-    AllocationResult lab_backing_store3(HeapObject::FromAddress(base3));
+    AllocationResult lab_backing_store3 =
+        AllocationResult::FromObject(HeapObject::FromAddress(base3));
     LocalAllocationBuffer lab3 =
         LocalAllocationBuffer::FromResult(heap, lab_backing_store3, kLabSize);
     CHECK(lab3.IsValid());
@@ -261,7 +270,8 @@ TEST(AllocateAligned) {
                                        expected_sizes_raw + 4);

   {
-    AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+    AllocationResult lab_backing_store =
+        AllocationResult::FromObject(HeapObject::FromAddress(base));
     LocalAllocationBuffer lab =
         LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
     CHECK(lab.IsValid());
@@ -82,9 +82,9 @@ AllocationResult HeapTester::AllocateMapForTest(Isolate* isolate) {
   if (!alloc.To(&obj)) return alloc;
   obj.set_map_after_allocation(ReadOnlyRoots(heap).meta_map(),
                                SKIP_WRITE_BARRIER);
-  return isolate->factory()->InitializeMap(Map::cast(obj), JS_OBJECT_TYPE,
-                                           JSObject::kHeaderSize,
-                                           TERMINAL_FAST_ELEMENTS_KIND, 0);
+  return AllocationResult::FromObject(isolate->factory()->InitializeMap(
+      Map::cast(obj), JS_OBJECT_TYPE, JSObject::kHeaderSize,
+      TERMINAL_FAST_ELEMENTS_KIND, 0));
 }

 // This is the same as Factory::NewFixedArray, except it doesn't retry
@@ -104,7 +104,7 @@ AllocationResult HeapTester::AllocateFixedArrayForTest(
   array.set_length(length);
   MemsetTagged(array.data_start(), ReadOnlyRoots(heap).undefined_value(),
                length);
-  return array;
+  return AllocationResult::FromObject(array);
 }

 HEAP_TEST(MarkCompactCollector) {
@@ -128,7 +128,7 @@ HEAP_TEST(MarkCompactCollector) {
   do {
     allocation =
         AllocateFixedArrayForTest(heap, arraysize, AllocationType::kYoung);
-  } while (!allocation.IsRetry());
+  } while (!allocation.IsFailure());
   CcTest::CollectGarbage(NEW_SPACE);
   AllocateFixedArrayForTest(heap, arraysize, AllocationType::kYoung)
       .ToObjectChecked();
@@ -137,7 +137,7 @@ HEAP_TEST(MarkCompactCollector) {
   // keep allocating maps until it fails
   do {
     allocation = AllocateMapForTest(isolate);
-  } while (!allocation.IsRetry());
+  } while (!allocation.IsFailure());
   CcTest::CollectGarbage(MAP_SPACE);
   AllocateMapForTest(isolate).ToObjectChecked();

@@ -344,14 +344,14 @@ TEST(OldLargeObjectSpace) {
   while (true) {
     {
       AllocationResult allocation = lo->AllocateRaw(lo_size);
-      if (allocation.IsRetry()) break;
+      if (allocation.IsFailure()) break;
       ho = HeapObject::cast(allocation.ToObjectChecked());
       Handle<HeapObject> keep_alive(ho, isolate);
     }
   }

   CHECK(!lo->IsEmpty());
-  CHECK(lo->AllocateRaw(lo_size).IsRetry());
+  CHECK(lo->AllocateRaw(lo_size).IsFailure());
 }

 #ifndef DEBUG
@@ -411,7 +411,7 @@ TEST(SizeOfInitialHeap) {

 static HeapObject AllocateUnaligned(NewSpace* space, int size) {
   AllocationResult allocation = space->AllocateRaw(size, kTaggedAligned);
-  CHECK(!allocation.IsRetry());
+  CHECK(!allocation.IsFailure());
   HeapObject filler;
   CHECK(allocation.To(&filler));
   space->heap()->CreateFillerObjectAt(filler.address(), size,
@@ -421,7 +421,7 @@ static HeapObject AllocateUnaligned(NewSpace* space, int size) {

 static HeapObject AllocateUnaligned(PagedSpace* space, int size) {
   AllocationResult allocation = space->AllocateRaw(size, kTaggedAligned);
-  CHECK(!allocation.IsRetry());
+  CHECK(!allocation.IsFailure());
   HeapObject filler;
   CHECK(allocation.To(&filler));
   space->heap()->CreateFillerObjectAt(filler.address(), size,
@@ -431,7 +431,7 @@ static HeapObject AllocateUnaligned(PagedSpace* space, int size) {

 static HeapObject AllocateUnaligned(OldLargeObjectSpace* space, int size) {
   AllocationResult allocation = space->AllocateRaw(size);
-  CHECK(!allocation.IsRetry());
+  CHECK(!allocation.IsFailure());
   HeapObject filler;
   CHECK(allocation.To(&filler));
   return filler;