Reland "[Heap] Create a fast path for young allocations."
Disable a flaky test.
This is a reland of cbf028e8b8
Original change's description:
> [Heap] Create a fast path for young allocations.
>
> Bug: v8:9714
> Change-Id: I3be6ea615142c8282bb67370626c7596cedf826c
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1800304
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Victor Gomes <victorgomes@google.com>
> Auto-Submit: Victor Gomes <victorgomes@google.com>
> Cr-Commit-Position: refs/heads/master@{#63729}
Bug: v8:9714
Change-Id: Ifbd8617be1b8c58cb1552fe88c52eafd9d6e9c7d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1801840
Commit-Queue: Victor Gomes <victorgomes@google.com>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Auto-Submit: Victor Gomes <victorgomes@google.com>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63744}
parent 3f8fc137c3
commit 7b7df7db4d
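For background on the diff below: a "young" allocation lands in V8's new space, and the fast path this change adds is plain bump-pointer allocation. As long as the requested size fits between the space's current top and limit pointers, allocating is a single pointer increment; anything else falls back to a slow path that may garbage-collect and retry. A minimal standalone sketch of the idea (illustrative names and types, not V8 code):

  #include <cstdint>
  #include <cstdio>

  // Illustrative bump-pointer arena standing in for V8's new space.
  struct Arena {
    uintptr_t top;    // next free address
    uintptr_t limit;  // one past the end of the usable region
  };

  // Fast path: if the request fits below the limit, allocate by bumping
  // `top`. Returning 0 signals that the caller must take a slow path
  // (in V8: garbage collect, then retry or fail depending on the mode).
  uintptr_t AllocateFast(Arena* arena, uintptr_t size) {
    if (arena->limit - arena->top < size) return 0;
    uintptr_t result = arena->top;
    arena->top += size;
    return result;
  }

  int main() {
    static uint8_t buffer[1024];
    Arena arena{reinterpret_cast<uintptr_t>(buffer),
                reinterpret_cast<uintptr_t>(buffer) + sizeof(buffer)};
    void* obj = reinterpret_cast<void*>(AllocateFast(&arena, 64));
    printf("allocated at %p\n", obj);
    return 0;
  }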
@@ -117,11 +117,11 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
     CodePageCollectionMemoryModificationScope code_allocation(heap);
     HeapObject result;
     if (retry_allocation_or_fail) {
-      result =
-          heap->AllocateRawWithRetryOrFail(object_size, AllocationType::kCode);
+      result = heap->AllocateRawWith<Heap::kRetryOrFail>(object_size,
+                                                         AllocationType::kCode);
     } else {
-      result =
-          heap->AllocateRawWithLightRetry(object_size, AllocationType::kCode);
+      result = heap->AllocateRawWith<Heap::kLightRetry>(object_size,
+                                                        AllocationType::kCode);
       // Return an empty handle if we cannot allocate the code object.
       if (result.is_null()) return MaybeHandle<Code>();
     }
@@ -209,8 +209,8 @@ HeapObject Factory::AllocateRawWithImmortalMap(int size,
                                                AllocationType allocation,
                                                Map map,
                                                AllocationAlignment alignment) {
-  HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(
-      size, allocation, alignment);
+  HeapObject result = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
+      size, allocation, AllocationOrigin::kRuntime, alignment);
   result.set_map_after_allocation(map, SKIP_WRITE_BARRIER);
   return result;
 }
@@ -222,7 +222,7 @@ HeapObject Factory::AllocateRawWithAllocationSite(
   int size = map->instance_size();
   if (!allocation_site.is_null()) size += AllocationMemento::kSize;
   HeapObject result =
-      isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
+      isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
   WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
                                             ? SKIP_WRITE_BARRIER
                                             : UPDATE_WRITE_BARRIER;
@@ -247,7 +247,7 @@ void Factory::InitializeAllocationMemento(AllocationMemento memento,

 HeapObject Factory::AllocateRawArray(int size, AllocationType allocation) {
   HeapObject result =
-      isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
+      isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
   if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
     MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
     chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
@@ -275,7 +275,7 @@ HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
   DCHECK(map->instance_type() != MAP_TYPE);
   int size = map->instance_size();
   HeapObject result =
-      isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
+      isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
   // New space objects are allocated white.
   WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
                                             ? SKIP_WRITE_BARRIER
@@ -289,8 +289,8 @@ Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
                                             AllocationOrigin origin) {
   AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
   Heap* heap = isolate()->heap();
-  HeapObject result =
-      heap->AllocateRawWithRetryOrFail(size, allocation, origin, alignment);
+  HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
+      size, allocation, origin, alignment);
   heap->CreateFillerObjectAt(result.address(), size, ClearRecordedSlots::kNo);
   return Handle<HeapObject>(result, isolate());
 }
@@ -1864,7 +1864,7 @@ Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
   DCHECK_LT(0, number_of_all_descriptors);
   int size = DescriptorArray::SizeFor(number_of_all_descriptors);
   HeapObject obj =
-      isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
+      isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
   obj.set_map_after_allocation(*descriptor_array_map(), SKIP_WRITE_BARRIER);
   DescriptorArray array = DescriptorArray::cast(obj);
   array.Initialize(*empty_enum_cache(), *undefined_value(),
@@ -1915,7 +1915,7 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
                  !Map::CanHaveFastTransitionableElementsKind(type),
                  IsDictionaryElementsKind(elements_kind) ||
                      IsTerminalElementsKind(elements_kind));
-  HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(
+  HeapObject result = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
       Map::kSize, AllocationType::kMap);
   result.set_map_after_allocation(*meta_map(), SKIP_WRITE_BARRIER);
   return handle(InitializeMap(Map::cast(result), type, instance_size,
@@ -1993,7 +1993,7 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
   int object_size = map->instance_size();
   int adjusted_object_size =
       site.is_null() ? object_size : object_size + AllocationMemento::kSize;
-  HeapObject raw_clone = isolate()->heap()->AllocateRawWithRetryOrFail(
+  HeapObject raw_clone = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
       adjusted_object_size, AllocationType::kYoung);

   DCHECK(Heap::InYoungGeneration(raw_clone) || FLAG_single_generation);
@@ -2678,8 +2678,8 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
   {
     int obj_size = code->Size();
     CodePageCollectionMemoryModificationScope code_allocation(heap);
-    HeapObject result =
-        heap->AllocateRawWithRetryOrFail(obj_size, AllocationType::kCode);
+    HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
+        obj_size, AllocationType::kCode);

     // Copy code object.
     Address old_addr = code->address();
@@ -159,7 +159,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
                                    AllocationAlignment alignment) {
   DCHECK(AllowHandleAllocation::IsAllowed());
   DCHECK(AllowHeapAllocation::IsAllowed());
-  DCHECK(gc_state_ == NOT_IN_GC);
+  DCHECK_EQ(gc_state_, NOT_IN_GC);
 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
   if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
     if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
@@ -238,6 +238,39 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
   return allocation;
 }

+template <Heap::AllocationRetryMode mode>
+HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
+                                 AllocationOrigin origin,
+                                 AllocationAlignment alignment) {
+  DCHECK(AllowHandleAllocation::IsAllowed());
+  DCHECK(AllowHeapAllocation::IsAllowed());
+  DCHECK_EQ(gc_state_, NOT_IN_GC);
+  Heap* heap = isolate()->heap();
+  Address* top = heap->NewSpaceAllocationTopAddress();
+  Address* limit = heap->NewSpaceAllocationLimitAddress();
+  if (allocation == AllocationType::kYoung &&
+      alignment == AllocationAlignment::kWordAligned &&
+      size < kMaxRegularHeapObjectSize &&
+      (*limit - *top >= static_cast<unsigned>(size)) &&
+      V8_LIKELY(!FLAG_single_generation && FLAG_inline_new)) {
+    DCHECK(IsAligned(size, kTaggedSize));
+    HeapObject obj = HeapObject::FromAddress(*top);
+    *top += size;
+    heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
+    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size);
+    return obj;
+  }
+  switch (mode) {
+    case kLightRetry:
+      return AllocateRawWithLightRetrySlowPath(size, allocation, origin,
+                                               alignment);
+    case kRetryOrFail:
+      return AllocateRawWithRetryOrFailSlowPath(size, allocation, origin,
+                                                alignment);
+  }
+  UNREACHABLE();
+}
+
 void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
   for (auto& tracker : allocation_trackers_) {
     tracker->AllocationEvent(object.address(), size_in_bytes);
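The hunk above is the core of the change: Heap::AllocateRawWith<mode> inlines the young-generation bump-pointer attempt and consults the retry mode only when that attempt fails. Because `mode` is a template parameter, the switch is resolved per instantiation at compile time, so each call site dispatches directly to one slow path. A self-contained analogue of that dispatch pattern (simplified names; the malloc-based slow paths are stand-ins, not V8's):

  #include <cstddef>
  #include <cstdio>
  #include <cstdlib>

  enum RetryMode { kLightRetry, kRetryOrFail };

  // Stand-in fast path; it always fails here so the slow paths are taken.
  void* TryFast(std::size_t) { return nullptr; }

  // Stand-in slow paths. In V8, the light-retry path may return a null
  // object after a few GC-and-retry rounds, while retry-or-fail ends in
  // a fatal error rather than returning null.
  void* SlowLightRetry(std::size_t size) { return malloc(size); }
  void* SlowRetryOrFail(std::size_t size) {
    void* p = malloc(size);
    if (p == nullptr) abort();
    return p;
  }

  template <RetryMode mode>
  void* AllocateRawWith(std::size_t size) {
    if (void* fast = TryFast(size)) return fast;  // shared fast path
    switch (mode) {  // compile-time constant per instantiation
      case kLightRetry:
        return SlowLightRetry(size);
      case kRetryOrFail:
        return SlowRetryOrFail(size);
    }
    return nullptr;  // unreachable; mirrors UNREACHABLE() above
  }

  int main() {
    void* p = AllocateRawWith<kRetryOrFail>(64);
    printf("%p\n", p);
    free(p);
    return 0;
  }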
@@ -4897,9 +4897,9 @@ HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
   return heap_object;
 }

-HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
-                                           AllocationOrigin origin,
-                                           AllocationAlignment alignment) {
+HeapObject Heap::AllocateRawWithLightRetrySlowPath(
+    int size, AllocationType allocation, AllocationOrigin origin,
+    AllocationAlignment alignment) {
   HeapObject result;
   AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment);
   if (alloc.To(&result)) {
@@ -4919,12 +4919,12 @@ HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
   return HeapObject();
 }

-HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationType allocation,
-                                            AllocationOrigin origin,
-                                            AllocationAlignment alignment) {
+HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
+    int size, AllocationType allocation, AllocationOrigin origin,
+    AllocationAlignment alignment) {
   AllocationResult alloc;
   HeapObject result =
-      AllocateRawWithLightRetry(size, allocation, origin, alignment);
+      AllocateRawWithLightRetrySlowPath(size, allocation, origin, alignment);
   if (!result.is_null()) return result;

   isolate()->counters()->gc_last_resort_from_handles()->Increment();
@@ -1746,20 +1746,23 @@ class Heap {
                 AllocationOrigin origin = AllocationOrigin::kRuntime,
                 AllocationAlignment alignment = kWordAligned);

+  // This method will try to allocate objects quickly (AllocationType::kYoung)
+  // otherwise it falls back to a slower path indicated by the mode.
+  enum AllocationRetryMode { kLightRetry, kRetryOrFail };
+  template <AllocationRetryMode mode>
+  V8_WARN_UNUSED_RESULT inline HeapObject AllocateRawWith(
+      int size, AllocationType allocation,
+      AllocationOrigin origin = AllocationOrigin::kRuntime,
+      AllocationAlignment alignment = kWordAligned);
+
   // This method will try to perform an allocation of a given size of a given
   // AllocationType. If the allocation fails, a regular full garbage collection
   // is triggered and the allocation is retried. This is performed multiple
   // times. If after that retry procedure the allocation still fails nullptr is
   // returned.
-  HeapObject AllocateRawWithLightRetry(
+  V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithLightRetrySlowPath(
       int size, AllocationType allocation, AllocationOrigin origin,
       AllocationAlignment alignment = kWordAligned);
-  HeapObject AllocateRawWithLightRetry(
-      int size, AllocationType allocation,
-      AllocationAlignment alignment = kWordAligned) {
-    return AllocateRawWithLightRetry(size, allocation,
-                                     AllocationOrigin::kRuntime, alignment);
-  }

   // This method will try to perform an allocation of a given size of a given
   // AllocationType. If the allocation fails, a regular full garbage collection
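Note that the two-argument convenience overloads disappear in this and the next hunk: they existed only to forward AllocationOrigin::kRuntime, and the new templated declaration defaults `origin` (and `alignment`) instead, so short call sites keep their shape. V8_WARN_UNUSED_RESULT is also added to these declarations, so a caller that drops the result now gets a compiler warning. A tiny sketch of the overload-to-default-argument consolidation (hypothetical names, not the V8 API):

  #include <cstdio>

  enum Origin { kRuntime, kGC };

  // One declaration with a defaulted parameter replaces a pair of
  // overloads where the shorter one only forwarded kRuntime.
  int AllocateSketch(int size, Origin origin = kRuntime) {
    return size + static_cast<int>(origin);  // placeholder body
  }

  int main() {
    printf("%d\n", AllocateSketch(16));       // origin defaults to kRuntime
    printf("%d\n", AllocateSketch(16, kGC));  // explicit origin still works
    return 0;
  }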
@@ -1767,17 +1770,11 @@ class Heap {
   // times. If after that retry procedure the allocation still fails a "hammer"
   // garbage collection is triggered which tries to significantly reduce memory.
   // If the allocation still fails after that a fatal error is thrown.
-  HeapObject AllocateRawWithRetryOrFail(
+  V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithRetryOrFailSlowPath(
       int size, AllocationType allocation, AllocationOrigin origin,
       AllocationAlignment alignment = kWordAligned);
-  HeapObject AllocateRawWithRetryOrFail(
-      int size, AllocationType allocation,
-      AllocationAlignment alignment = kWordAligned) {
-    return AllocateRawWithRetryOrFail(size, allocation,
-                                      AllocationOrigin::kRuntime, alignment);
-  }

-  HeapObject AllocateRawCodeInLargeObjectSpace(int size);
+  V8_WARN_UNUSED_RESULT HeapObject AllocateRawCodeInLargeObjectSpace(int size);

   // Allocates a heap object based on the map.
   V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map,
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

+// Flags: --expose-gc

 var Debug = debug.Debug;

@@ -28,6 +29,8 @@ function RunTest(formals_and_body, args, value1, value2) {
   // function and relocation of the suspended generator activation.
   Debug.setListener(listener);

+  gc();
+
   // Add a breakpoint on line 3 (the second yield).
   var bp = Debug.setBreakPoint(gen, 3);

@@ -11,6 +11,9 @@
   # not work, but we expect it to not crash.
   'debug/debug-step-turbofan': [PASS, FAIL],

+  # BUG (v8:9721)
+  'debug/es6/generators-relocation': [FAIL],
+
   # Issue 3641: The new 'then' semantics suppress some exceptions.
   # These tests may be changed or removed when 'chain' is deprecated.
   'debug/es6/debug-promises/reject-with-throw-in-reject': [FAIL],