Remove FLAG_young_generation_large_objects

The flag has been turned on for a long time and we do not intend to
support a mode without young LO objects.

A side effect is that it removes a branch in AllocateRaw for the young
generation.

Drive-by: Reinstate the LO space verifier check that only certain
types can appear as large objects.

Bug: v8:12615
Change-Id: I8c33019a04670f20459ea2faa9dc2f98b8cda40b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3450420
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79044}
Authored by Michael Lippautz on 2022-02-09 21:10:58 +01:00; committed by V8 LUCI CQ
parent 658012eac1
commit 96162c7579
9 changed files with 38 additions and 122 deletions

@@ -1247,15 +1247,9 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
AllocateDoubleAlignFlag::encode(needs_double_alignment) |
AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation)));
if (FLAG_young_generation_large_objects) {
result =
CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
SmiTag(size_in_bytes), runtime_flags);
} else {
result =
CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
SmiTag(size_in_bytes), runtime_flags);
}
Goto(&out);
BIND(&next);
@@ -1376,10 +1370,6 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
bool const new_space = !(flags & AllocationFlag::kPretenured);
bool const allow_large_objects =
flags & AllocationFlag::kAllowLargeObjectAllocation;
// For optimized allocations, we don't allow the allocation to happen in a
// different generation than requested.
bool const always_allocated_in_requested_space =
!new_space || !allow_large_objects || FLAG_young_generation_large_objects;
if (!allow_large_objects) {
intptr_t size_constant;
if (TryToIntPtrConstant(size_in_bytes, &size_constant)) {
@@ -1388,8 +1378,7 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
CSA_DCHECK(this, IsRegularHeapObjectSize(size_in_bytes));
}
}
if (!(flags & AllocationFlag::kDoubleAlignment) &&
always_allocated_in_requested_space) {
if (!(flags & AllocationFlag::kDoubleAlignment)) {
return OptimizedAllocate(
size_in_bytes,
new_space ? AllocationType::kYoung : AllocationType::kOld,
@@ -4367,7 +4356,7 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
TVARIABLE(Map, var_target_map, source_map);
Label done(this, {&var_result}), is_cow(this),
new_space_check(this, {&var_target_map});
new_space_handler(this, {&var_target_map});
// If source_map is either FixedDoubleArrayMap, or FixedCOWArrayMap but
// we can't just use COW, use FixedArrayMap as the target map. Otherwise, use
@@ -4375,11 +4364,11 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
if (IsDoubleElementsKind(from_kind)) {
CSA_DCHECK(this, IsFixedDoubleArrayMap(source_map));
var_target_map = FixedArrayMapConstant();
Goto(&new_space_check);
Goto(&new_space_handler);
} else {
CSA_DCHECK(this, Word32BinaryNot(IsFixedDoubleArrayMap(source_map)));
Branch(TaggedEqual(var_target_map.value(), FixedCOWArrayMapConstant()),
&is_cow, &new_space_check);
&is_cow, &new_space_handler);
BIND(&is_cow);
{
@@ -4389,34 +4378,19 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
// 2) we're asked to extract only part of the |source| (|first| != 0).
if (extract_flags & ExtractFixedArrayFlag::kDontCopyCOW) {
Branch(IntPtrOrSmiNotEqual(IntPtrOrSmiConstant<TIndex>(0), first),
&new_space_check, [&] {
&new_space_handler, [&] {
var_result = source;
Goto(&done);
});
} else {
var_target_map = FixedArrayMapConstant();
Goto(&new_space_check);
Goto(&new_space_handler);
}
}
}
BIND(&new_space_check);
BIND(&new_space_handler);
{
bool handle_old_space = !FLAG_young_generation_large_objects;
if (handle_old_space) {
int constant_count;
handle_old_space =
!TryGetIntPtrOrSmiConstantValue(count, &constant_count) ||
(constant_count >
FixedArray::GetMaxLengthForNewSpaceAllocation(PACKED_ELEMENTS));
}
Label old_space(this, Label::kDeferred);
if (handle_old_space) {
GotoIfFixedArraySizeDoesntFitInNewSpace(capacity, &old_space,
FixedArray::kHeaderSize);
}
Comment("Copy FixedArray in young generation");
// We use PACKED_ELEMENTS to tell AllocateFixedArray and
// CopyFixedArrayElements that we want a FixedArray.
@@ -4456,50 +4430,6 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
var_holes_converted);
}
Goto(&done);
if (handle_old_space) {
BIND(&old_space);
{
Comment("Copy FixedArray in old generation");
Label copy_one_by_one(this);
// Try to use memcpy if we don't need to convert holes to undefined.
if (convert_holes == HoleConversionMode::kDontConvert &&
source_elements_kind) {
// Only try memcpy if we're not copying object pointers.
GotoIfNot(IsFastSmiElementsKind(*source_elements_kind),
&copy_one_by_one);
const ElementsKind to_smi_kind = PACKED_SMI_ELEMENTS;
to_elements = AllocateFixedArray(
to_smi_kind, capacity, allocation_flags, var_target_map.value());
var_result = to_elements;
FillFixedArrayWithValue(to_smi_kind, to_elements, count, capacity,
RootIndex::kTheHoleValue);
// CopyElements will try to use memcpy if it's not conflicting with
// GC. Otherwise it will copy elements by elements, but skip write
// barriers (since we're copying smis to smis).
CopyElements(to_smi_kind, to_elements, IntPtrConstant(0), source,
ParameterToIntPtr(first), ParameterToIntPtr(count),
SKIP_WRITE_BARRIER);
Goto(&done);
} else {
Goto(&copy_one_by_one);
}
BIND(&copy_one_by_one);
{
to_elements = AllocateFixedArray(to_kind, capacity, allocation_flags,
var_target_map.value());
var_result = to_elements;
CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first,
count, capacity, UPDATE_WRITE_BARRIER,
convert_holes, var_holes_converted);
Goto(&done);
}
}
}
}
BIND(&done);

@@ -1834,11 +1834,6 @@ const Operator* SimplifiedOperatorBuilder::Allocate(Type type,
const Operator* SimplifiedOperatorBuilder::AllocateRaw(
Type type, AllocationType allocation,
AllowLargeObjects allow_large_objects) {
// We forbid optimized allocations to allocate in a different generation than
// requested.
DCHECK(!(allow_large_objects == AllowLargeObjects::kTrue &&
allocation == AllocationType::kYoung &&
!FLAG_young_generation_large_objects));
return zone()->New<Operator1<AllocateParameters>>(
IrOpcode::kAllocateRaw, Operator::kEliminatable, "AllocateRaw", 1, 1, 1,
1, 1, 1, AllocateParameters(type, allocation, allow_large_objects));

@@ -1387,11 +1387,6 @@ DEFINE_BOOL(clear_free_memory, false, "initialize free memory with 0")
DEFINE_BOOL(crash_on_aborted_evacuation, false,
"crash when evacuation of page fails")
DEFINE_BOOL_READONLY(
young_generation_large_objects, true,
"allocates large objects by default in the young generation large "
"object space")
// assembler-ia32.cc / assembler-arm.cc / assembler-arm64.cc / assembler-x64.cc
#ifdef V8_ENABLE_DEBUG_CODE
DEFINE_BOOL(debug_code, DEBUG_BOOL,

@@ -243,15 +243,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
} else {
if (AllocationType::kYoung == type) {
if (large_object) {
if (FLAG_young_generation_large_objects) {
allocation = new_lo_space_->AllocateRaw(size_in_bytes);
} else {
// If young generation large objects are disabled we have to tenure
// the allocation and violate the given allocation type. This could be
// dangerous. We may want to remove
// FLAG_young_generation_large_objects and avoid patching.
allocation = lo_space_->AllocateRaw(size_in_bytes);
}
} else {
allocation = new_space_->AllocateRaw(size_in_bytes, alignment, origin);
}
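
The hunk above is the AllocateRaw branch the commit message calls out: with FLAG_young_generation_large_objects gone, an oversized young-generation request always goes to the young large-object space and is never tenured into the old LO space as a workaround. A minimal standalone sketch of that routing decision, under invented names (the Space enum, the threshold constant, and ChooseYoungSpace are illustrative only; kMaxRegularObjectSize stands in for V8's kMaxRegularHeapObjectSize):

// Toy model only -- not V8 code; names and the threshold are illustrative.
#include <cstdio>

enum class Space { kNewSpace, kNewLargeObjectSpace };

// Stand-in for V8's kMaxRegularHeapObjectSize.
constexpr int kMaxRegularObjectSize = 128 * 1024;

// Single rule after the flag removal: a request too big for a regular page
// goes to the young large-object space, with no fallback to the old LO space.
Space ChooseYoungSpace(int size_in_bytes) {
  return size_in_bytes > kMaxRegularObjectSize ? Space::kNewLargeObjectSpace
                                               : Space::kNewSpace;
}

int main() {
  const char* names[] = {"new space", "new LO space"};
  std::printf("1 KiB   -> %s\n",
              names[static_cast<int>(ChooseYoungSpace(1024))]);
  std::printf("512 KiB -> %s\n",
              names[static_cast<int>(ChooseYoungSpace(512 * 1024))]);
  return 0;
}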

@@ -5824,15 +5824,13 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
space_[NEW_SPACE] = new_space_ = new NewSpace(
this, memory_allocator_->data_page_allocator(), initial_semispace_size_,
max_semi_space_size_, new_allocation_info);
space_[NEW_LO_SPACE] = new_lo_space_ =
new NewLargeObjectSpace(this, NewSpaceCapacity());
}
space_[OLD_SPACE] = old_space_ = new OldSpace(this, old_allocation_info);
space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
space_[MAP_SPACE] = map_space_ = new MapSpace(this);
space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
if (has_young_gen) {
space_[NEW_LO_SPACE] = new_lo_space_ =
new NewLargeObjectSpace(this, NewSpaceCapacity());
}
space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);

@@ -380,19 +380,31 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// We have only the following types in the large object space:
if (!(object.IsAbstractCode(cage_base) || object.IsSeqString(cage_base) ||
object.IsExternalString(cage_base) ||
object.IsThinString(cage_base) || object.IsFixedArray(cage_base) ||
object.IsFixedDoubleArray(cage_base) ||
object.IsWeakFixedArray(cage_base) ||
object.IsWeakArrayList(cage_base) ||
object.IsPropertyArray(cage_base) || object.IsByteArray(cage_base) ||
object.IsFeedbackVector(cage_base) || object.IsBigInt(cage_base) ||
object.IsFreeSpace(cage_base) ||
object.IsFeedbackMetadata(cage_base) || object.IsContext(cage_base) ||
object.IsUncompiledDataWithoutPreparseData(cage_base) ||
object.IsPreparseData(cage_base)) &&
!FLAG_young_generation_large_objects) {
const bool is_valid_lo_space_object = //
object.IsAbstractCode(cage_base) || //
object.IsBigInt(cage_base) || //
object.IsByteArray(cage_base) || //
object.IsContext(cage_base) || //
object.IsExternalString(cage_base) || //
object.IsFeedbackMetadata(cage_base) || //
object.IsFeedbackVector(cage_base) || //
object.IsFixedArray(cage_base) || //
object.IsFixedDoubleArray(cage_base) || //
object.IsFreeSpace(cage_base) || //
object.IsPreparseData(cage_base) || //
object.IsPropertyArray(cage_base) || //
object.IsScopeInfo() || //
object.IsSeqString(cage_base) || //
object.IsSwissNameDictionary() || //
object.IsThinString(cage_base) || //
object.IsUncompiledDataWithoutPreparseData(cage_base) || //
#if V8_ENABLE_WEBASSEMBLY //
object.IsWasmArray() || //
#endif //
object.IsWeakArrayList(cage_base) || //
object.IsWeakFixedArray(cage_base);
if (!is_valid_lo_space_object) {
object.Print();
FATAL("Found invalid Object (instance_type=%i) in large object space.",
object.map(cage_base).instance_type());
}
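
The reinstated verifier above amounts to a whitelist of instance types followed by a FATAL for anything unexpected. A self-contained sketch of that pattern, assuming invented names (the Type enum and both helpers are hypothetical, not V8's API; only the whitelist-then-abort shape mirrors the hunk):

// Toy model only -- type names and helpers are hypothetical, not V8's API.
#include <cstdio>
#include <cstdlib>

enum class Type { kFixedArray, kSeqString, kByteArray, kBigInt, kJSObject };

// Whitelist of types that may legitimately live in the large object space.
bool IsValidLargeObject(Type t) {
  switch (t) {
    case Type::kFixedArray:
    case Type::kSeqString:
    case Type::kByteArray:
    case Type::kBigInt:
      return true;
    default:
      return false;
  }
}

// Anything off the whitelist is a fatal error, as in the verifier above.
void VerifyLargeObject(Type t) {
  if (!IsValidLargeObject(t)) {
    std::fprintf(stderr, "Found invalid object in large object space.\n");
    std::abort();
  }
}

int main() {
  VerifyLargeObject(Type::kFixedArray);   // on the whitelist, passes
  // VerifyLargeObject(Type::kJSObject);  // would abort: not whitelisted
  std::puts("large object space verification passed");
  return 0;
}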

@@ -207,7 +207,6 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
// TODO(hpayer): Make this check size based, i.e.
// object_size > kMaxRegularHeapObjectSize
if (V8_UNLIKELY(
FLAG_young_generation_large_objects &&
BasicMemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner_identity());

@@ -471,8 +471,6 @@ RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
AllowLargeObjectAllocationFlag::decode(flags);
CHECK(IsAligned(size, kTaggedSize));
CHECK_GT(size, 0);
CHECK(FLAG_young_generation_large_objects ||
size <= kMaxRegularHeapObjectSize);
if (!allow_large_object_allocation) {
CHECK(size <= kMaxRegularHeapObjectSize);
}

@@ -6010,7 +6010,6 @@ TEST(Regress618958) {
TEST(YoungGenerationLargeObjectAllocationScavenge) {
if (FLAG_minor_mc) return;
if (!FLAG_young_generation_large_objects) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
@@ -6040,7 +6039,6 @@ TEST(YoungGenerationLargeObjectAllocationScavenge) {
TEST(YoungGenerationLargeObjectAllocationMarkCompact) {
if (FLAG_minor_mc) return;
if (!FLAG_young_generation_large_objects) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
@@ -6070,7 +6068,6 @@ TEST(YoungGenerationLargeObjectAllocationMarkCompact) {
TEST(YoungGenerationLargeObjectAllocationReleaseScavenger) {
if (FLAG_minor_mc) return;
if (!FLAG_young_generation_large_objects) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();