[heap] remove flag optimize_ephemerons
Originally intended for benchmarking, but no longer good enough for that purpose since recent changes cannot be easily disabled. Bug: chromium:844008 Change-Id: I4c7075fe2f1d5e85b76a0c40c854b0dbf7d4564b Reviewed-on: https://chromium-review.googlesource.com/1127376 Reviewed-by: Ulan Degenbaev <ulan@chromium.org> Commit-Queue: Dominik Inführ <dinfuehr@google.com> Cr-Commit-Position: refs/heads/master@{#54281}
This commit is contained in:
parent
a1cb1eb9fc
commit
5f9686616c
@@ -699,11 +699,6 @@ DEFINE_BOOL(concurrent_marking, V8_CONCURRENT_MARKING_BOOL,
|
||||
"use concurrent marking")
|
||||
DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause")
|
||||
DEFINE_IMPLICATION(parallel_marking, concurrent_marking)
|
||||
DEFINE_BOOL(parallel_ephemeron_visiting, true,
|
||||
"use parallel visiting of ephemerons in atomic pause")
|
||||
DEFINE_BOOL(
|
||||
parallel_ephemeron_marking, true,
|
||||
"use parallel marking of objects after visiting ephemerons in atomic pause")
|
||||
DEFINE_INT(ephemeron_fixpoint_iterations, 0,
|
||||
"number of fixpoint iterations it takes to switch to linear "
|
||||
"ephemeron algorithm")
|
||||
@@ -792,11 +787,6 @@ DEFINE_BOOL(manual_evacuation_candidates_selection, false,
|
||||
DEFINE_BOOL(fast_promotion_new_space, false,
|
||||
"fast promote new space on high survival rates")
|
||||
|
||||
DEFINE_BOOL(optimize_ephemerons, true,
|
||||
"use optimized handling of ephemerons in the GC")
|
||||
DEFINE_NEG_NEG_IMPLICATION(optimize_ephemerons, parallel_ephemeron_marking)
|
||||
DEFINE_NEG_NEG_IMPLICATION(optimize_ephemerons, parallel_ephemeron_visiting)
|
||||
|
||||
DEFINE_BOOL(clear_free_memory, false, "initialize free memory with 0")
|
||||
|
||||
DEFINE_BOOL(young_generation_large_objects, false,
|
||||
@@ -1391,8 +1381,6 @@ DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_compaction)
|
||||
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_marking)
|
||||
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_pointer_update)
|
||||
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_scavenge)
|
||||
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_ephemeron_marking)
|
||||
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_ephemeron_visiting)
|
||||
DEFINE_NEG_IMPLICATION(single_threaded_gc, concurrent_store_buffer)
|
||||
#ifdef ENABLE_MINOR_MC
|
||||
DEFINE_NEG_IMPLICATION(single_threaded_gc, minor_mc_parallel_marking)
|
||||
|
@@ -361,32 +361,30 @@ class ConcurrentMarkingVisitor final
|
||||
if (!ShouldVisit(table)) return 0;
|
||||
weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
|
||||
|
||||
if (V8_LIKELY(FLAG_optimize_ephemerons)) {
|
||||
for (int i = 0; i < table->Capacity(); i++) {
|
||||
Object** key_slot =
|
||||
table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
|
||||
HeapObject* key = HeapObject::cast(table->KeyAt(i));
|
||||
MarkCompactCollector::RecordSlot(table, key_slot, key);
|
||||
for (int i = 0; i < table->Capacity(); i++) {
|
||||
Object** key_slot =
|
||||
table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
|
||||
HeapObject* key = HeapObject::cast(table->KeyAt(i));
|
||||
MarkCompactCollector::RecordSlot(table, key_slot, key);
|
||||
|
||||
Object** value_slot = table->RawFieldOfElementAt(
|
||||
EphemeronHashTable::EntryToValueIndex(i));
|
||||
Object** value_slot =
|
||||
table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
|
||||
|
||||
if (marking_state_.IsBlackOrGrey(key)) {
|
||||
VisitPointer(table, value_slot);
|
||||
if (marking_state_.IsBlackOrGrey(key)) {
|
||||
VisitPointer(table, value_slot);
|
||||
|
||||
} else {
|
||||
Object* value_obj = table->ValueAt(i);
|
||||
} else {
|
||||
Object* value_obj = table->ValueAt(i);
|
||||
|
||||
if (value_obj->IsHeapObject()) {
|
||||
HeapObject* value = HeapObject::cast(value_obj);
|
||||
MarkCompactCollector::RecordSlot(table, value_slot, value);
|
||||
if (value_obj->IsHeapObject()) {
|
||||
HeapObject* value = HeapObject::cast(value_obj);
|
||||
MarkCompactCollector::RecordSlot(table, value_slot, value);
|
||||
|
||||
// Revisit ephemerons with both key and value unreachable at end
|
||||
// of concurrent marking cycle.
|
||||
if (marking_state_.IsWhite(value)) {
|
||||
weak_objects_->discovered_ephemerons.Push(task_id_,
|
||||
Ephemeron{key, value});
|
||||
}
|
||||
// Revisit ephemerons with both key and value unreachable at end
|
||||
// of concurrent marking cycle.
|
||||
if (marking_state_.IsWhite(value)) {
|
||||
weak_objects_->discovered_ephemerons.Push(task_id_,
|
||||
Ephemeron{key, value});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -92,31 +92,29 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
|
||||
VisitEphemeronHashTable(Map* map, EphemeronHashTable* table) {
|
||||
collector_->AddEphemeronHashTable(table);
|
||||
|
||||
if (V8_LIKELY(FLAG_optimize_ephemerons)) {
|
||||
for (int i = 0; i < table->Capacity(); i++) {
|
||||
Object** key_slot =
|
||||
table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
|
||||
HeapObject* key = HeapObject::cast(table->KeyAt(i));
|
||||
collector_->RecordSlot(table, key_slot, key);
|
||||
for (int i = 0; i < table->Capacity(); i++) {
|
||||
Object** key_slot =
|
||||
table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
|
||||
HeapObject* key = HeapObject::cast(table->KeyAt(i));
|
||||
collector_->RecordSlot(table, key_slot, key);
|
||||
|
||||
Object** value_slot =
|
||||
table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
|
||||
Object** value_slot =
|
||||
table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
|
||||
|
||||
if (marking_state()->IsBlackOrGrey(key)) {
|
||||
VisitPointer(table, value_slot);
|
||||
if (marking_state()->IsBlackOrGrey(key)) {
|
||||
VisitPointer(table, value_slot);
|
||||
|
||||
} else {
|
||||
Object* value_obj = *value_slot;
|
||||
} else {
|
||||
Object* value_obj = *value_slot;
|
||||
|
||||
if (value_obj->IsHeapObject()) {
|
||||
HeapObject* value = HeapObject::cast(value_obj);
|
||||
collector_->RecordSlot(table, value_slot, value);
|
||||
if (value_obj->IsHeapObject()) {
|
||||
HeapObject* value = HeapObject::cast(value_obj);
|
||||
collector_->RecordSlot(table, value_slot, value);
|
||||
|
||||
// Revisit ephemerons with both key and value unreachable at end
|
||||
// of concurrent marking cycle.
|
||||
if (marking_state()->IsWhite(value)) {
|
||||
collector_->AddEphemeron(key, value);
|
||||
}
|
||||
// Revisit ephemerons with both key and value unreachable at end
|
||||
// of concurrent marking cycle.
|
||||
if (marking_state()->IsWhite(value)) {
|
||||
collector_->AddEphemeron(key, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1770,7 +1770,7 @@ void MarkCompactCollector::MarkLiveObjects() {
|
||||
|
||||
{
|
||||
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
|
||||
if (FLAG_parallel_marking && FLAG_parallel_ephemeron_marking) {
|
||||
if (FLAG_parallel_marking) {
|
||||
DCHECK(FLAG_concurrent_marking);
|
||||
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user