[heap] remove flag optimize_ephemerons

Originally intended for benchmarking, but it is no longer suitable for that
purpose, since recent changes cannot be easily disabled by it.

Bug: chromium:844008
Change-Id: I4c7075fe2f1d5e85b76a0c40c854b0dbf7d4564b
Reviewed-on: https://chromium-review.googlesource.com/1127376
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@google.com>
Cr-Commit-Position: refs/heads/master@{#54281}
This commit is contained in:
Dominik Inführ 2018-07-06 09:59:26 +02:00 committed by Commit Bot
parent a1cb1eb9fc
commit 5f9686616c
4 changed files with 38 additions and 54 deletions

View File

@@ -699,11 +699,6 @@ DEFINE_BOOL(concurrent_marking, V8_CONCURRENT_MARKING_BOOL,
"use concurrent marking") "use concurrent marking")
DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause") DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause")
DEFINE_IMPLICATION(parallel_marking, concurrent_marking) DEFINE_IMPLICATION(parallel_marking, concurrent_marking)
DEFINE_BOOL(parallel_ephemeron_visiting, true,
"use parallel visiting of ephemerons in atomic pause")
DEFINE_BOOL(
parallel_ephemeron_marking, true,
"use parallel marking of objects after visiting ephemerons in atomic pause")
DEFINE_INT(ephemeron_fixpoint_iterations, 0, DEFINE_INT(ephemeron_fixpoint_iterations, 0,
"number of fixpoint iterations it takes to switch to linear " "number of fixpoint iterations it takes to switch to linear "
"ephemeron algorithm") "ephemeron algorithm")
@@ -792,11 +787,6 @@ DEFINE_BOOL(manual_evacuation_candidates_selection, false,
DEFINE_BOOL(fast_promotion_new_space, false, DEFINE_BOOL(fast_promotion_new_space, false,
"fast promote new space on high survival rates") "fast promote new space on high survival rates")
DEFINE_BOOL(optimize_ephemerons, true,
"use optimized handling of ephemerons in the GC")
DEFINE_NEG_NEG_IMPLICATION(optimize_ephemerons, parallel_ephemeron_marking)
DEFINE_NEG_NEG_IMPLICATION(optimize_ephemerons, parallel_ephemeron_visiting)
DEFINE_BOOL(clear_free_memory, false, "initialize free memory with 0") DEFINE_BOOL(clear_free_memory, false, "initialize free memory with 0")
DEFINE_BOOL(young_generation_large_objects, false, DEFINE_BOOL(young_generation_large_objects, false,
@@ -1391,8 +1381,6 @@ DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_compaction)
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_marking) DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_marking)
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_pointer_update) DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_pointer_update)
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_scavenge) DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_scavenge)
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_ephemeron_marking)
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_ephemeron_visiting)
DEFINE_NEG_IMPLICATION(single_threaded_gc, concurrent_store_buffer) DEFINE_NEG_IMPLICATION(single_threaded_gc, concurrent_store_buffer)
#ifdef ENABLE_MINOR_MC #ifdef ENABLE_MINOR_MC
DEFINE_NEG_IMPLICATION(single_threaded_gc, minor_mc_parallel_marking) DEFINE_NEG_IMPLICATION(single_threaded_gc, minor_mc_parallel_marking)

View File

@@ -361,32 +361,30 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(table)) return 0; if (!ShouldVisit(table)) return 0;
weak_objects_->ephemeron_hash_tables.Push(task_id_, table); weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
if (V8_LIKELY(FLAG_optimize_ephemerons)) { for (int i = 0; i < table->Capacity(); i++) {
for (int i = 0; i < table->Capacity(); i++) { Object** key_slot =
Object** key_slot = table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i)); HeapObject* key = HeapObject::cast(table->KeyAt(i));
HeapObject* key = HeapObject::cast(table->KeyAt(i)); MarkCompactCollector::RecordSlot(table, key_slot, key);
MarkCompactCollector::RecordSlot(table, key_slot, key);
Object** value_slot = table->RawFieldOfElementAt( Object** value_slot =
EphemeronHashTable::EntryToValueIndex(i)); table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
if (marking_state_.IsBlackOrGrey(key)) { if (marking_state_.IsBlackOrGrey(key)) {
VisitPointer(table, value_slot); VisitPointer(table, value_slot);
} else { } else {
Object* value_obj = table->ValueAt(i); Object* value_obj = table->ValueAt(i);
if (value_obj->IsHeapObject()) { if (value_obj->IsHeapObject()) {
HeapObject* value = HeapObject::cast(value_obj); HeapObject* value = HeapObject::cast(value_obj);
MarkCompactCollector::RecordSlot(table, value_slot, value); MarkCompactCollector::RecordSlot(table, value_slot, value);
// Revisit ephemerons with both key and value unreachable at end // Revisit ephemerons with both key and value unreachable at end
// of concurrent marking cycle. // of concurrent marking cycle.
if (marking_state_.IsWhite(value)) { if (marking_state_.IsWhite(value)) {
weak_objects_->discovered_ephemerons.Push(task_id_, weak_objects_->discovered_ephemerons.Push(task_id_,
Ephemeron{key, value}); Ephemeron{key, value});
}
} }
} }
} }

View File

@@ -92,31 +92,29 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
VisitEphemeronHashTable(Map* map, EphemeronHashTable* table) { VisitEphemeronHashTable(Map* map, EphemeronHashTable* table) {
collector_->AddEphemeronHashTable(table); collector_->AddEphemeronHashTable(table);
if (V8_LIKELY(FLAG_optimize_ephemerons)) { for (int i = 0; i < table->Capacity(); i++) {
for (int i = 0; i < table->Capacity(); i++) { Object** key_slot =
Object** key_slot = table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i)); HeapObject* key = HeapObject::cast(table->KeyAt(i));
HeapObject* key = HeapObject::cast(table->KeyAt(i)); collector_->RecordSlot(table, key_slot, key);
collector_->RecordSlot(table, key_slot, key);
Object** value_slot = Object** value_slot =
table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i)); table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
if (marking_state()->IsBlackOrGrey(key)) { if (marking_state()->IsBlackOrGrey(key)) {
VisitPointer(table, value_slot); VisitPointer(table, value_slot);
} else { } else {
Object* value_obj = *value_slot; Object* value_obj = *value_slot;
if (value_obj->IsHeapObject()) { if (value_obj->IsHeapObject()) {
HeapObject* value = HeapObject::cast(value_obj); HeapObject* value = HeapObject::cast(value_obj);
collector_->RecordSlot(table, value_slot, value); collector_->RecordSlot(table, value_slot, value);
// Revisit ephemerons with both key and value unreachable at end // Revisit ephemerons with both key and value unreachable at end
// of concurrent marking cycle. // of concurrent marking cycle.
if (marking_state()->IsWhite(value)) { if (marking_state()->IsWhite(value)) {
collector_->AddEphemeron(key, value); collector_->AddEphemeron(key, value);
}
} }
} }
} }

View File

@@ -1770,7 +1770,7 @@ void MarkCompactCollector::MarkLiveObjects() {
{ {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN); TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
if (FLAG_parallel_marking && FLAG_parallel_ephemeron_marking) { if (FLAG_parallel_marking) {
DCHECK(FLAG_concurrent_marking); DCHECK(FLAG_concurrent_marking);
heap_->concurrent_marking()->RescheduleTasksIfNeeded(); heap_->concurrent_marking()->RescheduleTasksIfNeeded();
} }