[heap] Flag for disabling ephemeron optimizations

Adds the --optimize-ephemerons flag (enabled by default). Disabling it turns off all GC optimizations for WeakMaps/WeakSets, including parallel ephemeron marking and visiting.

Bug: chromium:844008
Change-Id: I5395ce981bbdd0e8e03ba0821103afde0889cf56
Reviewed-on: https://chromium-review.googlesource.com/1102513
Commit-Queue: Dominik Inführ <dinfuehr@google.com>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53851}
Authored by Dominik Inführ on 2018-06-18 21:28:59 +02:00, committed by Commit Bot
parent bc43c8e8fc
commit 996a6c1096
5 changed files with 60 additions and 27 deletions


@@ -779,6 +779,11 @@ DEFINE_BOOL(manual_evacuation_candidates_selection, false,
DEFINE_BOOL(fast_promotion_new_space, false,
"fast promote new space on high survival rates")
DEFINE_BOOL(optimize_ephemerons, true,
"use optimized handling of ephemerons in the GC")
DEFINE_NEG_NEG_IMPLICATION(optimize_ephemerons, parallel_ephemeron_marking)
DEFINE_NEG_NEG_IMPLICATION(optimize_ephemerons, parallel_ephemeron_visiting)
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_BOOL(debug_code, DEBUG_BOOL,
"generate extra code (assertions) for debugging")

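The two DEFINE_NEG_NEG_IMPLICATION lines are what make a single flag sufficient: turning off --optimize-ephemerons also forces --parallel-ephemeron-marking and --parallel-ephemeron-visiting off. A minimal sketch of that implication behavior, assuming plain structs instead of V8's real flag machinery (Flags and ApplyImplications are illustrative names):

```cpp
// Sketch only: models the effect of the two implication lines above,
// not the actual V8 flag machinery.
#include <cstdio>

struct Flags {
  bool optimize_ephemerons = true;
  bool parallel_ephemeron_marking = true;
  bool parallel_ephemeron_visiting = true;
};

// DEFINE_NEG_NEG_IMPLICATION(a, b): if a is turned off, b is forced off too.
void ApplyImplications(Flags* f) {
  if (!f->optimize_ephemerons) {
    f->parallel_ephemeron_marking = false;
    f->parallel_ephemeron_visiting = false;
  }
}

int main() {
  Flags f;
  f.optimize_ephemerons = false;  // e.g. passing --no-optimize-ephemerons
  ApplyImplications(&f);
  std::printf("marking=%d visiting=%d\n", f.parallel_ephemeron_marking,
              f.parallel_ephemeron_visiting);  // prints: marking=0 visiting=0
}
```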

@@ -361,14 +361,15 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(table)) return 0;
weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
if (V8_LIKELY(FLAG_optimize_ephemerons)) {
for (int i = 0; i < table->Capacity(); i++) {
Object** key_slot =
table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
HeapObject* key = HeapObject::cast(table->KeyAt(i));
MarkCompactCollector::RecordSlot(table, key_slot, key);
Object** value_slot = table->RawFieldOfElementAt(
    EphemeronHashTable::EntryToValueIndex(i));
if (marking_state_.IsBlackOrGrey(key)) {
VisitPointer(table, value_slot);
@@ -382,6 +383,7 @@ class ConcurrentMarkingVisitor final
}
}
}
}
return table->SizeFromMap(map);
}

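With the flag enabled, the concurrent visitor applies the usual ephemeron rule per entry: a value is only visited once its key is known to be live (black or grey); otherwise the pair is left for a later pass. A simplified, self-contained sketch of that rule, using stub types rather than V8's HeapObject and EphemeronHashTable:

```cpp
// Simplified sketch of the per-entry ephemeron rule; Obj and Entry are
// stand-ins, not real V8 types.
#include <vector>

struct Obj {
  bool marked = false;
};

struct Entry {
  Obj* key;
  Obj* value;
};

// Values whose keys are already reachable are marked immediately; the rest
// are deferred so a later pass (or the atomic pause) can re-check them.
void VisitEphemeronEntries(const std::vector<Entry>& table,
                           std::vector<Entry>* deferred) {
  for (const Entry& e : table) {
    if (e.key->marked) {
      e.value->marked = true;  // key live => value live
    } else {
      deferred->push_back(e);  // key not (yet) live => decide later
    }
  }
}
```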

@@ -92,6 +92,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
VisitEphemeronHashTable(Map* map, EphemeronHashTable* table) {
collector_->AddEphemeronHashTable(table);
if (V8_LIKELY(FLAG_optimize_ephemerons)) {
for (int i = 0; i < table->Capacity(); i++) {
Object** key_slot =
table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
@@ -112,6 +113,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
}
}
}
}
return table->SizeFromMap(map);
}

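V8_LIKELY/V8_UNLIKELY are only branch-prediction hints, so with the default flag value the optimized path stays the fast path. Such macros are conventionally built on __builtin_expect, roughly as below (a sketch, not V8's actual definition):

```cpp
// Rough equivalent of a likely/unlikely hint macro; not V8's exact definition.
#if defined(__GNUC__) || defined(__clang__)
#define LIKELY(cond) (__builtin_expect(!!(cond), 1))
#define UNLIKELY(cond) (__builtin_expect(!!(cond), 0))
#else
#define LIKELY(cond) (cond)
#define UNLIKELY(cond) (cond)
#endif
```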

@@ -1925,6 +1925,26 @@ class EphemeronHashTableMarkingTask : public ItemParallelJob::Task {
}
}
}
// Record slots if that wasn't done already in concurrent or
// incremental marking
if (V8_UNLIKELY(!FLAG_optimize_ephemerons)) {
Object** key_slot =
table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
HeapObject* key = HeapObject::cast(table->KeyAt(i));
if (collector_->marking_state()->IsBlackOrGrey(key)) {
collector_->RecordSlot(table, key_slot, key);
Object* value = table->ValueAt(i);
if (value->IsHeapObject()) {
Object** value_slot = table->RawFieldOfElementAt(
EphemeronHashTable::EntryToValueIndex(i));
collector_->RecordSlot(table, value_slot,
HeapObject::cast(value));
}
}
}
}
item->MarkFinished();

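When the optimization is disabled, concurrent and incremental marking never recorded these slots, so the atomic pause does it here. Recording a slot means remembering the address of a field so the collector can rewrite the pointer after the referenced object has been moved during evacuation; a stand-alone sketch of that idea, where SlotSet and the forwarding map are illustrative stand-ins rather than V8's remembered-set types:

```cpp
// Illustrative sketch of slot recording; SlotSet is a stand-in for V8's
// remembered sets, not the real implementation.
#include <unordered_map>
#include <vector>

using Slot = void**;  // address of a field that holds a heap pointer

struct SlotSet {
  std::vector<Slot> recorded;

  void Record(Slot slot) { recorded.push_back(slot); }

  // After evacuation, every recorded slot is rewritten through the
  // old-address -> new-address forwarding map.
  void UpdatePointers(const std::unordered_map<void*, void*>& forwarding) {
    for (Slot slot : recorded) {
      auto it = forwarding.find(*slot);
      if (it != forwarding.end()) *slot = it->second;
    }
  }
};
```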

@@ -415,7 +415,11 @@ class MajorNonAtomicMarkingState final
struct WeakObjects {
Worklist<WeakCell*, 64> weak_cells;
Worklist<TransitionArray*, 64> transition_arrays;
// Keep track of all EphemeronHashTables in the heap to process
// them in the atomic pause.
Worklist<EphemeronHashTable*, 64> ephemeron_hash_tables;
// TODO(marja): For old space, we only need the slot, not the host
// object. Optimize this by adding a different storage for old space.
Worklist<std::pair<HeapObject*, HeapObjectReference**>, 64> weak_references;
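The ephemeron_hash_tables worklist collects every EphemeronHashTable encountered during marking so the atomic pause can revisit them. A minimal sketch of that drain loop, with Worklist and Table as simple stand-ins for V8's segmented Worklist<EphemeronHashTable*, 64>:

```cpp
// Minimal stand-ins; V8's Worklist is segmented and per-task, this is not.
#include <deque>

struct Table { /* ephemeron entries ... */ };

class Worklist {
 public:
  void Push(Table* table) { items_.push_back(table); }
  bool Pop(Table** out) {
    if (items_.empty()) return false;
    *out = items_.back();
    items_.pop_back();
    return true;
  }

 private:
  std::deque<Table*> items_;
};

// Atomic-pause style drain: every table pushed during marking is revisited.
void ProcessEphemeronHashTables(Worklist* tables) {
  Table* table = nullptr;
  while (tables->Pop(&table)) {
    // Apply the key->value rule to each entry of |table| here.
  }
}
```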