[heap] Reduce old-to-new invalidations

Reduce the number of old-to-new invalidations. MigrateFastToFast,
MigrateFastToSlow and DeleteObjectPropertyFast only need to invalidate
objects in some cases, not in all of them.

Bug: v8:9454
Change-Id: I901eecb9409c6dfa30cf6b4ee0bdd597862fc229
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1781042
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63557}
Author: Dominik Inführ <dinfuehr@chromium.org>
Date: 2019-09-04 06:47:51 +02:00
Committed-by: Commit Bot
Parent: e4e86b53cf
Commit: a14e2f1278
6 changed files with 58 additions and 8 deletions
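
In short: NotifyObjectLayoutChange gains an InvalidateRecordedSlots parameter
(defaulting to kYes), and the three call sites named above pass kNo and instead
invalidate manually, via the new MemoryChunk::InvalidateRecordedSlots, only on
the paths where a recorded slot can actually end up holding an untagged value.
A minimal sketch of the resulting caller pattern follows; the helper name and
control flow are illustrative only, the real call sites are in the diffs below:

    // Hypothetical helper illustrating the pattern of this CL (not V8 code).
    void ChangeLayout(Heap* heap, HeapObject object, int old_size,
                      const DisallowHeapAllocation& no_allocation,
                      bool may_store_untagged_value) {
      // Opt out of the unconditional invalidation the old API performed.
      heap->NotifyObjectLayoutChange(object, old_size, no_allocation,
                                     InvalidateRecordedSlots::kNo);
      // ... rewrite the object's fields ...
      if (may_store_untagged_value) {
        // Only the path that can leave raw bits in a recorded slot pays
        // for the invalidation.
        MemoryChunk::FromHeapObject(object)->InvalidateRecordedSlots(
            object, old_size);
      }
    }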


@@ -3387,17 +3387,20 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
   }
 }
 
-void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
-                                    const DisallowHeapAllocation&) {
+void Heap::NotifyObjectLayoutChange(
+    HeapObject object, int size, const DisallowHeapAllocation&,
+    InvalidateRecordedSlots invalidate_recorded_slots) {
   if (incremental_marking()->IsMarking()) {
     incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
     if (incremental_marking()->IsCompacting() &&
+        invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
         MayContainRecordedSlots(object)) {
       MemoryChunk::FromHeapObject(object)
           ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
     }
   }
-  if (MayContainRecordedSlots(object)) {
+  if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
+      MayContainRecordedSlots(object)) {
     MemoryChunk::FromHeapObject(object)
         ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, size);
   }
@@ -5529,6 +5532,10 @@ Address Heap::store_buffer_overflow_function_address() {
   return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
 }
 
+void Heap::MoveStoreBufferEntriesToRememberedSet() {
+  store_buffer()->MoveAllEntriesToRememberedSet();
+}
+
 void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
   DCHECK(!IsLargeObject(object));
   Page* page = Page::FromAddress(slot.address());


@@ -86,6 +86,8 @@ enum ArrayStorageAllocationMode {
 
 enum class ClearRecordedSlots { kYes, kNo };
 
+enum class InvalidateRecordedSlots { kYes, kNo };
+
 enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };
 
 enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
@@ -843,6 +845,8 @@ class Heap {
   static intptr_t store_buffer_mask_constant();
   static Address store_buffer_overflow_function_address();
 
+  void MoveStoreBufferEntriesToRememberedSet();
+
   void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
   void ClearRecordedSlotRange(Address start, Address end);
 
@@ -896,8 +900,13 @@ class Heap {
   // The runtime uses this function to notify potentially unsafe object layout
   // changes that require special synchronization with the concurrent marker.
   // The old size is the size of the object before layout change.
-  void NotifyObjectLayoutChange(HeapObject object, int old_size,
-                                const DisallowHeapAllocation&);
+  // By default recorded slots in the object are invalidated. Pass
+  // InvalidateRecordedSlots::kNo if this is not necessary or to perform this
+  // manually.
+  void NotifyObjectLayoutChange(
+      HeapObject object, int old_size, const DisallowHeapAllocation&,
+      InvalidateRecordedSlots invalidate_recorded_slots =
+          InvalidateRecordedSlots::kYes);
 
 #ifdef VERIFY_HEAP
   // This function checks that either
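
Because the new parameter defaults to InvalidateRecordedSlots::kYes, every
existing caller keeps the old, conservative behavior without modification;
only the three call sites touched below opt out. For instance (illustrative
call, not taken from the diff):

    // Under the new signature these two calls are equivalent:
    heap->NotifyObjectLayoutChange(object, old_size, no_allocation);
    heap->NotifyObjectLayoutChange(object, old_size, no_allocation,
                                   InvalidateRecordedSlots::kYes);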


@@ -1532,6 +1532,20 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
   }
 }
 
+void MemoryChunk::InvalidateRecordedSlots(HeapObject object, int size) {
+  if (heap()->incremental_marking()->IsCompacting()) {
+    // We cannot check slot_set_[OLD_TO_OLD] here, since the
+    // concurrent markers might insert slots concurrently.
+    RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
+  }
+
+  heap()->MoveStoreBufferEntriesToRememberedSet();
+
+  if (slot_set_[OLD_TO_NEW] != nullptr) {
+    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object, size);
+  }
+}
+
 template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
     HeapObject object);
 template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(
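
Note the ordering inside the new helper: the OLD_TO_OLD registration stays
unconditional while compacting because concurrent markers may insert slots at
any time, and the store buffer is flushed into the remembered set before
slot_set_[OLD_TO_NEW] is checked, so old-to-new slots still sitting in the
buffer cannot escape invalidation. The expected usage, mirroring the call
sites added in the diffs below (names taken from those diffs):

    // After overwriting a tagged in-object field:
    heap->ClearRecordedSlot(*object, object->RawField(index.offset()));
    // Register the whole object as invalidated, so any remaining recorded
    // slots inside it are re-validated against the new layout before use.
    MemoryChunk::FromHeapObject(*object)->InvalidateRecordedSlots(
        *object, old_instance_size);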


@@ -735,6 +735,7 @@ class MemoryChunk : public BasicMemoryChunk {
   template <RememberedSetType type>
   void MoveObjectWithInvalidatedSlots(HeapObject old_start,
                                       HeapObject new_start);
+  void InvalidateRecordedSlots(HeapObject object, int size);
   template <RememberedSetType type>
   bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
   template <RememberedSetType type>


@@ -2778,7 +2778,10 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
 
   int old_instance_size = old_map->instance_size();
 
-  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
+  // Invalidate slots manually later in case of tagged to untagged translation.
+  // In all other cases the recorded slot remains dereferenceable.
+  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation,
+                                 InvalidateRecordedSlots::kNo);
 
   // Copy (real) inobject properties. If necessary, stop at number_of_fields to
   // avoid overwriting |one_pointer_filler_map|.
@@ -2796,6 +2799,8 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
       if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
         // Transition from tagged to untagged slot.
         heap->ClearRecordedSlot(*object, object->RawField(index.offset()));
+        MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object);
+        chunk->InvalidateRecordedSlots(*object, old_instance_size);
       } else {
 #ifdef DEBUG
         heap->VerifyClearedSlot(*object, object->RawField(index.offset()));
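
For intuition on why only the tagged-to-untagged transition needs this: after
the migration, the offset of a formerly tagged field may hold an unboxed
double, and a stale recorded slot at that offset would be dereferenced as a
pointer. A contrived illustration (hypothetical offset and value, not V8 code):

    // Before: offset 8 holds a tagged pointer, recorded in OLD_TO_NEW.
    // After:  the same offset holds raw double bits.
    object.WriteField<double>(8, 0.5);  // stores 0x3FE0000000000000
    // Without invalidation, a later remembered-set visit would treat
    // 0x3FE0000000000000 as a heap pointer and misbehave.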
@@ -2892,7 +2897,12 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
   Heap* heap = isolate->heap();
 
   int old_instance_size = map->instance_size();
-  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
+
+  // Invalidate slots manually later in case the new map has in-object
+  // properties. If not, it is not possible to store an untagged value
+  // in a recorded slot.
+  heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation,
+                                 InvalidateRecordedSlots::kNo);
 
   // Resize the object in the heap if necessary.
   int new_instance_size = new_map->instance_size();
@@ -2918,6 +2928,8 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
     heap->ClearRecordedSlotRange(
         object->address() + map->GetInObjectPropertyOffset(0),
         object->address() + new_instance_size);
+    MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object);
+    chunk->InvalidateRecordedSlots(*object, old_instance_size);
 
     for (int i = 0; i < inobject_properties; i++) {
       FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
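
MigrateFastToSlow is the mirror case: only when the new map keeps in-object
property slots can an untagged value later be stored over a recorded slot,
which is why the invalidation runs over the old instance size right after the
recorded-slot range is cleared. Condensed from the hunk above, with
explanatory comments added:

    // Clear recorded slots covering the freed in-object area...
    heap->ClearRecordedSlotRange(
        object->address() + map->GetInObjectPropertyOffset(0),
        object->address() + new_instance_size);
    // ...then invalidate over the old size, so a recorded slot still inside
    // the shrunk object is re-checked against the new map before use.
    MemoryChunk::FromHeapObject(*object)->InvalidateRecordedSlots(
        *object, old_instance_size);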


@@ -132,8 +132,13 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
   // for properties stored in the descriptor array.
   if (details.location() == kField) {
     DisallowHeapAllocation no_allocation;
+
+    int receiver_size = receiver_map->instance_size();
+    // Invalidate slots manually later in case we delete an in-object tagged
+    // property. In this case we might later store an untagged value in the
+    // recorded slot.
     isolate->heap()->NotifyObjectLayoutChange(
-        *receiver, receiver_map->instance_size(), no_allocation);
+        *receiver, receiver_size, no_allocation, InvalidateRecordedSlots::kNo);
     FieldIndex index =
         FieldIndex::ForPropertyIndex(*receiver_map, details.field_index());
     // Special case deleting the last out-of object property.
@@ -151,6 +156,8 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
       if (index.is_inobject() && !receiver_map->IsUnboxedDoubleField(index)) {
         isolate->heap()->ClearRecordedSlot(*receiver,
                                            receiver->RawField(index.offset()));
+        MemoryChunk* chunk = MemoryChunk::FromHeapObject(*receiver);
+        chunk->InvalidateRecordedSlots(*receiver, receiver_size);
       }
     }
   }
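
Taken together, the deletion fast path now reads: notify the heap with kNo,
shrink the object, and only when an in-object tagged field was actually
cleared perform the invalidation. Condensed from the two hunks above
(out-of-object and debug-only branches omitted):

    DisallowHeapAllocation no_allocation;
    int receiver_size = receiver_map->instance_size();
    isolate->heap()->NotifyObjectLayoutChange(
        *receiver, receiver_size, no_allocation, InvalidateRecordedSlots::kNo);
    if (index.is_inobject() && !receiver_map->IsUnboxedDoubleField(index)) {
      // A deleted in-object tagged field may later be overwritten with an
      // untagged value, so this path must invalidate.
      isolate->heap()->ClearRecordedSlot(*receiver,
                                         receiver->RawField(index.offset()));
      MemoryChunk::FromHeapObject(*receiver)->InvalidateRecordedSlots(
          *receiver, receiver_size);
    }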