[heap] Refactor elements write barrier to range write barrier

... in order to make it optimizable when pointer compression is enabled.
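
A minimal sketch of the new shape (illustrative fragment using identifiers
introduced in this CL, not a standalone compilable unit): callers now hand
the heap a raw [start, end) slot range instead of a FixedArray plus element
indices, so the barrier can walk the tagged slots directly and can later be
taught to skip value decompression when pointer compression is enabled.

  // Before: element-based combined barrier, FixedArray-specific:
  //   FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, dst_index, len);
  // After: one generational+marking barrier over a slot range of any object:
  ObjectSlot dst_slot = array->RawFieldOfElementAt(dst_index);
  heap->WriteBarrierForRange(array, dst_slot, dst_slot + len);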

Bug: v8:9183
Change-Id: I8b92e48cc43dcc823eefb5a8a4a29de7a8ba0e78
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1609545
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61476}
Author: Igor Sheludko
Date: 2019-05-14 12:49:57 +02:00
Committed by: Commit Bot
Commit: c57e8f1434 (parent: d0e889b07d)

10 changed files with 141 additions and 166 deletions


@@ -178,7 +178,7 @@ void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base,
(IsObjectElementsKind(from_kind) && IsObjectElementsKind(to_kind))
? UPDATE_WRITE_BARRIER
: SKIP_WRITE_BARRIER;
to->CopyElements(isolate->heap(), to_start, from, from_start, copy_size,
to->CopyElements(isolate, to_start, from, from_start, copy_size,
write_barrier_mode);
}
@@ -470,32 +470,30 @@ static void TraceTopFrame(Isolate* isolate) {
JavaScriptFrame::PrintTop(isolate, stdout, false, true);
}
static void SortIndices(
Isolate* isolate, Handle<FixedArray> indices, uint32_t sort_size,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER) {
static void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
uint32_t sort_size) {
// Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
AtomicSlot start(indices->GetFirstElementAddress());
std::sort(start, start + sort_size,
[isolate](Tagged_t elementA, Tagged_t elementB) {
AtomicSlot end(start + sort_size);
std::sort(start, end, [isolate](Tagged_t elementA, Tagged_t elementB) {
#ifdef V8_COMPRESS_POINTERS
Object a(DecompressTaggedAny(isolate->isolate_root(), elementA));
Object b(DecompressTaggedAny(isolate->isolate_root(), elementB));
Object a(DecompressTaggedAny(isolate->isolate_root(), elementA));
Object b(DecompressTaggedAny(isolate->isolate_root(), elementB));
#else
Object a(elementA);
Object b(elementB);
Object a(elementA);
Object b(elementB);
#endif
if (a->IsSmi() || !a->IsUndefined(isolate)) {
if (!b->IsSmi() && b->IsUndefined(isolate)) {
return true;
}
return a->Number() < b->Number();
}
return !b->IsSmi() && b->IsUndefined(isolate);
});
if (write_barrier_mode != SKIP_WRITE_BARRIER) {
FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(isolate->heap(), *indices, 0, sort_size);
}
if (a->IsSmi() || !a->IsUndefined(isolate)) {
if (!b->IsSmi() && b->IsUndefined(isolate)) {
return true;
}
return a->Number() < b->Number();
}
return !b->IsSmi() && b->IsUndefined(isolate);
});
isolate->heap()->WriteBarrierForRange(*indices, ObjectSlot(start),
ObjectSlot(end));
}
static Maybe<bool> IncludesValueSlowPath(Isolate* isolate,
@@ -2239,13 +2237,13 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Handle<FixedArrayBase> backing_store, int dst_index,
int src_index, int len, int hole_start,
int hole_end) {
Heap* heap = isolate->heap();
Handle<BackingStore> dst_elms = Handle<BackingStore>::cast(backing_store);
if (len > JSArray::kMaxCopyElements && dst_index == 0 &&
heap->CanMoveObjectStart(*dst_elms)) {
isolate->heap()->CanMoveObjectStart(*dst_elms)) {
// Update all the copies of this backing_store handle.
*dst_elms.location() =
BackingStore::cast(heap->LeftTrimFixedArray(*dst_elms, src_index))
BackingStore::cast(
isolate->heap()->LeftTrimFixedArray(*dst_elms, src_index))
->ptr();
receiver->set_elements(*dst_elms);
// Adjust the hole offset as the array has been shrunk.
@@ -2254,7 +2252,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
DCHECK_LE(hole_end, backing_store->length());
} else if (len != 0) {
WriteBarrierMode mode = GetWriteBarrierMode(KindTraits::Kind);
dst_elms->MoveElements(heap, dst_index, src_index, len, mode);
dst_elms->MoveElements(isolate, dst_index, src_index, len, mode);
}
if (hole_start != hole_end) {
dst_elms->FillWithHoles(hole_start, hole_end);


@@ -38,12 +38,6 @@ V8_EXPORT_PRIVATE void Heap_GenerationalBarrierForCodeSlow(Code host,
V8_EXPORT_PRIVATE void Heap_MarkingBarrierForCodeSlow(Code host,
RelocInfo* rinfo,
HeapObject object);
V8_EXPORT_PRIVATE void Heap_GenerationalBarrierForElementsSlow(Heap* heap,
FixedArray array,
int offset,
int length);
V8_EXPORT_PRIVATE void Heap_MarkingBarrierForElementsSlow(Heap* heap,
FixedArray object);
V8_EXPORT_PRIVATE void Heap_MarkingBarrierForDescriptorArraySlow(
Heap* heap, HeapObject host, HeapObject descriptor_array,
int number_of_own_descriptors);
@@ -183,15 +177,6 @@ inline void GenerationalBarrier(HeapObject object, MaybeObjectSlot slot,
value_heap_object);
}
inline void GenerationalBarrierForElements(Heap* heap, FixedArray array,
int offset, int length) {
heap_internals::MemoryChunk* array_chunk =
heap_internals::MemoryChunk::FromHeapObject(array);
if (array_chunk->InYoungGeneration()) return;
Heap_GenerationalBarrierForElementsSlow(heap, array, offset, length);
}
inline void GenerationalBarrierForCode(Code host, RelocInfo* rinfo,
HeapObject object) {
heap_internals::MemoryChunk* object_chunk =
@@ -216,14 +201,6 @@ inline void MarkingBarrier(HeapObject object, MaybeObjectSlot slot,
value_heap_object);
}
inline void MarkingBarrierForElements(Heap* heap, FixedArray array) {
heap_internals::MemoryChunk* object_chunk =
heap_internals::MemoryChunk::FromHeapObject(array);
if (!object_chunk->IsMarking()) return;
Heap_MarkingBarrierForElementsSlow(heap, array);
}
inline void MarkingBarrierForCode(Code host, RelocInfo* rinfo,
HeapObject object) {
DCHECK(!HasWeakHeapObjectTag(object));


@@ -20,13 +20,6 @@ class EphemeronHashTable;
// Note: In general it is preferred to use the macros defined in
// object-macros.h.
// Write barrier for FixedArray elements.
#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
do { \
GenerationalBarrierForElements(heap, array, start, length); \
MarkingBarrierForElements(heap, array); \
} while (false)
// Combined write barriers.
void WriteBarrierForCode(Code host, RelocInfo* rinfo, Object value);
void WriteBarrierForCode(Code host);
@@ -37,14 +30,11 @@ void GenerationalBarrier(HeapObject object, MaybeObjectSlot slot,
MaybeObject value);
void GenerationalEphemeronKeyBarrier(EphemeronHashTable table, ObjectSlot slot,
Object value);
void GenerationalBarrierForElements(Heap* heap, FixedArray array, int offset,
int length);
void GenerationalBarrierForCode(Code host, RelocInfo* rinfo, HeapObject object);
// Marking write barrier.
void MarkingBarrier(HeapObject object, ObjectSlot slot, Object value);
void MarkingBarrier(HeapObject object, MaybeObjectSlot slot, MaybeObject value);
void MarkingBarrierForElements(Heap* heap, FixedArray array);
void MarkingBarrierForCode(Code host, RelocInfo* rinfo, HeapObject object);
void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject host,


@@ -54,6 +54,7 @@
#include "src/objects/hash-table-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/slots-inl.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
@@ -107,15 +108,6 @@ void Heap_MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
Heap::MarkingBarrierForCodeSlow(host, rinfo, object);
}
void Heap_GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
int offset, int length) {
Heap::GenerationalBarrierForElementsSlow(heap, array, offset, length);
}
void Heap_MarkingBarrierForElementsSlow(Heap* heap, FixedArray array) {
Heap::MarkingBarrierForElementsSlow(heap, array);
}
void Heap_MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
HeapObject descriptor_array,
int number_of_own_descriptors) {
@@ -1507,70 +1499,73 @@ void Heap::StartIdleIncrementalMarking(
gc_callback_flags);
}
void Heap::MoveElements(FixedArray array, int dst_index, int src_index, int len,
WriteBarrierMode mode) {
if (len == 0) return;
void Heap::MoveRange(HeapObject dst_object, const ObjectSlot dst_slot,
const ObjectSlot src_slot, int len,
WriteBarrierMode mode) {
DCHECK_NE(len, 0);
DCHECK_NE(dst_object->map(), ReadOnlyRoots(this).fixed_cow_array_map());
const ObjectSlot dst_end(dst_slot + len);
// Ensure no range overflow.
DCHECK(dst_slot < dst_end);
DCHECK(src_slot < src_slot + len);
DCHECK_NE(array->map(), ReadOnlyRoots(this).fixed_cow_array_map());
ObjectSlot dst = array->RawFieldOfElementAt(dst_index);
ObjectSlot src = array->RawFieldOfElementAt(src_index);
if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
if (dst < src) {
for (int i = 0; i < len; i++) {
dst.Relaxed_Store(src.Relaxed_Load());
if (dst_slot < src_slot) {
// Copy tagged values forward using relaxed load/stores that do not
// involve value decompression.
const AtomicSlot atomic_dst_end(dst_end);
AtomicSlot dst(dst_slot);
AtomicSlot src(src_slot);
while (dst < atomic_dst_end) {
*dst = *src;
++dst;
++src;
}
} else {
// Copy backwards.
dst += len - 1;
src += len - 1;
for (int i = 0; i < len; i++) {
dst.Relaxed_Store(src.Relaxed_Load());
// Copy tagged values backwards using relaxed load/stores that do not
// involve value decompression.
const AtomicSlot atomic_dst_begin(dst_slot);
AtomicSlot dst(dst_slot + len - 1);
AtomicSlot src(src_slot + len - 1);
while (dst >= atomic_dst_begin) {
*dst = *src;
--dst;
--src;
}
}
} else {
MemMove(dst.ToVoidPtr(), src.ToVoidPtr(), len * kTaggedSize);
MemMove(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
}
if (mode == SKIP_WRITE_BARRIER) return;
FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
WriteBarrierForRange(dst_object, dst_slot, dst_end);
}
void Heap::CopyElements(FixedArray dst_array, FixedArray src_array,
int dst_index, int src_index, int len,
WriteBarrierMode mode) {
DCHECK_NE(dst_array, src_array);
if (len == 0) return;
void Heap::CopyRange(HeapObject dst_object, const ObjectSlot dst_slot,
const ObjectSlot src_slot, int len,
WriteBarrierMode mode) {
DCHECK_NE(len, 0);
DCHECK_NE(dst_array->map(), ReadOnlyRoots(this).fixed_cow_array_map());
ObjectSlot dst = dst_array->RawFieldOfElementAt(dst_index);
ObjectSlot src = src_array->RawFieldOfElementAt(src_index);
DCHECK_NE(dst_object->map(), ReadOnlyRoots(this).fixed_cow_array_map());
const ObjectSlot dst_end(dst_slot + len);
// Ensure ranges do not overlap.
DCHECK(dst + len <= src || src + len <= dst);
DCHECK(dst_end <= src_slot || (src_slot + len) <= dst_slot);
if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
if (dst < src) {
for (int i = 0; i < len; i++) {
dst.Relaxed_Store(src.Relaxed_Load());
++dst;
++src;
}
} else {
// Copy backwards.
dst += len - 1;
src += len - 1;
for (int i = 0; i < len; i++) {
dst.Relaxed_Store(src.Relaxed_Load());
--dst;
--src;
}
// Copy tagged values using relaxed load/stores that do not involve value
// decompression.
const AtomicSlot atomic_dst_end(dst_end);
AtomicSlot dst(dst_slot);
AtomicSlot src(src_slot);
while (dst < atomic_dst_end) {
*dst = *src;
++dst;
++src;
}
} else {
MemCopy(dst.ToVoidPtr(), src.ToVoidPtr(), len * kTaggedSize);
MemCopy(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
}
if (mode == SKIP_WRITE_BARRIER) return;
FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, dst_array, dst_index, len);
WriteBarrierForRange(dst_object, dst_slot, dst_end);
}
#ifdef VERIFY_HEAP
@@ -5848,12 +5843,19 @@ void Heap::EphemeronKeyWriteBarrierFromCode(Address raw_object,
maybe_key);
}
void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
int offset, int length) {
for (int i = 0; i < length; i++) {
if (!InYoungGeneration(array->get(offset + i))) continue;
heap->store_buffer()->InsertEntry(
array->RawFieldOfElementAt(offset + i).address());
void Heap::WriteBarrierForRange(HeapObject object, ObjectSlot start_slot,
ObjectSlot end_slot) {
// TODO(ishell): iterate values only once and avoid generic decompression.
if (!InYoungGeneration(object)) {
for (ObjectSlot slot = start_slot; slot < end_slot; ++slot) {
Object value = *slot;
if (InYoungGeneration(value)) {
store_buffer()->InsertEntry(slot.address());
}
}
}
if (incremental_marking()->IsMarking()) {
incremental_marking()->RecordWrites(object, start_slot, end_slot);
}
}
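
For a caller-side picture (a hypothetical, hedged example: `host`, `count`,
and `kFieldOffset` are assumed names, not part of this CL), any code that
fills a [start, end) range of tagged slots by hand can now follow the raw
writes with one combined call instead of per-slot barriers:

  // Hypothetical bulk initialization of `count` tagged slots in `host`:
  Object value = ReadOnlyRoots(heap).undefined_value();
  ObjectSlot start = host->RawField(kFieldOffset);
  ObjectSlot end = start + count;
  for (ObjectSlot slot = start; slot < end; ++slot) slot.store(value);
  heap->WriteBarrierForRange(host, start, end);
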
@@ -5889,10 +5891,6 @@ void Heap::MarkingBarrierSlow(HeapObject object, Address slot,
value);
}
void Heap::MarkingBarrierForElementsSlow(Heap* heap, FixedArray array) {
heap->incremental_marking()->RecordWrites(array);
}
void Heap::MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
HeapObject object) {
Heap* heap = Heap::FromWritableHeapObject(host);


@@ -351,6 +351,11 @@ class Heap {
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
// Executes generational and/or marking write barrier for a [start, end) range
// of non-weak slots inside |object|.
V8_EXPORT_PRIVATE void WriteBarrierForRange(HeapObject object,
ObjectSlot start, ObjectSlot end);
V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code host);
V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object,
Address slot,
@@ -359,15 +364,11 @@
Address key_slot);
V8_EXPORT_PRIVATE static void EphemeronKeyWriteBarrierFromCode(
Address raw_object, Address address, Isolate* isolate);
V8_EXPORT_PRIVATE static void GenerationalBarrierForElementsSlow(
Heap* heap, FixedArray array, int offset, int length);
V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
Code host, RelocInfo* rinfo, HeapObject value);
V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject object,
Address slot,
HeapObject value);
V8_EXPORT_PRIVATE static void MarkingBarrierForElementsSlow(Heap* heap,
FixedArray array);
V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code host,
RelocInfo* rinfo,
HeapObject value);
@@ -389,14 +390,15 @@
inline Address* OldSpaceAllocationTopAddress();
inline Address* OldSpaceAllocationLimitAddress();
// Move len elements within a given array from src_index index to dst_index
// index.
void MoveElements(FixedArray array, int dst_index, int src_index, int len,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Move len non-weak tagged elements from src_slot to dst_slot of dst_object.
// The source and destination memory ranges can overlap.
void MoveRange(HeapObject dst_object, ObjectSlot dst_slot,
ObjectSlot src_slot, int len, WriteBarrierMode mode);
// Copy len elements from src_index of src array to dst_index of dst array.
void CopyElements(FixedArray dst, FixedArray src, int dst_index,
int src_index, int len, WriteBarrierMode mode);
// Copy len non-weak tagged elements from src_slot to dst_slot of dst_object.
// The source and destination memory ranges must not overlap.
void CopyRange(HeapObject dst_object, ObjectSlot dst_slot,
ObjectSlot src_slot, int len, WriteBarrierMode mode);
// Initialize a filler object to keep the ability to iterate over the heap
// when introducing gaps within pages. If slots could have been recorded in


@@ -70,9 +70,8 @@ IncrementalMarking::IncrementalMarking(
SetState(STOPPED);
}
bool IncrementalMarking::BaseRecordWrite(HeapObject obj, Object value) {
HeapObject value_heap_obj = HeapObject::cast(value);
DCHECK(!marking_state()->IsImpossible(value_heap_obj));
bool IncrementalMarking::BaseRecordWrite(HeapObject obj, HeapObject value) {
DCHECK(!marking_state()->IsImpossible(value));
DCHECK(!marking_state()->IsImpossible(obj));
#ifdef V8_CONCURRENT_MARKING
// The write barrier stub generated with V8_CONCURRENT_MARKING does not
@@ -82,14 +81,14 @@ bool IncrementalMarking::BaseRecordWrite(HeapObject obj, Object value) {
const bool need_recording = marking_state()->IsBlack(obj);
#endif
if (need_recording && WhiteToGreyAndPush(value_heap_obj)) {
if (need_recording && WhiteToGreyAndPush(value)) {
RestartIfNotMarking();
}
return is_compacting_ && need_recording;
}
void IncrementalMarking::RecordWriteSlow(HeapObject obj, HeapObjectSlot slot,
Object value) {
HeapObject value) {
if (BaseRecordWrite(obj, value) && slot.address() != kNullAddress) {
// Object is not going to be rescanned, so we need to record the slot.
heap_->mark_compact_collector()->RecordSlot(obj, slot,
@@ -117,25 +116,26 @@ void IncrementalMarking::RecordWriteIntoCode(Code host, RelocInfo* rinfo,
}
}
void IncrementalMarking::RecordWrites(FixedArray array) {
int length = array->length();
MarkCompactCollector* collector = heap_->mark_compact_collector();
MemoryChunk* source_page = MemoryChunk::FromHeapObject(array);
void IncrementalMarking::RecordWrites(HeapObject object, ObjectSlot start_slot,
ObjectSlot end_slot) {
MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
if (source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
for (int i = 0; i < length; i++) {
Object value = array->get(i);
if (value->IsHeapObject()) {
BaseRecordWrite(array, HeapObject::cast(value));
for (ObjectSlot slot = start_slot; slot < end_slot; ++slot) {
Object value = *slot;
HeapObject value_heap_object;
if (value.GetHeapObject(&value_heap_object)) {
BaseRecordWrite(object, value_heap_object);
}
}
} else {
for (int i = 0; i < length; i++) {
Object value = array->get(i);
if (value->IsHeapObject() &&
BaseRecordWrite(array, HeapObject::cast(value))) {
collector->RecordSlot(source_page,
HeapObjectSlot(array->RawFieldOfElementAt(i)),
HeapObject::cast(value));
MarkCompactCollector* collector = heap_->mark_compact_collector();
for (ObjectSlot slot = start_slot; slot < end_slot; ++slot) {
Object value = *slot;
HeapObject value_heap_object;
if (value.GetHeapObject(&value_heap_object) &&
BaseRecordWrite(object, value_heap_object)) {
collector->RecordSlot(source_page, HeapObjectSlot(slot),
value_heap_object);
}
}
}


@@ -198,18 +198,19 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// No slots in white objects should be recorded, as some slots are typed and
// cannot be interpreted correctly if the underlying object does not survive
// the incremental cycle (stays white).
V8_INLINE bool BaseRecordWrite(HeapObject obj, Object value);
V8_INLINE bool BaseRecordWrite(HeapObject obj, HeapObject value);
V8_INLINE void RecordWrite(HeapObject obj, ObjectSlot slot, Object value);
V8_INLINE void RecordMaybeWeakWrite(HeapObject obj, MaybeObjectSlot slot,
MaybeObject value);
void RecordWrites(FixedArray array);
void RecordWrites(HeapObject object, ObjectSlot start_slot,
ObjectSlot end_slot);
void RevisitObject(HeapObject obj);
// Ensures that all descriptors in the range [0, number_of_own_descriptors)
// are visited.
void VisitDescriptors(HeapObject host, DescriptorArray array,
int number_of_own_descriptors);
void RecordWriteSlow(HeapObject obj, HeapObjectSlot slot, Object value);
void RecordWriteSlow(HeapObject obj, HeapObjectSlot slot, HeapObject value);
void RecordWriteIntoCode(Code host, RelocInfo* rinfo, HeapObject value);
// Returns true if the function succeeds in transitioning the object


@@ -223,16 +223,23 @@ ObjectSlot FixedArray::RawFieldOfElementAt(int index) {
return RawField(OffsetOfElementAt(index));
}
void FixedArray::MoveElements(Heap* heap, int dst_index, int src_index, int len,
WriteBarrierMode mode) {
void FixedArray::MoveElements(Isolate* isolate, int dst_index, int src_index,
int len, WriteBarrierMode mode) {
if (len == 0) return;
DisallowHeapAllocation no_gc;
heap->MoveElements(*this, dst_index, src_index, len, mode);
ObjectSlot dst_slot(RawFieldOfElementAt(dst_index));
ObjectSlot src_slot(RawFieldOfElementAt(src_index));
isolate->heap()->MoveRange(*this, dst_slot, src_slot, len, mode);
}
void FixedArray::CopyElements(Heap* heap, int dst_index, FixedArray src,
void FixedArray::CopyElements(Isolate* isolate, int dst_index, FixedArray src,
int src_index, int len, WriteBarrierMode mode) {
if (len == 0) return;
DisallowHeapAllocation no_gc;
heap->CopyElements(*this, src, dst_index, src_index, len, mode);
ObjectSlot dst_slot(RawFieldOfElementAt(dst_index));
ObjectSlot src_slot(src->RawFieldOfElementAt(src_index));
isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode);
}
// Perform a binary search in a fixed array.
@@ -392,8 +399,9 @@ bool FixedDoubleArray::is_the_hole(int index) {
return get_representation(index) == kHoleNanInt64;
}
void FixedDoubleArray::MoveElements(Heap* heap, int dst_index, int src_index,
int len, WriteBarrierMode mode) {
void FixedDoubleArray::MoveElements(Isolate* isolate, int dst_index,
int src_index, int len,
WriteBarrierMode mode) {
DCHECK_EQ(SKIP_WRITE_BARRIER, mode);
double* data_start =
reinterpret_cast<double*>(FIELD_ADDR(*this, kHeaderSize));
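
Caller-facing, the FixedArray wrappers above boil down to calls like the
following (a hedged usage sketch; `array`, `src`, `isolate`, and `count` are
assumed to be in scope, and `array` must not be a COW array):

  // Shift `count` elements starting at index 1 down to index 0:
  array->MoveElements(isolate, /*dst_index=*/0, /*src_index=*/1, count,
                      UPDATE_WRITE_BARRIER);
  // Copy `count` elements from the start of `src` into `array` at index 0:
  array->CopyElements(isolate, /*dst_index=*/0, src, /*src_index=*/0, count,
                      UPDATE_WRITE_BARRIER);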


@@ -153,10 +153,10 @@ class FixedArray : public FixedArrayBase {
// Gives access to raw memory which stores the array's data.
inline ObjectSlot data_start();
inline void MoveElements(Heap* heap, int dst_index, int src_index, int len,
WriteBarrierMode mode);
inline void MoveElements(Isolate* isolate, int dst_index, int src_index,
int len, WriteBarrierMode mode);
inline void CopyElements(Heap* heap, int dst_index, FixedArray src,
inline void CopyElements(Isolate* isolate, int dst_index, FixedArray src,
int src_index, int len, WriteBarrierMode mode);
inline void FillWithHoles(int from, int to);
@@ -245,8 +245,8 @@ class FixedDoubleArray : public FixedArrayBase {
return kHeaderSize + length * kDoubleSize;
}
inline void MoveElements(Heap* heap, int dst_index, int src_index, int len,
WriteBarrierMode mode);
inline void MoveElements(Isolate* isolate, int dst_index, int src_index,
int len, WriteBarrierMode mode);
inline void FillWithHoles(int from, int to);


@@ -1605,7 +1605,8 @@ class ThreadImpl {
int dst = static_cast<int>(StackHeight() - (sp_ - dest));
int src = static_cast<int>(StackHeight() - arity);
int len = static_cast<int>(arity);
isolate_->heap()->MoveElements(reference_stack(), dst, src, len);
reference_stack().MoveElements(isolate_, dst, src, len,
UPDATE_WRITE_BARRIER);
}
sp_ = dest + arity;
}