[heap,iwyu] Provide slim write barrier header
Move the write barrier essentials into heap/heap-write-barrier-inl.h. Avoid
including further heap inline headers by relying on a page-alignment constant
to locate and load the page flags.

Bug: v8:7490
Change-Id: I2891299f1b1ca2c3e2031cb9c63b583b1665e3f9
Reviewed-on: https://chromium-review.googlesource.com/1148448
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54710}
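The gist of the fast path this enables: a write barrier check only needs to mask
an object address with the page-alignment constant and read a flags word at a
fixed offset, so users of object-macros.h no longer have to pull in heap-inl.h.
Below is a minimal, self-contained sketch of that check, not V8 code: the demo
page size (12 bits), the SlimChunk name, and main() are assumptions for
illustration; the real constants and types are in the new
src/heap/heap-write-barrier-inl.h shown further down.

// barrier_fast_path_sketch.cc -- illustration only, not part of this CL.
#include <cstdint>
#include <cstdio>

namespace sketch {

// Demo-sized page; V8's kPageSizeBits is larger and platform dependent.
constexpr int kPageSizeBits = 12;
constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << kPageSizeBits) - 1;
// Same offset choice as heap_internals::MemoryChunk::kFlagsOffset in the diff.
constexpr uintptr_t kFlagsOffset = sizeof(size_t);
constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;  // mirrors INCREMENTAL_MARKING

// Slim stand-in for heap_internals::MemoryChunk: just enough to test one flag.
struct SlimChunk {
  static const SlimChunk* FromAddress(uintptr_t addr) {
    // Mask the address down to the page start; no heap data structures needed.
    return reinterpret_cast<const SlimChunk*>(addr & ~kPageAlignmentMask);
  }
  uintptr_t GetFlags() const {
    return *reinterpret_cast<const uintptr_t*>(
        reinterpret_cast<const uint8_t*>(this) + kFlagsOffset);
  }
  bool IsMarking() const { return (GetFlags() & kMarkingBit) != 0; }
};

}  // namespace sketch

int main() {
  // Fake a page whose flags word lives at kFlagsOffset from the page start.
  alignas(1 << sketch::kPageSizeBits) static uint8_t page[1 << sketch::kPageSizeBits] = {};
  *reinterpret_cast<uintptr_t*>(page + sketch::kFlagsOffset) = sketch::kMarkingBit;

  // Any address inside the page reaches the same flags word by masking alone.
  uintptr_t object_addr = reinterpret_cast<uintptr_t>(&page[128]);
  std::printf("IsMarking: %d\n", sketch::SlimChunk::FromAddress(object_addr)->IsMarking());
  return 0;
}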
Parent: 22b441ac69 · Commit: 9e3d6cbec5

BUILD.gn (+2)
@@ -1965,6 +1965,8 @@ v8_source_set("v8_base") {
  "src/heap/heap-controller.cc",
  "src/heap/heap-controller.h",
  "src/heap/heap-inl.h",
+ "src/heap/heap-write-barrier-inl.h",
+ "src/heap/heap-write-barrier.h",
  "src/heap/heap.cc",
  "src/heap/heap.h",
  "src/heap/incremental-marking-inl.h",

@@ -2765,7 +2765,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  void MacroAssembler::CheckPageFlag(const Register& object,
                                     const Register& scratch, int mask,
                                     Condition cc, Label* condition_met) {
-   And(scratch, object, ~Page::kPageAlignmentMask);
+   And(scratch, object, ~kPageAlignmentMask);
    Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
    if (cc == eq) {
      TestAndBranchIfAnySet(scratch, mask, condition_met);
@@ -2777,7 +2777,7 @@ void MacroAssembler::CheckPageFlag(const Register& object,
  void TurboAssembler::CheckPageFlagSet(const Register& object,
                                        const Register& scratch, int mask,
                                        Label* if_any_set) {
-   And(scratch, object, ~Page::kPageAlignmentMask);
+   And(scratch, object, ~kPageAlignmentMask);
    Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
    TestAndBranchIfAnySet(scratch, mask, if_any_set);
  }
@@ -2785,7 +2785,7 @@ void TurboAssembler::CheckPageFlagSet(const Register& object,
  void TurboAssembler::CheckPageFlagClear(const Register& object,
                                          const Register& scratch, int mask,
                                          Label* if_all_clear) {
-   And(scratch, object, ~Page::kPageAlignmentMask);
+   And(scratch, object, ~kPageAlignmentMask);
    Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
    TestAndBranchIfAllClear(scratch, mask, if_all_clear);
  }

@@ -221,7 +221,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
  }

  Node* IsPageFlagSet(Node* object, int mask) {
-   Node* page = WordAnd(object, IntPtrConstant(~Page::kPageAlignmentMask));
+   Node* page = WordAnd(object, IntPtrConstant(~kPageAlignmentMask));
    Node* flags = Load(MachineType::Pointer(), page,
                       IntPtrConstant(MemoryChunk::kFlagsOffset));
    return WordNotEqual(WordAnd(flags, IntPtrConstant(mask)),
@@ -241,7 +241,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
  }

  void GetMarkBit(Node* object, Node** cell, Node** mask) {
-   Node* page = WordAnd(object, IntPtrConstant(~Page::kPageAlignmentMask));
+   Node* page = WordAnd(object, IntPtrConstant(~kPageAlignmentMask));

    {
      // Temp variable to calculate cell offset in bitmap.
@@ -249,7 +249,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
    int shift = Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 -
                Bitmap::kBytesPerCellLog2;
    r0 = WordShr(object, IntPtrConstant(shift));
-   r0 = WordAnd(r0, IntPtrConstant((Page::kPageAlignmentMask >> shift) &
+   r0 = WordAnd(r0, IntPtrConstant((kPageAlignmentMask >> shift) &
                                    ~(Bitmap::kBytesPerCell - 1)));
    *cell = IntPtrAdd(IntPtrAdd(page, r0),
                      IntPtrConstant(MemoryChunk::kHeaderSize));

@@ -9580,7 +9580,7 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object,
  }

  TNode<IntPtrT> CodeStubAssembler::PageFromAddress(TNode<IntPtrT> address) {
-   return WordAnd(address, IntPtrConstant(~Page::kPageAlignmentMask));
+   return WordAnd(address, IntPtrConstant(~kPageAlignmentMask));
  }

  TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(

@@ -3719,8 +3719,8 @@ void TranslatedState::InitializeJSObjectAt(
  Handle<Object> properties = GetValueAndAdvance(frame, value_index);
  WRITE_FIELD(*object_storage, JSObject::kPropertiesOrHashOffset,
              *properties);
- WRITE_BARRIER(isolate()->heap(), *object_storage,
-               JSObject::kPropertiesOrHashOffset, *properties);
+ WRITE_BARRIER(*object_storage, JSObject::kPropertiesOrHashOffset,
+               *properties);
  }

  // For all the other fields we first look at the fixed array and check the
@@ -3747,11 +3747,11 @@ void TranslatedState::InitializeJSObjectAt(
  } else if (marker == kStoreMutableHeapNumber) {
    CHECK(field_value->IsMutableHeapNumber());
    WRITE_FIELD(*object_storage, offset, *field_value);
-   WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
+   WRITE_BARRIER(*object_storage, offset, *field_value);
  } else {
    CHECK_EQ(kStoreTagged, marker);
    WRITE_FIELD(*object_storage, offset, *field_value);
-   WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
+   WRITE_BARRIER(*object_storage, offset, *field_value);
  }
  }
  object_storage->synchronized_set_map(*map);
@@ -3787,7 +3787,7 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
  }

  WRITE_FIELD(*object_storage, offset, *field_value);
- WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
+ WRITE_BARRIER(*object_storage, offset, *field_value);
  }

  object_storage->synchronized_set_map(*map);

@@ -167,7 +167,7 @@ void FeedbackVector::set(int index, MaybeObject* value, WriteBarrierMode mode) {
  DCHECK_LT(index, this->length());
  int offset = kFeedbackSlotsOffset + index * kPointerSize;
  RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WEAK_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+ CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
  }

  void FeedbackVector::Set(FeedbackSlot slot, Object* value,

@@ -462,6 +462,9 @@ constexpr uint32_t kFreeListZapValue = 0xfeed1eaf;
  constexpr int kCodeZapValue = 0xbadc0de;
  constexpr uint32_t kPhantomReferenceZap = 0xca11bac;

+ // Page constants.
+ static const intptr_t kPageAlignmentMask = (intptr_t{1} << kPageSizeBits) - 1;
+
  // On Intel architecture, cache line size is 64 bytes.
  // On ARM it may be less (32 bytes), but as far this constant is
  // used for aligning data, it doesn't hurt to align on a greater value.

@@ -8,12 +8,15 @@
  #include <cmath>

  // Clients of this interface shouldn't depend on lots of heap internals.
- // Do not include anything from src/heap other than src/heap/heap.h here!
+ // Do not include anything from src/heap other than src/heap/heap.h and its
+ // write barrier here!
+ #include "src/heap/heap-write-barrier.h"
  #include "src/heap/heap.h"

  #include "src/base/platform/platform.h"
  #include "src/counters-inl.h"
  #include "src/feedback-vector.h"

  // TODO(mstarzinger): There are 3 more includes to remove in order to no longer
  // leak heap internals to users of this interface!
  #include "src/heap/incremental-marking-inl.h"
@@ -32,6 +35,12 @@
  #include "src/string-hasher.h"
  #include "src/zone/zone-list-inl.h"

+ // The following header includes the write barrier essentials that can also be
+ // used stand-alone without including heap-inl.h.
+ // TODO(mlippautz): Remove once users of object-macros.h include this file on
+ // their own.
+ #include "src/heap/heap-write-barrier-inl.h"
+
  namespace v8 {
  namespace internal {

src/heap/heap-write-barrier-inl.h (new file, 109 lines)
@@ -0,0 +1,109 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_WRITE_BARRIER_INL_H_
#define V8_HEAP_HEAP_WRITE_BARRIER_INL_H_

#include "src/heap/heap-write-barrier.h"

#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/objects-inl.h"
#include "src/objects/maybe-object-inl.h"

namespace v8 {
namespace internal {

// Do not use these internal details anywhere outside of this file. These
// internals are only intended to shortcut write barrier checks.
namespace heap_internals {

struct MemoryChunk {
  static constexpr uintptr_t kFlagsOffset = sizeof(size_t);
  static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
  static constexpr uintptr_t kFromSpaceBit = uintptr_t{1} << 3;
  static constexpr uintptr_t kToSpaceBit = uintptr_t{1} << 4;

  V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
      HeapObject* object) {
    return reinterpret_cast<MemoryChunk*>(reinterpret_cast<Address>(object) &
                                          ~kPageAlignmentMask);
  }

  V8_INLINE bool IsMarking() const { return GetFlags() & kMarkingBit; }

  V8_INLINE bool InNewSpace() const {
    constexpr uintptr_t kNewSpaceMask = kFromSpaceBit | kToSpaceBit;
    return GetFlags() & kNewSpaceMask;
  }

  V8_INLINE uintptr_t GetFlags() const {
    return *reinterpret_cast<const uintptr_t*>(
        reinterpret_cast<const uint8_t*>(this) + kFlagsOffset);
  }
};

inline void GenerationalBarrierInternal(HeapObject* object, Address slot,
                                        HeapObject* value) {
  DCHECK(Heap::PageFlagsAreConsistent(object));
  heap_internals::MemoryChunk* value_chunk =
      heap_internals::MemoryChunk::FromHeapObject(value);
  heap_internals::MemoryChunk* object_chunk =
      heap_internals::MemoryChunk::FromHeapObject(object);

  if (!value_chunk->InNewSpace() || object_chunk->InNewSpace()) return;

  Heap::GenerationalBarrierSlow(object, slot, value);
}

inline void MarkingBarrierInternal(HeapObject* object, Address slot,
                                   HeapObject* value) {
  DCHECK(Heap::PageFlagsAreConsistent(object));
  heap_internals::MemoryChunk* value_chunk =
      heap_internals::MemoryChunk::FromHeapObject(value);

  if (!value_chunk->IsMarking()) return;

  Heap::MarkingBarrierSlow(object, slot, value);
}

}  // namespace heap_internals

inline void GenerationalBarrier(HeapObject* object, Object** slot,
                                Object* value) {
  DCHECK(!HasWeakHeapObjectTag(*slot));
  DCHECK(!HasWeakHeapObjectTag(value));
  if (!value->IsHeapObject()) return;
  heap_internals::GenerationalBarrierInternal(
      object, reinterpret_cast<Address>(slot), HeapObject::cast(value));
}

inline void GenerationalBarrier(HeapObject* object, MaybeObject** slot,
                                MaybeObject* value) {
  HeapObject* value_heap_object;
  if (!value->ToStrongOrWeakHeapObject(&value_heap_object)) return;
  heap_internals::GenerationalBarrierInternal(
      object, reinterpret_cast<Address>(slot), value_heap_object);
}

inline void MarkingBarrier(HeapObject* object, Object** slot, Object* value) {
  DCHECK_IMPLIES(slot != nullptr, !HasWeakHeapObjectTag(*slot));
  DCHECK(!HasWeakHeapObjectTag(value));
  if (!value->IsHeapObject()) return;
  heap_internals::MarkingBarrierInternal(
      object, reinterpret_cast<Address>(slot), HeapObject::cast(value));
}

inline void MarkingBarrier(HeapObject* object, MaybeObject** slot,
                           MaybeObject* value) {
  HeapObject* value_heap_object;
  if (!value->ToStrongOrWeakHeapObject(&value_heap_object)) return;
  heap_internals::MarkingBarrierInternal(
      object, reinterpret_cast<Address>(slot), value_heap_object);
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_WRITE_BARRIER_INL_H_

src/heap/heap-write-barrier.h (new file, 30 lines)
@@ -0,0 +1,30 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_WRITE_BARRIER_H_
#define V8_HEAP_HEAP_WRITE_BARRIER_H_

namespace v8 {
namespace internal {

class HeapObject;
class MaybeObject;
class Object;

// Note: In general it is preferred to use the macros defined in
// object-macros.h.

// Generational write barrier.
void GenerationalBarrier(HeapObject* object, Object** slot, Object* value);
void GenerationalBarrier(HeapObject* object, MaybeObject** slot,
                         MaybeObject* value);

// Marking write barrier.
void MarkingBarrier(HeapObject* object, Object** slot, Object* value);
void MarkingBarrier(HeapObject* object, MaybeObject** slot, MaybeObject* value);

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_WRITE_BARRIER_H_

@@ -31,6 +31,7 @@
  #include "src/heap/gc-idle-time-handler.h"
  #include "src/heap/gc-tracer.h"
  #include "src/heap/heap-controller.h"
+ #include "src/heap/heap-write-barrier-inl.h"
  #include "src/heap/incremental-marking.h"
  #include "src/heap/item-parallel-job.h"
  #include "src/heap/mark-compact-inl.h"
@@ -5787,5 +5788,48 @@ Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
  }
  }

+ void Heap::GenerationalBarrierSlow(HeapObject* object, Address slot,
+                                    HeapObject* value) {
+   Heap* heap = Heap::FromWritableHeapObject(object);
+   heap->store_buffer()->InsertEntry(slot);
+ }
+
+ void Heap::MarkingBarrierSlow(HeapObject* object, Address slot,
+                               HeapObject* value) {
+   Heap* heap = Heap::FromWritableHeapObject(object);
+   heap->incremental_marking()->RecordWriteSlow(
+       object, reinterpret_cast<HeapObjectReference**>(slot), value);
+ }
+
+ bool Heap::PageFlagsAreConsistent(HeapObject* object) {
+   Heap* heap = Heap::FromWritableHeapObject(object);
+   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+   heap_internals::MemoryChunk* slim_chunk =
+       heap_internals::MemoryChunk::FromHeapObject(object);
+
+   const bool generation_consistency =
+       chunk->owner()->identity() != NEW_SPACE ||
+       (chunk->InNewSpace() && slim_chunk->InNewSpace());
+   const bool marking_consistency =
+       !heap->incremental_marking()->IsMarking() ||
+       (chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING) &&
+        slim_chunk->IsMarking());
+
+   return generation_consistency && marking_consistency;
+ }
+
+ static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
+                   heap_internals::MemoryChunk::kMarkingBit,
+               "Incremental marking flag inconsistent");
+ static_assert(MemoryChunk::Flag::IN_FROM_SPACE ==
+                   heap_internals::MemoryChunk::kFromSpaceBit,
+               "From space flag inconsistent");
+ static_assert(MemoryChunk::Flag::IN_TO_SPACE ==
+                   heap_internals::MemoryChunk::kToSpaceBit,
+               "To space flag inconsistent");
+ static_assert(MemoryChunk::kFlagsOffset ==
+                   heap_internals::MemoryChunk::kFlagsOffset,
+               "Flag offset inconsistent");
+
  } // namespace internal
  } // namespace v8

@@ -492,6 +492,14 @@ class Heap {
  // by pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

+ V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject* object,
+                                                       Address slot,
+                                                       HeapObject* value);
+ V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject* object,
+                                                  Address slot,
+                                                  HeapObject* value);
+ V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject* object);
+
  // Notifies the heap that is ok to start marking or other activities that
  // should not happen during deserialization.
  void NotifyDeserializationComplete();

@@ -1249,8 +1249,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
  bool AbortCompactionForTesting(HeapObject* object) {
    if (FLAG_stress_compaction) {
      const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
-                            Page::kPageAlignmentMask & ~kPointerAlignmentMask;
-     if ((object->address() & Page::kPageAlignmentMask) == mask) {
+                            kPageAlignmentMask & ~kPointerAlignmentMask;
+     if ((object->address() & kPageAlignmentMask) == mask) {
        Page* page = Page::FromAddress(object->address());
        if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
          page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);

@@ -908,9 +908,11 @@ void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
  if (is_marking) {
    SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+   SetFlag(MemoryChunk::INCREMENTAL_MARKING);
  } else {
    ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+   ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
  }
  }

@@ -918,8 +920,10 @@ void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
  SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+   SetFlag(MemoryChunk::INCREMENTAL_MARKING);
  } else {
    ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+   ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
  }
  }

@@ -2543,7 +2547,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
  if (!is_committed()) {
    if (!Commit()) return false;
  }
- DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
+ DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
  DCHECK_LE(new_capacity, maximum_capacity_);
  DCHECK_GT(new_capacity, current_capacity_);
  const size_t delta = new_capacity - current_capacity_;
@@ -2582,7 +2586,7 @@ void SemiSpace::RewindPages(int num_pages) {
  }

  bool SemiSpace::ShrinkTo(size_t new_capacity) {
- DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
+ DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
  DCHECK_GE(new_capacity, minimum_capacity_);
  DCHECK_LT(new_capacity, current_capacity_);
  if (is_committed()) {

@@ -110,7 +110,7 @@ class Space;
  // Some assertion macros used in the debugging mode.

  #define DCHECK_PAGE_ALIGNED(address) \
-   DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
+   DCHECK((OffsetFrom(address) & kPageAlignmentMask) == 0)

  #define DCHECK_OBJECT_ALIGNED(address) \
    DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
@@ -312,7 +312,11 @@ class MemoryChunk {

  // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
  // to iterate the page.
- SWEEP_TO_ITERATE = 1u << 17
+ SWEEP_TO_ITERATE = 1u << 17,
+
+ // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
+ // enabled.
+ INCREMENTAL_MARKING = 1u << 18
  };

  using Flags = uintptr_t;
@@ -403,7 +407,6 @@ class MemoryChunk {

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;
- static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;

@@ -758,7 +761,8 @@ class Page : public MemoryChunk {
  // Page flags copied from from-space to to-space when flipping semispaces.
  static const intptr_t kCopyOnFlipFlagsMask =
      static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-     static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+     static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+     static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);

  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[. This only works if the object
@@ -1177,7 +1181,7 @@ class SkipList {
  }

  static inline int RegionNumber(Address addr) {
-   return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
+   return (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {

@@ -1742,9 +1742,9 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  if (scratch == object) {
-   and_(scratch, Immediate(~Page::kPageAlignmentMask));
+   and_(scratch, Immediate(~kPageAlignmentMask));
  } else {
-   mov(scratch, Immediate(~Page::kPageAlignmentMask));
+   mov(scratch, Immediate(~kPageAlignmentMask));
    and_(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {

@@ -5417,7 +5417,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,

  void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                     Condition cc, Label* condition_met) {
-   And(scratch, object, Operand(~Page::kPageAlignmentMask));
+   And(scratch, object, Operand(~kPageAlignmentMask));
    lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
    And(scratch, scratch, Operand(mask));
    Branch(condition_met, cc, scratch, Operand(zero_reg));

@@ -5764,7 +5764,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,

  void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                     Condition cc, Label* condition_met) {
-   And(scratch, object, Operand(~Page::kPageAlignmentMask));
+   And(scratch, object, Operand(~kPageAlignmentMask));
    Ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
    And(scratch, scratch, Operand(mask));
    Branch(condition_met, cc, scratch, Operand(zero_reg));

@@ -1389,7 +1389,7 @@ void JSObject::SetMapAndElements(Handle<JSObject> object,

  void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
    WRITE_FIELD(this, kElementsOffset, value);
-   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
+   CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, value, mode);
  }


@@ -1475,7 +1475,7 @@ void WeakCell::initialize(HeapObject* val) {
    heap->incremental_marking()->marking_state()->IsBlack(this)
        ? UPDATE_WRITE_BARRIER
        : UPDATE_WEAK_WRITE_BARRIER;
- CONDITIONAL_WRITE_BARRIER(heap, this, kValueOffset, val, mode);
+ CONDITIONAL_WRITE_BARRIER(this, kValueOffset, val, mode);
  }

  bool WeakCell::cleared() const { return value() == Smi::kZero; }
@@ -1547,7 +1547,7 @@ void JSObject::SetEmbedderField(int index, Object* value) {
  // to adjust the index here.
  int offset = GetHeaderSize() + (kPointerSize * index);
  WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
+ WRITE_BARRIER(this, offset, value);
  }

  void JSObject::SetEmbedderField(int index, Smi* value) {
@@ -1592,7 +1592,7 @@ void JSObject::RawFastPropertyAtPut(FieldIndex index, Object* value) {
  if (index.is_inobject()) {
    int offset = index.offset();
    WRITE_FIELD(this, offset, value);
-   WRITE_BARRIER(GetHeap(), this, offset, value);
+   WRITE_BARRIER(this, offset, value);
  } else {
    property_array()->set(index.outobject_array_index(), value);
  }
@@ -1668,7 +1668,7 @@ Object* JSObject::InObjectPropertyAtPut(int index,
  // Adjust for the number of properties stored in the object.
  int offset = GetInObjectPropertyOffset(index);
  WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
  return value;
  }

@@ -1735,7 +1735,7 @@ void PropertyArray::set(int index, Object* value) {
  DCHECK_LT(index, this->length());
  int offset = kHeaderSize + index * kPointerSize;
  RELAXED_WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset, value);
+ WRITE_BARRIER(this, offset, value);
  }

  int RegExpMatchInfo::NumberOfCaptureRegisters() {
@@ -1834,8 +1834,7 @@ void PropertyArray::set(int index, Object* value, WriteBarrierMode mode) {
  DCHECK_LT(index, this->length());
  int offset = kHeaderSize + index * kPointerSize;
  RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset,
-                           value, mode);
+ CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
  }

  Object** PropertyArray::data_start() {
@@ -2572,7 +2571,7 @@ Context* JSFunction::native_context() { return context()->native_context(); }
  void JSFunction::set_context(Object* value) {
    DCHECK(value->IsUndefined() || value->IsContext());
    WRITE_FIELD(this, kContextOffset, value);
-   WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
+   WRITE_BARRIER(this, kContextOffset, value);
  }

  ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, Object,
@@ -2651,8 +2650,7 @@ void SmallOrderedHashTable<Derived>::SetDataEntry(int entry, int relative_index,
                                                   Object* value) {
  Address entry_offset = GetDataEntryOffset(entry, relative_index);
  RELAXED_WRITE_FIELD(this, entry_offset, value);
- WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
-               static_cast<int>(entry_offset), value);
+ WRITE_BARRIER(this, static_cast<int>(entry_offset), value);
  }

  ACCESSORS(JSValue, value, Object, kValueOffset)

@@ -99,7 +99,7 @@ void FixedArray::set(int index, Object* value) {
  DCHECK_LT(index, this->length());
  int offset = kHeaderSize + index * kPointerSize;
  RELAXED_WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset, value);
+ WRITE_BARRIER(this, offset, value);
  }

  void FixedArray::set(int index, Object* value, WriteBarrierMode mode) {
@@ -108,8 +108,7 @@ void FixedArray::set(int index, Object* value, WriteBarrierMode mode) {
  DCHECK_LT(index, this->length());
  int offset = kHeaderSize + index * kPointerSize;
  RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset,
-                           value, mode);
+ CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
  }

  void FixedArray::NoWriteBarrierSet(FixedArray* array, int index,
@@ -245,7 +244,7 @@ void WeakFixedArray::Set(int index, MaybeObject* value) {
  DCHECK_LT(index, length());
  int offset = OffsetOfElementAt(index);
  RELAXED_WRITE_FIELD(this, offset, value);
- WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset, value);
+ WEAK_WRITE_BARRIER(this, offset, value);
  }

  void WeakFixedArray::Set(int index, MaybeObject* value, WriteBarrierMode mode) {
@@ -253,8 +252,7 @@ void WeakFixedArray::Set(int index, MaybeObject* value, WriteBarrierMode mode) {
  DCHECK_LT(index, length());
  int offset = OffsetOfElementAt(index);
  RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
-                                offset, value, mode);
+ CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
  }

  MaybeObject** WeakFixedArray::data_start() {
@@ -280,8 +278,7 @@ void WeakArrayList::Set(int index, MaybeObject* value, WriteBarrierMode mode) {
  DCHECK_LT(index, this->capacity());
  int offset = OffsetOfElementAt(index);
  RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
-                                offset, value, mode);
+ CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
  }

  MaybeObject** WeakArrayList::data_start() {

@@ -168,7 +168,7 @@ Object* JSArrayBufferView::byte_offset() const {

  void JSArrayBufferView::set_byte_offset(Object* value, WriteBarrierMode mode) {
    WRITE_FIELD(this, kByteOffsetOffset, value);
-   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kByteOffsetOffset, value, mode);
+   CONDITIONAL_WRITE_BARRIER(this, kByteOffsetOffset, value, mode);
  }

  Object* JSArrayBufferView::byte_length() const {
@@ -178,7 +178,7 @@ Object* JSArrayBufferView::byte_length() const {

  void JSArrayBufferView::set_byte_length(Object* value, WriteBarrierMode mode) {
    WRITE_FIELD(this, kByteLengthOffset, value);
-   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kByteLengthOffset, value, mode);
+   CONDITIONAL_WRITE_BARRIER(this, kByteLengthOffset, value, mode);
  }

  ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
@@ -208,7 +208,7 @@ size_t JSTypedArray::length_value() const {

  void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
    WRITE_FIELD(this, kLengthOffset, value);
-   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kLengthOffset, value, mode);
+   CONDITIONAL_WRITE_BARRIER(this, kLengthOffset, value, mode);
  }

  bool JSTypedArray::is_on_heap() const {

@@ -569,8 +569,7 @@ Object* Map::prototype() const { return READ_FIELD(this, kPrototypeOffset); }
  void Map::set_prototype(Object* value, WriteBarrierMode mode) {
    DCHECK(value->IsNull() || value->IsJSReceiver());
    WRITE_FIELD(this, kPrototypeOffset, value);
-   CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
-                             kPrototypeOffset, value, mode);
+   CONDITIONAL_WRITE_BARRIER(this, kPrototypeOffset, value, mode);
  }

  LayoutDescriptor* Map::layout_descriptor_gc_safe() const {
@@ -690,8 +689,7 @@ Object* Map::prototype_info() const {
  void Map::set_prototype_info(Object* value, WriteBarrierMode mode) {
    CHECK(is_prototype_map());
    WRITE_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset, value);
-   CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
-                             Map::kTransitionsOrPrototypeInfoOffset, value,
+   CONDITIONAL_WRITE_BARRIER(this, Map::kTransitionsOrPrototypeInfoOffset, value,
                              mode);
  }

@@ -77,18 +77,17 @@
    WRITE_UINT8_FIELD(this, offset, value); \
  }

- #define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
-                            set_condition) \
-   type* holder::name() const { \
-     type* value = type::cast(READ_FIELD(this, offset)); \
-     DCHECK(get_condition); \
-     return value; \
-   } \
-   void holder::set_##name(type* value, WriteBarrierMode mode) { \
-     DCHECK(set_condition); \
-     WRITE_FIELD(this, offset, value); \
-     CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, \
-                               offset, value, mode); \
+ #define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
+                            set_condition) \
+   type* holder::name() const { \
+     type* value = type::cast(READ_FIELD(this, offset)); \
+     DCHECK(get_condition); \
+     return value; \
+   } \
+   void holder::set_##name(type* value, WriteBarrierMode mode) { \
+     DCHECK(set_condition); \
+     WRITE_FIELD(this, offset, value); \
+     CONDITIONAL_WRITE_BARRIER(this, offset, value, mode); \
  }
  #define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
    ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)
@@ -96,18 +95,17 @@
  #define ACCESSORS(holder, name, type, offset) \
    ACCESSORS_CHECKED(holder, name, type, offset, true)

- #define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
-                                 set_condition) \
-   MaybeObject* holder::name() const { \
-     MaybeObject* value = READ_WEAK_FIELD(this, offset); \
-     DCHECK(get_condition); \
-     return value; \
-   } \
-   void holder::set_##name(MaybeObject* value, WriteBarrierMode mode) { \
-     DCHECK(set_condition); \
-     WRITE_WEAK_FIELD(this, offset, value); \
-     CONDITIONAL_WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, \
-                                    offset, value, mode); \
+ #define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
+                                 set_condition) \
+   MaybeObject* holder::name() const { \
+     MaybeObject* value = READ_WEAK_FIELD(this, offset); \
+     DCHECK(get_condition); \
+     return value; \
+   } \
+   void holder::set_##name(MaybeObject* value, WriteBarrierMode mode) { \
+     DCHECK(set_condition); \
+     WRITE_WEAK_FIELD(this, offset, value); \
+     CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode); \
  }

  #define WEAK_ACCESSORS_CHECKED(holder, name, offset, condition) \
@@ -218,48 +216,45 @@
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
      reinterpret_cast<base::AtomicWord>(value));

- #define WRITE_BARRIER(heap, object, offset, value) \
-   do { \
-     Heap* __heap__ = heap; \
-     __heap__->incremental_marking()->RecordWrite( \
-         object, HeapObject::RawField(object, offset), value); \
-     __heap__->RecordWrite(object, HeapObject::RawField(object, offset), \
-                           value); \
+ #define WRITE_BARRIER(object, offset, value) \
+   do { \
+     DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+     MarkingBarrier(object, HeapObject::RawField(object, offset), value); \
+     GenerationalBarrier(object, HeapObject::RawField(object, offset), value); \
  } while (false)

- #define WEAK_WRITE_BARRIER(heap, object, offset, value) \
-   do { \
-     Heap* __heap__ = heap; \
-     __heap__->incremental_marking()->RecordMaybeWeakWrite( \
-         object, HeapObject::RawMaybeWeakField(object, offset), value); \
-     __heap__->RecordWrite( \
-         object, HeapObject::RawMaybeWeakField(object, offset), value); \
+ #define WEAK_WRITE_BARRIER(object, offset, value) \
+   do { \
+     DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+     MarkingBarrier(object, HeapObject::RawMaybeWeakField(object, offset), \
+                    value); \
+     GenerationalBarrier(object, HeapObject::RawMaybeWeakField(object, offset), \
+                         value); \
  } while (false)

- #define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
-   do { \
-     Heap* __heap__ = heap; \
-     if (mode != SKIP_WRITE_BARRIER) { \
-       if (mode == UPDATE_WRITE_BARRIER) { \
-         __heap__->incremental_marking()->RecordWrite( \
-             object, HeapObject::RawField(object, offset), value); \
-       } \
-       __heap__->RecordWrite(object, HeapObject::RawField(object, offset), \
-                             value); \
-     } \
+ #define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode) \
+   do { \
+     DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+     if (mode != SKIP_WRITE_BARRIER) { \
+       if (mode == UPDATE_WRITE_BARRIER) { \
+         MarkingBarrier(object, HeapObject::RawField(object, offset), value); \
+       } \
+       GenerationalBarrier(object, HeapObject::RawField(object, offset), \
+                           value); \
+     } \
  } while (false)

- #define CONDITIONAL_WEAK_WRITE_BARRIER(heap, object, offset, value, mode) \
-   do { \
-     Heap* __heap__ = heap; \
-     if (mode != SKIP_WRITE_BARRIER) { \
-       if (mode == UPDATE_WRITE_BARRIER) { \
-         __heap__->incremental_marking()->RecordMaybeWeakWrite( \
-             object, HeapObject::RawMaybeWeakField(object, offset), value); \
-       } \
-       __heap__->RecordWrite( \
-           object, HeapObject::RawMaybeWeakField(object, offset), value); \
-     } \
+ #define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode) \
+   do { \
+     DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+     if (mode != SKIP_WRITE_BARRIER) { \
+       if (mode == UPDATE_WRITE_BARRIER) { \
+         MarkingBarrier(object, HeapObject::RawMaybeWeakField(object, offset), \
+                        value); \
+       } \
+       GenerationalBarrier( \
+           object, HeapObject::RawMaybeWeakField(object, offset), value); \
+     } \
  } while (false)

  #define READ_DOUBLE_FIELD(p, offset) ReadDoubleValue(FIELD_ADDR(p, offset))

@@ -35,8 +35,7 @@ void PreParsedScopeData::set_child_data(int index, Object* value,
  DCHECK_LT(index, this->length());
  int offset = kChildDataStartOffset + index * kPointerSize;
  RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset,
-                           value, mode);
+ CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
  }

  Object** PreParsedScopeData::child_data_start() const {
@@ -361,7 +360,7 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo* scope_info,
  }
  WRITE_FIELD(this, kNameOrScopeInfoOffset,
              reinterpret_cast<Object*>(scope_info));
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kNameOrScopeInfoOffset,
+ CONDITIONAL_WRITE_BARRIER(this, kNameOrScopeInfoOffset,
                            reinterpret_cast<Object*>(scope_info), mode);
  }

@@ -498,7 +498,7 @@ void SlicedString::set_parent(Isolate* isolate, String* parent,
                               WriteBarrierMode mode) {
  DCHECK(parent->IsSeqString() || parent->IsExternalString());
  WRITE_FIELD(this, kParentOffset, parent);
- CONDITIONAL_WRITE_BARRIER(isolate->heap(), this, kParentOffset, parent, mode);
+ CONDITIONAL_WRITE_BARRIER(this, kParentOffset, parent, mode);
  }

  SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)
@@ -512,7 +512,7 @@ Object* ConsString::unchecked_first() { return READ_FIELD(this, kFirstOffset); }
  void ConsString::set_first(Isolate* isolate, String* value,
                             WriteBarrierMode mode) {
    WRITE_FIELD(this, kFirstOffset, value);
-   CONDITIONAL_WRITE_BARRIER(isolate->heap(), this, kFirstOffset, value, mode);
+   CONDITIONAL_WRITE_BARRIER(this, kFirstOffset, value, mode);
  }

  String* ConsString::second() {
@@ -526,7 +526,7 @@ Object* ConsString::unchecked_second() {
  void ConsString::set_second(Isolate* isolate, String* value,
                              WriteBarrierMode mode) {
    WRITE_FIELD(this, kSecondOffset, value);
-   CONDITIONAL_WRITE_BARRIER(isolate->heap(), this, kSecondOffset, value, mode);
+   CONDITIONAL_WRITE_BARRIER(this, kSecondOffset, value, mode);
  }

  ACCESSORS(ThinString, actual, String, kActualOffset);

@@ -15,6 +15,7 @@
  #include "src/debug/debug.h"
  #include "src/external-reference-table.h"
  #include "src/frames-inl.h"
+ #include "src/globals.h"
  #include "src/heap/heap-inl.h"
  #include "src/instruction-stream.h"
  #include "src/objects-inl.h"
@@ -2595,9 +2596,9 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  if (scratch == object) {
-   andp(scratch, Immediate(~Page::kPageAlignmentMask));
+   andp(scratch, Immediate(~kPageAlignmentMask));
  } else {
-   movp(scratch, Immediate(~Page::kPageAlignmentMask));
+   movp(scratch, Immediate(~kPageAlignmentMask));
    andp(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {

@@ -3,6 +3,7 @@
  // found in the LICENSE file.

  #include "src/heap/heap-inl.h"
+ #include "src/heap/heap-write-barrier-inl.h"
  #include "src/heap/spaces-inl.h"
  #include "src/isolate.h"
  #include "test/unittests/test-utils.h"
@@ -51,5 +52,70 @@ TEST_F(SpacesTest, CompactionSpaceMerge) {
  delete compaction_space;
  }

+ TEST_F(SpacesTest, WriteBarrierFromHeapObject) {
+   Heap* heap = i_isolate()->heap();
+   CompactionSpace* temporary_space =
+       new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
+   EXPECT_NE(nullptr, temporary_space);
+   HeapObject* object =
+       temporary_space->AllocateRawUnaligned(kMaxRegularHeapObjectSize)
+           .ToObjectChecked();
+   EXPECT_NE(nullptr, object);
+
+   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+   heap_internals::MemoryChunk* slim_chunk =
+       heap_internals::MemoryChunk::FromHeapObject(object);
+   EXPECT_EQ(static_cast<void*>(chunk), static_cast<void*>(slim_chunk));
+   delete temporary_space;
+ }
+
+ TEST_F(SpacesTest, WriteBarrierIsMarking) {
+   char memory[256];
+   memset(&memory, 0, sizeof(memory));
+   MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(&memory);
+   heap_internals::MemoryChunk* slim_chunk =
+       reinterpret_cast<heap_internals::MemoryChunk*>(&memory);
+   EXPECT_FALSE(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING));
+   EXPECT_FALSE(slim_chunk->IsMarking());
+   chunk->SetFlag(MemoryChunk::INCREMENTAL_MARKING);
+   EXPECT_TRUE(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING));
+   EXPECT_TRUE(slim_chunk->IsMarking());
+   chunk->ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
+   EXPECT_FALSE(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING));
+   EXPECT_FALSE(slim_chunk->IsMarking());
+ }
+
+ TEST_F(SpacesTest, WriteBarrierInNewSpaceToSpace) {
+   char memory[256];
+   memset(&memory, 0, sizeof(memory));
+   MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(&memory);
+   heap_internals::MemoryChunk* slim_chunk =
+       reinterpret_cast<heap_internals::MemoryChunk*>(&memory);
+   EXPECT_FALSE(chunk->InNewSpace());
+   EXPECT_FALSE(slim_chunk->InNewSpace());
+   chunk->SetFlag(MemoryChunk::IN_TO_SPACE);
+   EXPECT_TRUE(chunk->InNewSpace());
+   EXPECT_TRUE(slim_chunk->InNewSpace());
+   chunk->ClearFlag(MemoryChunk::IN_TO_SPACE);
+   EXPECT_FALSE(chunk->InNewSpace());
+   EXPECT_FALSE(slim_chunk->InNewSpace());
+ }
+
+ TEST_F(SpacesTest, WriteBarrierInNewSpaceFromSpace) {
+   char memory[256];
+   memset(&memory, 0, sizeof(memory));
+   MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(&memory);
+   heap_internals::MemoryChunk* slim_chunk =
+       reinterpret_cast<heap_internals::MemoryChunk*>(&memory);
+   EXPECT_FALSE(chunk->InNewSpace());
+   EXPECT_FALSE(slim_chunk->InNewSpace());
+   chunk->SetFlag(MemoryChunk::IN_FROM_SPACE);
+   EXPECT_TRUE(chunk->InNewSpace());
+   EXPECT_TRUE(slim_chunk->InNewSpace());
+   chunk->ClearFlag(MemoryChunk::IN_FROM_SPACE);
+   EXPECT_FALSE(chunk->InNewSpace());
+   EXPECT_FALSE(slim_chunk->InNewSpace());
+ }
+
  } // namespace internal
  } // namespace v8