From 9e3d6cbec5095ca2f49f61c2bb2cde3618e74854 Mon Sep 17 00:00:00 2001
From: Michael Lippautz
Date: Tue, 24 Jul 2018 22:13:40 +0200
Subject: [PATCH] [heap,iwyu] Provide slim write barrier header

Move the write barrier essentials into heap/heap-write-barrier-inl.h.
Avoid including further heap inline headers by relying on a constant to
load the page flags from.

Bug: v8:7490
Change-Id: I2891299f1b1ca2c3e2031cb9c63b583b1665e3f9
Reviewed-on: https://chromium-review.googlesource.com/1148448
Commit-Queue: Michael Lippautz
Reviewed-by: Michael Starzinger
Reviewed-by: Clemens Hammacher
Reviewed-by: Ulan Degenbaev
Cr-Commit-Position: refs/heads/master@{#54710}
---
 BUILD.gn                                |   2 +
 src/arm64/macro-assembler-arm64.cc      |   6 +-
 src/builtins/builtins-internal-gen.cc   |   6 +-
 src/code-stub-assembler.cc              |   2 +-
 src/deoptimizer.cc                      |  10 +--
 src/feedback-vector-inl.h               |   2 +-
 src/globals.h                           |   3 +
 src/heap/heap-inl.h                     |  11 ++-
 src/heap/heap-write-barrier-inl.h       | 109 +++++++++++++++++++++++
 src/heap/heap-write-barrier.h           |  30 +++++++
 src/heap/heap.cc                        |  44 ++++++++++
 src/heap/heap.h                         |   8 ++
 src/heap/mark-compact.cc                |   4 +-
 src/heap/spaces.cc                      |   8 +-
 src/heap/spaces.h                       |  14 +--
 src/ia32/macro-assembler-ia32.cc        |   4 +-
 src/mips/macro-assembler-mips.cc        |   2 +-
 src/mips64/macro-assembler-mips64.cc    |   2 +-
 src/objects-inl.h                       |  20 ++--
 src/objects/fixed-array-inl.h           |  13 ++-
 src/objects/js-array-inl.h              |   6 +-
 src/objects/map-inl.h                   |   6 +-
 src/objects/object-macros.h             | 115 ++++++++++++------------
 src/objects/shared-function-info-inl.h  |   5 +-
 src/objects/string-inl.h                |   6 +-
 src/x64/macro-assembler-x64.cc          |   5 +-
 test/unittests/heap/spaces-unittest.cc  |  66 ++++++++++++++
 27 files changed, 388 insertions(+), 121 deletions(-)
 create mode 100644 src/heap/heap-write-barrier-inl.h
 create mode 100644 src/heap/heap-write-barrier.h

diff --git a/BUILD.gn b/BUILD.gn
index ddf7aaf121..6f9df99dd8 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -1965,6 +1965,8 @@ v8_source_set("v8_base") {
     "src/heap/heap-controller.cc",
     "src/heap/heap-controller.h",
     "src/heap/heap-inl.h",
+    "src/heap/heap-write-barrier-inl.h",
+    "src/heap/heap-write-barrier.h",
     "src/heap/heap.cc",
     "src/heap/heap.h",
     "src/heap/incremental-marking-inl.h",
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index 2439e864df..a7ab2a3fcd 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -2765,7 +2765,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
 void MacroAssembler::CheckPageFlag(const Register& object,
                                    const Register& scratch, int mask,
                                    Condition cc, Label* condition_met) {
-  And(scratch, object, ~Page::kPageAlignmentMask);
+  And(scratch, object, ~kPageAlignmentMask);
   Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
   if (cc == eq) {
     TestAndBranchIfAnySet(scratch, mask, condition_met);
@@ -2777,7 +2777,7 @@ void MacroAssembler::CheckPageFlag(const Register& object,
 void TurboAssembler::CheckPageFlagSet(const Register& object,
                                       const Register& scratch, int mask,
                                       Label* if_any_set) {
-  And(scratch, object, ~Page::kPageAlignmentMask);
+  And(scratch, object, ~kPageAlignmentMask);
   Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
   TestAndBranchIfAnySet(scratch, mask, if_any_set);
 }
@@ -2785,7 +2785,7 @@ void TurboAssembler::CheckPageFlagClear(const Register& object,
                                         const Register& scratch, int mask,
                                         Label* if_all_clear) {
-  And(scratch, object, ~Page::kPageAlignmentMask);
+  And(scratch, object, ~kPageAlignmentMask);
   Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
   TestAndBranchIfAllClear(scratch, mask, if_all_clear);
 }
diff --git a/src/builtins/builtins-internal-gen.cc b/src/builtins/builtins-internal-gen.cc
index db80d81047..6152d15f95 100644
--- a/src/builtins/builtins-internal-gen.cc
+++ b/src/builtins/builtins-internal-gen.cc
@@ -221,7 +221,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
   }

   Node* IsPageFlagSet(Node* object, int mask) {
-    Node* page = WordAnd(object, IntPtrConstant(~Page::kPageAlignmentMask));
+    Node* page = WordAnd(object, IntPtrConstant(~kPageAlignmentMask));
     Node* flags = Load(MachineType::Pointer(), page,
                        IntPtrConstant(MemoryChunk::kFlagsOffset));
     return WordNotEqual(WordAnd(flags, IntPtrConstant(mask)),
@@ -241,7 +241,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
   }

   void GetMarkBit(Node* object, Node** cell, Node** mask) {
-    Node* page = WordAnd(object, IntPtrConstant(~Page::kPageAlignmentMask));
+    Node* page = WordAnd(object, IntPtrConstant(~kPageAlignmentMask));

     {
       // Temp variable to calculate cell offset in bitmap.
@@ -249,7 +249,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
       int shift = Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 -
                   Bitmap::kBytesPerCellLog2;
       r0 = WordShr(object, IntPtrConstant(shift));
-      r0 = WordAnd(r0, IntPtrConstant((Page::kPageAlignmentMask >> shift) &
+      r0 = WordAnd(r0, IntPtrConstant((kPageAlignmentMask >> shift) &
                                       ~(Bitmap::kBytesPerCell - 1)));
       *cell = IntPtrAdd(IntPtrAdd(page, r0),
                         IntPtrConstant(MemoryChunk::kHeaderSize));
diff --git a/src/code-stub-assembler.cc b/src/code-stub-assembler.cc
index c1cfe78ae4..5b88e03a27 100644
--- a/src/code-stub-assembler.cc
+++ b/src/code-stub-assembler.cc
@@ -9580,7 +9580,7 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object,
 }

 TNode<IntPtrT> CodeStubAssembler::PageFromAddress(TNode<IntPtrT> address) {
-  return WordAnd(address, IntPtrConstant(~Page::kPageAlignmentMask));
+  return WordAnd(address, IntPtrConstant(~kPageAlignmentMask));
 }

 TNode CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 8106abea60..4b93c8685c 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -3719,8 +3719,8 @@ void TranslatedState::InitializeJSObjectAt(
   Handle<Object> properties = GetValueAndAdvance(frame, value_index);
   WRITE_FIELD(*object_storage, JSObject::kPropertiesOrHashOffset, *properties);
-  WRITE_BARRIER(isolate()->heap(), *object_storage,
-                JSObject::kPropertiesOrHashOffset, *properties);
+  WRITE_BARRIER(*object_storage, JSObject::kPropertiesOrHashOffset,
+                *properties);

   // For all the other fields we first look at the fixed array and check the
@@ -3747,11 +3747,11 @@ void TranslatedState::InitializeJSObjectAt(
     } else if (marker == kStoreMutableHeapNumber) {
       CHECK(field_value->IsMutableHeapNumber());
       WRITE_FIELD(*object_storage, offset, *field_value);
-      WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
+      WRITE_BARRIER(*object_storage, offset, *field_value);
     } else {
       CHECK_EQ(kStoreTagged, marker);
       WRITE_FIELD(*object_storage, offset, *field_value);
-      WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
+      WRITE_BARRIER(*object_storage, offset, *field_value);
     }
   }
   object_storage->synchronized_set_map(*map);
@@ -3787,7 +3787,7 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
     }
     WRITE_FIELD(*object_storage, offset, *field_value);
-    WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
+    WRITE_BARRIER(*object_storage, offset, *field_value);
   }
   object_storage->synchronized_set_map(*map);
diff --git a/src/feedback-vector-inl.h b/src/feedback-vector-inl.h
index 1d3028cdaf..d539eef57b 100644
--- a/src/feedback-vector-inl.h
+++ b/src/feedback-vector-inl.h
@@ -167,7 +167,7 @@ void FeedbackVector::set(int index, MaybeObject* value, WriteBarrierMode mode) {
   DCHECK_LT(index, this->length());
   int offset = kFeedbackSlotsOffset + index * kPointerSize;
   RELAXED_WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WEAK_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+  CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
 }

 void FeedbackVector::Set(FeedbackSlot slot, Object* value,
diff --git a/src/globals.h b/src/globals.h
index 25dc3e8017..6abe510859 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -462,6 +462,9 @@ constexpr uint32_t kFreeListZapValue = 0xfeed1eaf;
 constexpr int kCodeZapValue = 0xbadc0de;
 constexpr uint32_t kPhantomReferenceZap = 0xca11bac;

+// Page constants.
+static const intptr_t kPageAlignmentMask = (intptr_t{1} << kPageSizeBits) - 1;
+
 // On Intel architecture, cache line size is 64 bytes.
 // On ARM it may be less (32 bytes), but as far this constant is
 // used for aligning data, it doesn't hurt to align on a greater value.
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
index 02a7dc985a..df30648fb9 100644
--- a/src/heap/heap-inl.h
+++ b/src/heap/heap-inl.h
@@ -8,12 +8,15 @@
 #include <cmath>

 // Clients of this interface shouldn't depend on lots of heap internals.
-// Do not include anything from src/heap other than src/heap/heap.h here!
+// Do not include anything from src/heap other than src/heap/heap.h and its
+// write barrier here!
+#include "src/heap/heap-write-barrier.h"
 #include "src/heap/heap.h"

 #include "src/base/platform/platform.h"
 #include "src/counters-inl.h"
 #include "src/feedback-vector.h"
+
 // TODO(mstarzinger): There are 3 more includes to remove in order to no longer
 // leak heap internals to users of this interface!
 #include "src/heap/incremental-marking-inl.h"
@@ -32,6 +35,12 @@
 #include "src/string-hasher.h"
 #include "src/zone/zone-list-inl.h"

+// The following header includes the write barrier essentials that can also be
+// used stand-alone without including heap-inl.h.
+// TODO(mlippautz): Remove once users of object-macros.h include this file on
+// their own.
+#include "src/heap/heap-write-barrier-inl.h"
+
 namespace v8 {
 namespace internal {

diff --git a/src/heap/heap-write-barrier-inl.h b/src/heap/heap-write-barrier-inl.h
new file mode 100644
index 0000000000..fc187b1110
--- /dev/null
+++ b/src/heap/heap-write-barrier-inl.h
@@ -0,0 +1,109 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_WRITE_BARRIER_INL_H_
+#define V8_HEAP_HEAP_WRITE_BARRIER_INL_H_
+
+#include "src/heap/heap-write-barrier.h"
+
+#include "src/globals.h"
+#include "src/heap/heap.h"
+#include "src/objects-inl.h"
+#include "src/objects/maybe-object-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Do not use these internal details anywhere outside of this file. These
+// internals are only intended to shortcut write barrier checks.
+namespace heap_internals {
+
+struct MemoryChunk {
+  static constexpr uintptr_t kFlagsOffset = sizeof(size_t);
+  static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
+  static constexpr uintptr_t kFromSpaceBit = uintptr_t{1} << 3;
+  static constexpr uintptr_t kToSpaceBit = uintptr_t{1} << 4;
+
+  V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
+      HeapObject* object) {
+    return reinterpret_cast<MemoryChunk*>(reinterpret_cast<uintptr_t>(object) &
+                                          ~kPageAlignmentMask);
+  }
+
+  V8_INLINE bool IsMarking() const { return GetFlags() & kMarkingBit; }
+
+  V8_INLINE bool InNewSpace() const {
+    constexpr uintptr_t kNewSpaceMask = kFromSpaceBit | kToSpaceBit;
+    return GetFlags() & kNewSpaceMask;
+  }
+
+  V8_INLINE uintptr_t GetFlags() const {
+    return *reinterpret_cast<const uintptr_t*>(
+        reinterpret_cast<const uint8_t*>(this) + kFlagsOffset);
+  }
+};
+
+inline void GenerationalBarrierInternal(HeapObject* object, Address slot,
+                                        HeapObject* value) {
+  DCHECK(Heap::PageFlagsAreConsistent(object));
+  heap_internals::MemoryChunk* value_chunk =
+      heap_internals::MemoryChunk::FromHeapObject(value);
+  heap_internals::MemoryChunk* object_chunk =
+      heap_internals::MemoryChunk::FromHeapObject(object);
+
+  if (!value_chunk->InNewSpace() || object_chunk->InNewSpace()) return;
+
+  Heap::GenerationalBarrierSlow(object, slot, value);
+}
+
+inline void MarkingBarrierInternal(HeapObject* object, Address slot,
+                                   HeapObject* value) {
+  DCHECK(Heap::PageFlagsAreConsistent(object));
+  heap_internals::MemoryChunk* value_chunk =
+      heap_internals::MemoryChunk::FromHeapObject(value);
+
+  if (!value_chunk->IsMarking()) return;
+
+  Heap::MarkingBarrierSlow(object, slot, value);
+}
+
+}  // namespace heap_internals
+
+inline void GenerationalBarrier(HeapObject* object, Object** slot,
+                                Object* value) {
+  DCHECK(!HasWeakHeapObjectTag(*slot));
+  DCHECK(!HasWeakHeapObjectTag(value));
+  if (!value->IsHeapObject()) return;
+  heap_internals::GenerationalBarrierInternal(
+      object, reinterpret_cast<Address>(slot), HeapObject::cast(value));
+}
+
+inline void GenerationalBarrier(HeapObject* object, MaybeObject** slot,
+                                MaybeObject* value) {
+  HeapObject* value_heap_object;
+  if (!value->ToStrongOrWeakHeapObject(&value_heap_object)) return;
+  heap_internals::GenerationalBarrierInternal(
+      object, reinterpret_cast<Address>(slot), value_heap_object);
+}
+
+inline void MarkingBarrier(HeapObject* object, Object** slot, Object* value) {
+  DCHECK_IMPLIES(slot != nullptr, !HasWeakHeapObjectTag(*slot));
+  DCHECK(!HasWeakHeapObjectTag(value));
+  if (!value->IsHeapObject()) return;
+  heap_internals::MarkingBarrierInternal(
+      object, reinterpret_cast<Address>(slot), HeapObject::cast(value));
+}
+
+inline void MarkingBarrier(HeapObject* object, MaybeObject** slot,
+                           MaybeObject* value) {
+  HeapObject* value_heap_object;
+  if (!value->ToStrongOrWeakHeapObject(&value_heap_object)) return;
+  heap_internals::MarkingBarrierInternal(
+      object, reinterpret_cast<Address>(slot), value_heap_object);
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_HEAP_WRITE_BARRIER_INL_H_
diff --git a/src/heap/heap-write-barrier.h b/src/heap/heap-write-barrier.h
new file mode 100644
index 0000000000..ee1e647c4d
--- /dev/null
+++ b/src/heap/heap-write-barrier.h
@@ -0,0 +1,30 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_WRITE_BARRIER_H_
+#define V8_HEAP_HEAP_WRITE_BARRIER_H_
+
+namespace v8 {
+namespace internal {
+
+class HeapObject;
+class MaybeObject;
+class Object;
+
+// Note: In general it is preferred to use the macros defined in
+// object-macros.h.
+
+// Generational write barrier.
+void GenerationalBarrier(HeapObject* object, Object** slot, Object* value);
+void GenerationalBarrier(HeapObject* object, MaybeObject** slot,
+                         MaybeObject* value);
+
+// Marking write barrier.
+void MarkingBarrier(HeapObject* object, Object** slot, Object* value);
+void MarkingBarrier(HeapObject* object, MaybeObject** slot,
+                    MaybeObject* value);
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_HEAP_WRITE_BARRIER_H_
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 1daad8db21..9a8e517521 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -31,6 +31,7 @@
 #include "src/heap/gc-idle-time-handler.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/heap-controller.h"
+#include "src/heap/heap-write-barrier-inl.h"
 #include "src/heap/incremental-marking.h"
 #include "src/heap/item-parallel-job.h"
 #include "src/heap/mark-compact-inl.h"
@@ -5787,5 +5788,48 @@ Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
   }
 }

+void Heap::GenerationalBarrierSlow(HeapObject* object, Address slot,
+                                   HeapObject* value) {
+  Heap* heap = Heap::FromWritableHeapObject(object);
+  heap->store_buffer()->InsertEntry(slot);
+}
+
+void Heap::MarkingBarrierSlow(HeapObject* object, Address slot,
+                              HeapObject* value) {
+  Heap* heap = Heap::FromWritableHeapObject(object);
+  heap->incremental_marking()->RecordWriteSlow(
+      object, reinterpret_cast<Object**>(slot), value);
+}
+
+bool Heap::PageFlagsAreConsistent(HeapObject* object) {
+  Heap* heap = Heap::FromWritableHeapObject(object);
+  MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+  heap_internals::MemoryChunk* slim_chunk =
+      heap_internals::MemoryChunk::FromHeapObject(object);
+
+  const bool generation_consistency =
+      chunk->owner()->identity() != NEW_SPACE ||
+      (chunk->InNewSpace() && slim_chunk->InNewSpace());
+  const bool marking_consistency =
+      !heap->incremental_marking()->IsMarking() ||
+      (chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING) &&
+       slim_chunk->IsMarking());
+
+  return generation_consistency && marking_consistency;
+}
+
+static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
+                  heap_internals::MemoryChunk::kMarkingBit,
+              "Incremental marking flag inconsistent");
+static_assert(MemoryChunk::Flag::IN_FROM_SPACE ==
+                  heap_internals::MemoryChunk::kFromSpaceBit,
+              "From space flag inconsistent");
+static_assert(MemoryChunk::Flag::IN_TO_SPACE ==
+                  heap_internals::MemoryChunk::kToSpaceBit,
+              "To space flag inconsistent");
+static_assert(MemoryChunk::kFlagsOffset ==
+                  heap_internals::MemoryChunk::kFlagsOffset,
+              "Flag offset inconsistent");
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 7735555000..e3a0c6f8ef 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -492,6 +492,14 @@ class Heap {
   // by pointer size.
   static inline void CopyBlock(Address dst, Address src, int byte_size);

+  V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject* object,
+                                                        Address slot,
+                                                        HeapObject* value);
+  V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject* object,
+                                                   Address slot,
+                                                   HeapObject* value);
+  V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject* object);
+
   // Notifies the heap that is ok to start marking or other activities that
   // should not happen during deserialization.
   void NotifyDeserializationComplete();
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 6bc01238b8..a78f01ee39 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -1249,8 +1249,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
   bool AbortCompactionForTesting(HeapObject* object) {
     if (FLAG_stress_compaction) {
       const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
-                             Page::kPageAlignmentMask & ~kPointerAlignmentMask;
-      if ((object->address() & Page::kPageAlignmentMask) == mask) {
+                             kPageAlignmentMask & ~kPointerAlignmentMask;
+      if ((object->address() & kPageAlignmentMask) == mask) {
         Page* page = Page::FromAddress(object->address());
         if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
           page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index b901e6905d..7f14590e30 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -908,9 +908,11 @@ void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
   if (is_marking) {
     SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+    SetFlag(MemoryChunk::INCREMENTAL_MARKING);
   } else {
     ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+    ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
   }
 }

@@ -918,8 +920,10 @@ void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
   SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
   if (is_marking) {
     SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+    SetFlag(MemoryChunk::INCREMENTAL_MARKING);
   } else {
     ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+    ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
   }
 }

@@ -2543,7 +2547,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
   if (!is_committed()) {
     if (!Commit()) return false;
   }
-  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
+  DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
   DCHECK_LE(new_capacity, maximum_capacity_);
   DCHECK_GT(new_capacity, current_capacity_);
   const size_t delta = new_capacity - current_capacity_;
@@ -2582,7 +2586,7 @@ void SemiSpace::RewindPages(int num_pages) {
 }

 bool SemiSpace::ShrinkTo(size_t new_capacity) {
-  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
+  DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
   DCHECK_GE(new_capacity, minimum_capacity_);
   DCHECK_LT(new_capacity, current_capacity_);
   if (is_committed()) {
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 30edeab2b8..f40e59b5ff 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -110,7 +110,7 @@ class Space;

 // Some assertion macros used in the debugging mode.
 #define DCHECK_PAGE_ALIGNED(address) \
-  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
+  DCHECK((OffsetFrom(address) & kPageAlignmentMask) == 0)

 #define DCHECK_OBJECT_ALIGNED(address) \
   DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

@@ -312,7 +312,7 @@ class MemoryChunk {
     // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
     // to iterate the page.
-    SWEEP_TO_ITERATE = 1u << 17
+    SWEEP_TO_ITERATE = 1u << 17,
+
+    // |INCREMENTAL_MARKING|: Indicates whether incremental marking is
+    // currently enabled.
+    INCREMENTAL_MARKING = 1u << 18
   };

   using Flags = uintptr_t;

@@ -403,7 +407,6 @@ class MemoryChunk {
   // Page size in bytes. This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;
-  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

   static const int kAllocatableMemory = kPageSize - kObjectStartOffset;

@@ -758,7 +761,8 @@ class Page : public MemoryChunk {
   // Page flags copied from from-space to to-space when flipping semispaces.
   static const intptr_t kCopyOnFlipFlagsMask =
       static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-      static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+      static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+      static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);

   // Returns the page containing a given address. The address ranges
   // from [page_addr .. page_addr + kPageSize[. This only works if the object
@@ -1177,7 +1181,7 @@ class SkipList {
   }

   static inline int RegionNumber(Address addr) {
-    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
+    return (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2;
   }

   static void Update(Address addr, int size) {
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index dd52bd1235..d29e16bb2c 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1742,9 +1742,9 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                    Label::Distance condition_met_distance) {
   DCHECK(cc == zero || cc == not_zero);
   if (scratch == object) {
-    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+    and_(scratch, Immediate(~kPageAlignmentMask));
   } else {
-    mov(scratch, Immediate(~Page::kPageAlignmentMask));
+    mov(scratch, Immediate(~kPageAlignmentMask));
     and_(scratch, object);
   }
   if (mask < (1 << kBitsPerByte)) {
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 6bdda660ab..26d2f478b5 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -5417,7 +5417,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,

 void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                    Condition cc, Label* condition_met) {
-  And(scratch, object, Operand(~Page::kPageAlignmentMask));
+  And(scratch, object, Operand(~kPageAlignmentMask));
   lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
   And(scratch, scratch, Operand(mask));
   Branch(condition_met, cc, scratch, Operand(zero_reg));
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index 79a4846eff..3361cc6d0a 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -5764,7 +5764,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,

 void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                    Condition cc, Label* condition_met) {
-  And(scratch, object, Operand(~Page::kPageAlignmentMask));
+  And(scratch, object, Operand(~kPageAlignmentMask));
   Ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
   And(scratch, scratch, Operand(mask));
   Branch(condition_met, cc, scratch, Operand(zero_reg));
diff --git a/src/objects-inl.h b/src/objects-inl.h
index b1f6f16a4b..c6961bc550 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1389,7 +1389,7 @@ void JSObject::SetMapAndElements(Handle<JSObject> object,

 void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kElementsOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, value, mode);
 }

@@ -1475,7 +1475,7 @@ void WeakCell::initialize(HeapObject* val) {
       heap->incremental_marking()->marking_state()->IsBlack(this)
           ? UPDATE_WRITE_BARRIER
           : UPDATE_WEAK_WRITE_BARRIER;
-  CONDITIONAL_WRITE_BARRIER(heap, this, kValueOffset, val, mode);
+  CONDITIONAL_WRITE_BARRIER(this, kValueOffset, val, mode);
 }

 bool WeakCell::cleared() const { return value() == Smi::kZero; }
@@ -1547,7 +1547,7 @@ void JSObject::SetEmbedderField(int index, Object* value) {
   // to adjust the index here.
   int offset = GetHeaderSize() + (kPointerSize * index);
   WRITE_FIELD(this, offset, value);
-  WRITE_BARRIER(GetHeap(), this, offset, value);
+  WRITE_BARRIER(this, offset, value);
 }

 void JSObject::SetEmbedderField(int index, Smi* value) {
@@ -1592,7 +1592,7 @@ void JSObject::RawFastPropertyAtPut(FieldIndex index, Object* value) {
   if (index.is_inobject()) {
     int offset = index.offset();
     WRITE_FIELD(this, offset, value);
-    WRITE_BARRIER(GetHeap(), this, offset, value);
+    WRITE_BARRIER(this, offset, value);
   } else {
     property_array()->set(index.outobject_array_index(), value);
   }
@@ -1668,7 +1668,7 @@ Object* JSObject::InObjectPropertyAtPut(int index,
   // Adjust for the number of properties stored in the object.
   int offset = GetInObjectPropertyOffset(index);
   WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
   return value;
 }

@@ -1735,7 +1735,7 @@ void PropertyArray::set(int index, Object* value) {
   DCHECK_LT(index, this->length());
   int offset = kHeaderSize + index * kPointerSize;
   RELAXED_WRITE_FIELD(this, offset, value);
-  WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset, value);
+  WRITE_BARRIER(this, offset, value);
 }

 int RegExpMatchInfo::NumberOfCaptureRegisters() {
@@ -1834,8 +1834,7 @@ void PropertyArray::set(int index, Object* value, WriteBarrierMode mode) {
   DCHECK_LT(index, this->length());
   int offset = kHeaderSize + index * kPointerSize;
   RELAXED_WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset,
-                            value, mode);
+  CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
 }

 Object** PropertyArray::data_start() {
@@ -2572,7 +2571,7 @@ Context* JSFunction::native_context() { return context()->native_context(); }

 void JSFunction::set_context(Object* value) {
   DCHECK(value->IsUndefined() || value->IsContext());
   WRITE_FIELD(this, kContextOffset, value);
-  WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
+  WRITE_BARRIER(this, kContextOffset, value);
 }

 ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, Object,
@@ -2651,8 +2650,7 @@ void SmallOrderedHashTable<Derived>::SetDataEntry(int entry, int relative_index,
                                                   Object* value) {
   Address entry_offset = GetDataEntryOffset(entry, relative_index);
   RELAXED_WRITE_FIELD(this, entry_offset, value);
-  WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
-                static_cast<int>(entry_offset), value);
+  WRITE_BARRIER(this, static_cast<int>(entry_offset), value);
 }

 ACCESSORS(JSValue, value, Object, kValueOffset)
diff --git a/src/objects/fixed-array-inl.h b/src/objects/fixed-array-inl.h
index 800b34a2e4..91003105a8 100644
--- a/src/objects/fixed-array-inl.h
+++ b/src/objects/fixed-array-inl.h
@@ -99,7 +99,7 @@ void FixedArray::set(int index, Object* value) {
   DCHECK_LT(index, this->length());
   int offset = kHeaderSize + index * kPointerSize;
   RELAXED_WRITE_FIELD(this, offset, value);
-  WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset, value);
+  WRITE_BARRIER(this, offset, value);
 }

 void FixedArray::set(int index, Object* value, WriteBarrierMode mode) {
@@ -108,8 +108,7 @@ void FixedArray::set(int index, Object* value, WriteBarrierMode mode) {
   DCHECK_LT(index, this->length());
   int offset = kHeaderSize + index * kPointerSize;
   RELAXED_WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset,
-                            value, mode);
+  CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
 }

 void FixedArray::NoWriteBarrierSet(FixedArray* array, int index,
@@ -245,7 +244,7 @@ void WeakFixedArray::Set(int index, MaybeObject* value) {
   DCHECK_LT(index, length());
   int offset = OffsetOfElementAt(index);
   RELAXED_WRITE_FIELD(this, offset, value);
-  WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset, value);
+  WEAK_WRITE_BARRIER(this, offset, value);
 }

 void WeakFixedArray::Set(int index, MaybeObject* value, WriteBarrierMode mode) {
@@ -253,8 +252,7 @@ void WeakFixedArray::Set(int index, MaybeObject* value, WriteBarrierMode mode) {
   DCHECK_LT(index, length());
   int offset = OffsetOfElementAt(index);
   RELAXED_WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
-                                 offset, value, mode);
+  CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
 }

 MaybeObject** WeakFixedArray::data_start() {
@@ -280,8 +278,7 @@ void WeakArrayList::Set(int index, MaybeObject* value, WriteBarrierMode mode) {
   DCHECK_LT(index, this->capacity());
   int offset = OffsetOfElementAt(index);
   RELAXED_WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
-                                 offset, value, mode);
+  CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
 }

 MaybeObject** WeakArrayList::data_start() {
diff --git a/src/objects/js-array-inl.h b/src/objects/js-array-inl.h
index bfb05d9e14..5ecdb8b1ac 100644
--- a/src/objects/js-array-inl.h
+++ b/src/objects/js-array-inl.h
@@ -168,7 +168,7 @@ Object* JSArrayBufferView::byte_offset() const {

 void JSArrayBufferView::set_byte_offset(Object* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kByteOffsetOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kByteOffsetOffset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(this, kByteOffsetOffset, value, mode);
 }

 Object* JSArrayBufferView::byte_length() const {
@@ -178,7 +178,7 @@ Object* JSArrayBufferView::byte_length() const {

 void JSArrayBufferView::set_byte_length(Object* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kByteLengthOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kByteLengthOffset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(this, kByteLengthOffset, value, mode);
 }

 ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
@@ -208,7 +208,7 @@ size_t JSTypedArray::length_value() const {

 void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kLengthOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kLengthOffset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(this, kLengthOffset, value, mode);
 }

 bool JSTypedArray::is_on_heap() const {
diff --git a/src/objects/map-inl.h b/src/objects/map-inl.h
index 57b0e8c6e6..9d93326b8f 100644
--- a/src/objects/map-inl.h
+++ b/src/objects/map-inl.h
@@ -569,8 +569,7 @@ Object* Map::prototype() const { return READ_FIELD(this, kPrototypeOffset); }

 void Map::set_prototype(Object* value, WriteBarrierMode mode) {
   DCHECK(value->IsNull() || value->IsJSReceiver());
   WRITE_FIELD(this, kPrototypeOffset, value);
-  CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
-                            kPrototypeOffset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(this, kPrototypeOffset, value, mode);
 }

 LayoutDescriptor* Map::layout_descriptor_gc_safe() const {
@@ -690,8 +689,7 @@ Object* Map::prototype_info() const {

 void Map::set_prototype_info(Object* value, WriteBarrierMode mode) {
   CHECK(is_prototype_map());
   WRITE_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset, value);
-  CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
-                            Map::kTransitionsOrPrototypeInfoOffset, value,
+  CONDITIONAL_WRITE_BARRIER(this, Map::kTransitionsOrPrototypeInfoOffset, value,
                             mode);
 }

diff --git a/src/objects/object-macros.h b/src/objects/object-macros.h
index 473d4f821c..d14b7b2080 100644
--- a/src/objects/object-macros.h
+++ b/src/objects/object-macros.h
@@ -77,18 +77,17 @@
     WRITE_UINT8_FIELD(this, offset, value); \
   }

-#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
-                           set_condition)                             \
-  type* holder::name() const {                                        \
-    type* value = type::cast(READ_FIELD(this, offset));               \
-    DCHECK(get_condition);                                            \
-    return value;                                                     \
-  }                                                                   \
-  void holder::set_##name(type* value, WriteBarrierMode mode) {       \
-    DCHECK(set_condition);                                            \
-    WRITE_FIELD(this, offset, value);                                 \
-    CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,  \
-                              offset, value, mode);                      \
+#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
+                           set_condition)                             \
+  type* holder::name() const {                                        \
+    type* value = type::cast(READ_FIELD(this, offset));               \
+    DCHECK(get_condition);                                            \
+    return value;                                                     \
+  }                                                                   \
+  void holder::set_##name(type* value, WriteBarrierMode mode) {       \
+    DCHECK(set_condition);                                            \
+    WRITE_FIELD(this, offset, value);                                 \
+    CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);             \
   }
 #define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
   ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)
@@ -96,18 +95,17 @@
 #define ACCESSORS(holder, name, type, offset) \
   ACCESSORS_CHECKED(holder, name, type, offset, true)

-#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition,    \
-                                set_condition)                          \
-  MaybeObject* holder::name() const {                                   \
-    MaybeObject* value = READ_WEAK_FIELD(this, offset);                 \
-    DCHECK(get_condition);                                              \
-    return value;                                                       \
-  }                                                                     \
-  void holder::set_##name(MaybeObject* value, WriteBarrierMode mode) {  \
-    DCHECK(set_condition);                                              \
-    WRITE_WEAK_FIELD(this, offset, value);                              \
-    CONDITIONAL_WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, \
-                                   offset, value, mode);                \
+#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition,   \
+                                set_condition)                         \
+  MaybeObject* holder::name() const {                                  \
+    MaybeObject* value = READ_WEAK_FIELD(this, offset);                \
+    DCHECK(get_condition);                                             \
+    return value;                                                      \
+  }                                                                    \
+  void holder::set_##name(MaybeObject* value, WriteBarrierMode mode) { \
+    DCHECK(set_condition);                                             \
+    WRITE_WEAK_FIELD(this, offset, value);                             \
+    CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);         \
   }

 #define WEAK_ACCESSORS_CHECKED(holder, name, offset, condition) \
@@ -218,48 +216,45 @@
       reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
       reinterpret_cast<base::AtomicWord>(value));

-#define WRITE_BARRIER(heap, object, offset, value)                      \
-  do {                                                                  \
-    Heap* __heap__ = heap;                                              \
-    __heap__->incremental_marking()->RecordWrite(                       \
-        object, HeapObject::RawField(object, offset), value);           \
-    __heap__->RecordWrite(object, HeapObject::RawField(object, offset), \
-                          value);                                       \
+#define WRITE_BARRIER(object, offset, value)                                  \
+  do {                                                                        \
+    DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object));                    \
+    MarkingBarrier(object, HeapObject::RawField(object, offset), value);      \
+    GenerationalBarrier(object, HeapObject::RawField(object, offset), value); \
   } while (false)

-#define WEAK_WRITE_BARRIER(heap, object, offset, value)                \
-  do {                                                                 \
-    Heap* __heap__ = heap;                                             \
-    __heap__->incremental_marking()->RecordMaybeWeakWrite(             \
-        object, HeapObject::RawMaybeWeakField(object, offset), value); \
-    __heap__->RecordWrite(                                             \
-        object, HeapObject::RawMaybeWeakField(object, offset), value); \
+#define WEAK_WRITE_BARRIER(object, offset, value)                             \
+  do {                                                                        \
+    DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object));                    \
+    MarkingBarrier(object, HeapObject::RawMaybeWeakField(object, offset),     \
+                   value);                                                    \
+    GenerationalBarrier(object, HeapObject::RawMaybeWeakField(object, offset), \
+                        value);                                               \
   } while (false)

-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode)       \
-  do {                                                                     \
-    Heap* __heap__ = heap;                                                 \
-    if (mode != SKIP_WRITE_BARRIER) {                                      \
-      if (mode == UPDATE_WRITE_BARRIER) {                                  \
-        __heap__->incremental_marking()->RecordWrite(                      \
-            object, HeapObject::RawField(object, offset), value);          \
-      }                                                                    \
-      __heap__->RecordWrite(object, HeapObject::RawField(object, offset),  \
-                            value);                                        \
-    }                                                                      \
+#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode)                \
+  do {                                                                        \
+    DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object));                    \
+    if (mode != SKIP_WRITE_BARRIER) {                                         \
+      if (mode == UPDATE_WRITE_BARRIER) {                                     \
+        MarkingBarrier(object, HeapObject::RawField(object, offset), value);  \
+      }                                                                       \
+      GenerationalBarrier(object, HeapObject::RawField(object, offset),       \
+                          value);                                             \
+    }                                                                         \
   } while (false)

-#define CONDITIONAL_WEAK_WRITE_BARRIER(heap, object, offset, value, mode)   \
-  do {                                                                      \
-    Heap* __heap__ = heap;                                                  \
-    if (mode != SKIP_WRITE_BARRIER) {                                       \
-      if (mode == UPDATE_WRITE_BARRIER) {                                   \
-        __heap__->incremental_marking()->RecordMaybeWeakWrite(              \
-            object, HeapObject::RawMaybeWeakField(object, offset), value);  \
-      }                                                                     \
-      __heap__->RecordWrite(                                                \
-          object, HeapObject::RawMaybeWeakField(object, offset), value);    \
-    }                                                                       \
+#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode)           \
+  do {                                                                        \
+    DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object));                    \
+    if (mode != SKIP_WRITE_BARRIER) {                                         \
+      if (mode == UPDATE_WRITE_BARRIER) {                                     \
+        MarkingBarrier(object, HeapObject::RawMaybeWeakField(object, offset), \
+                       value);                                                \
+      }                                                                       \
+      GenerationalBarrier(                                                    \
+          object, HeapObject::RawMaybeWeakField(object, offset), value);      \
+    }                                                                         \
   } while (false)

 #define READ_DOUBLE_FIELD(p, offset) ReadDoubleValue(FIELD_ADDR(p, offset))
diff --git a/src/objects/shared-function-info-inl.h b/src/objects/shared-function-info-inl.h
index 1cd15f54a6..dc107d798a 100644
--- a/src/objects/shared-function-info-inl.h
+++ b/src/objects/shared-function-info-inl.h
@@ -35,8 +35,7 @@ void PreParsedScopeData::set_child_data(int index, Object* value,
   DCHECK_LT(index, this->length());
   int offset = kChildDataStartOffset + index * kPointerSize;
   RELAXED_WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset,
-                            value, mode);
+  CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
 }

 Object** PreParsedScopeData::child_data_start() const {
@@ -361,7 +360,7 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo* scope_info,
   }
   WRITE_FIELD(this, kNameOrScopeInfoOffset,
               reinterpret_cast<Object*>(scope_info));
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kNameOrScopeInfoOffset,
+  CONDITIONAL_WRITE_BARRIER(this, kNameOrScopeInfoOffset,
                             reinterpret_cast<Object*>(scope_info), mode);
 }

diff --git a/src/objects/string-inl.h b/src/objects/string-inl.h
index 3d2c459653..5634eeeae2 100644
--- a/src/objects/string-inl.h
+++ b/src/objects/string-inl.h
@@ -498,7 +498,7 @@ void SlicedString::set_parent(Isolate* isolate, String* parent,
                               WriteBarrierMode mode) {
   DCHECK(parent->IsSeqString() || parent->IsExternalString());
   WRITE_FIELD(this, kParentOffset, parent);
-  CONDITIONAL_WRITE_BARRIER(isolate->heap(), this, kParentOffset, parent, mode);
+  CONDITIONAL_WRITE_BARRIER(this, kParentOffset, parent, mode);
 }

 SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)
@@ -512,7 +512,7 @@ Object* ConsString::unchecked_first() { return READ_FIELD(this, kFirstOffset); }

 void ConsString::set_first(Isolate* isolate, String* value,
                            WriteBarrierMode mode) {
   WRITE_FIELD(this, kFirstOffset, value);
-  CONDITIONAL_WRITE_BARRIER(isolate->heap(), this, kFirstOffset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(this, kFirstOffset, value, mode);
 }

 String* ConsString::second() {
@@ -526,7 +526,7 @@ Object* ConsString::unchecked_second() {

 void ConsString::set_second(Isolate* isolate, String* value,
                             WriteBarrierMode mode) {
   WRITE_FIELD(this, kSecondOffset, value);
-  CONDITIONAL_WRITE_BARRIER(isolate->heap(), this, kSecondOffset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(this, kSecondOffset, value, mode);
 }

 ACCESSORS(ThinString, actual, String, kActualOffset);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 174bf9e191..6267447a29 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -15,6 +15,7 @@
 #include "src/debug/debug.h"
 #include "src/external-reference-table.h"
 #include "src/frames-inl.h"
+#include "src/globals.h"
 #include "src/heap/heap-inl.h"
 #include "src/instruction-stream.h"
 #include "src/objects-inl.h"
@@ -2595,9 +2596,9 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                    Label::Distance condition_met_distance) {
   DCHECK(cc == zero || cc == not_zero);
   if (scratch == object) {
-    andp(scratch, Immediate(~Page::kPageAlignmentMask));
+    andp(scratch, Immediate(~kPageAlignmentMask));
   } else {
-    movp(scratch, Immediate(~Page::kPageAlignmentMask));
+    movp(scratch, Immediate(~kPageAlignmentMask));
     andp(scratch, object);
   }
   if (mask < (1 << kBitsPerByte)) {
diff --git a/test/unittests/heap/spaces-unittest.cc b/test/unittests/heap/spaces-unittest.cc
index 2d499df4f3..75804792d4 100644
--- a/test/unittests/heap/spaces-unittest.cc
+++ b/test/unittests/heap/spaces-unittest.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.

 #include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
 #include "src/heap/spaces-inl.h"
 #include "src/isolate.h"
 #include "test/unittests/test-utils.h"
@@ -51,5 +52,70 @@ TEST_F(SpacesTest, CompactionSpaceMerge) {
   delete compaction_space;
 }

+TEST_F(SpacesTest, WriteBarrierFromHeapObject) {
+  Heap* heap = i_isolate()->heap();
+  CompactionSpace* temporary_space =
+      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
+  EXPECT_NE(nullptr, temporary_space);
+  HeapObject* object =
+      temporary_space->AllocateRawUnaligned(kMaxRegularHeapObjectSize)
+          .ToObjectChecked();
+  EXPECT_NE(nullptr, object);
+
+  MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+  heap_internals::MemoryChunk* slim_chunk =
+      heap_internals::MemoryChunk::FromHeapObject(object);
+  EXPECT_EQ(static_cast<void*>(chunk), static_cast<void*>(slim_chunk));
+  delete temporary_space;
+}
+
+TEST_F(SpacesTest, WriteBarrierIsMarking) {
+  char memory[256];
+  memset(&memory, 0, sizeof(memory));
+  MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(&memory);
+  heap_internals::MemoryChunk* slim_chunk =
+      reinterpret_cast<heap_internals::MemoryChunk*>(&memory);
+  EXPECT_FALSE(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING));
+  EXPECT_FALSE(slim_chunk->IsMarking());
+  chunk->SetFlag(MemoryChunk::INCREMENTAL_MARKING);
+  EXPECT_TRUE(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING));
+  EXPECT_TRUE(slim_chunk->IsMarking());
+  chunk->ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
+  EXPECT_FALSE(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING));
+  EXPECT_FALSE(slim_chunk->IsMarking());
+}
+
+TEST_F(SpacesTest, WriteBarrierInNewSpaceToSpace) {
+  char memory[256];
+  memset(&memory, 0, sizeof(memory));
+  MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(&memory);
+  heap_internals::MemoryChunk* slim_chunk =
+      reinterpret_cast<heap_internals::MemoryChunk*>(&memory);
+  EXPECT_FALSE(chunk->InNewSpace());
+  EXPECT_FALSE(slim_chunk->InNewSpace());
+  chunk->SetFlag(MemoryChunk::IN_TO_SPACE);
+  EXPECT_TRUE(chunk->InNewSpace());
+  EXPECT_TRUE(slim_chunk->InNewSpace());
+  chunk->ClearFlag(MemoryChunk::IN_TO_SPACE);
+  EXPECT_FALSE(chunk->InNewSpace());
+  EXPECT_FALSE(slim_chunk->InNewSpace());
+}
+
+TEST_F(SpacesTest, WriteBarrierInNewSpaceFromSpace) {
+  char memory[256];
+  memset(&memory, 0, sizeof(memory));
+  MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(&memory);
+  heap_internals::MemoryChunk* slim_chunk =
+      reinterpret_cast<heap_internals::MemoryChunk*>(&memory);
+  EXPECT_FALSE(chunk->InNewSpace());
+  EXPECT_FALSE(slim_chunk->InNewSpace());
+  chunk->SetFlag(MemoryChunk::IN_FROM_SPACE);
+  EXPECT_TRUE(chunk->InNewSpace());
+  EXPECT_TRUE(slim_chunk->InNewSpace());
+  chunk->ClearFlag(MemoryChunk::IN_FROM_SPACE);
+  EXPECT_FALSE(chunk->InNewSpace());
+  EXPECT_FALSE(slim_chunk->InNewSpace());
+}
+
 }  // namespace internal
 }  // namespace v8
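
Note on the mechanism: everything the barrier fast path needs from a page is (a) that pages are aligned to 1 << kPageSizeBits, so the page header is reachable from any object address by masking with ~kPageAlignmentMask, and (b) that the flag word sits at a constant offset (MemoryChunk::kFlagsOffset), which the static_asserts added to heap.cc pin down. The following is a minimal stand-alone sketch of that fast path, not code from the patch; the constant values here (kPageSizeBits = 19, the bit positions) are illustrative assumptions, while the real ones live in src/globals.h and src/heap/spaces.h.

// Stand-alone sketch of the slim write-barrier page-flag check.
// Compile with any C++14 compiler; all names below are local stand-ins.
#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr uintptr_t kPageSizeBits = 19;  // assumed; see src/globals.h
constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << kPageSizeBits) - 1;
constexpr uintptr_t kFlagsOffset = sizeof(size_t);  // flags follow the size word
constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;

struct SlimChunk {
  // Pages are power-of-two aligned, so masking off the low bits of any
  // object address yields the start of its page header.
  static SlimChunk* FromAddress(void* object) {
    return reinterpret_cast<SlimChunk*>(
        reinterpret_cast<uintptr_t>(object) & ~kPageAlignmentMask);
  }
  // Load the flag word stored at a constant offset from the page start
  // (memcpy instead of a type-punning cast to keep the sketch UB-free).
  uintptr_t flags() const {
    uintptr_t word;
    std::memcpy(&word, reinterpret_cast<const char*>(this) + kFlagsOffset,
                sizeof(word));
    return word;
  }
  bool IsMarking() const { return (flags() & kMarkingBit) != 0; }
};

int main() {
  // Simulate a page header with a zeroed buffer, the same trick the new
  // spaces-unittest.cc tests use.
  alignas(alignof(uintptr_t)) char memory[256] = {};
  SlimChunk* chunk = reinterpret_cast<SlimChunk*>(&memory);

  std::printf("marking: %d\n", chunk->IsMarking());  // prints 0
  uintptr_t word = kMarkingBit;                      // GC enables marking
  std::memcpy(memory + kFlagsOffset, &word, sizeof(word));
  std::printf("marking: %d\n", chunk->IsMarking());  // prints 1
}

The CheckPageFlag changes in the macro-assemblers above are these same two steps emitted as machine code: mask the object register with ~kPageAlignmentMask, then load and test the word at MemoryChunk::kFlagsOffset.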