[heap] Allocate black on background threads during marking

Objects allocated on a background thread during incremental marking need to be allocated black. This prevents the concurrent marker from observing uninitialized objects.

Bug: v8:10315
Change-Id: Ia4b05a2a72e4142c79b31a01cbf162a6599a18c0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2196347
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67967}
Author: Dominik Inführ
Date: 2020-05-26 14:27:52 +02:00 (committed by Commit Bot)
Parent: a7a201b260
Commit: cf19f5f4e1
12 changed files with 262 additions and 24 deletions
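The invariant behind this change, as a minimal sketch (all names here are illustrative stand-ins, not V8's actual API; V8 uses per-page mark bitmaps and LocalAllocationBuffers instead of a set):

#include <atomic>
#include <cstddef>
#include <cstdlib>
#include <mutex>
#include <unordered_set>

struct MarkState {
  std::mutex mu;
  std::unordered_set<void*> black;  // stand-in for a per-page mark bitmap
  void MarkBlack(void* p) {
    std::lock_guard<std::mutex> guard(mu);
    black.insert(p);
  }
};

std::atomic<bool> black_allocation{false};  // true while marking is active
MarkState mark_state;

void* AllocateOnBackground(std::size_t size) {
  void* p = std::malloc(size);
  // Mark the object black before it becomes reachable by the marker:
  // black objects are never re-visited, so the concurrent marker cannot
  // observe the object's still-uninitialized fields.
  if (black_allocation.load(std::memory_order_relaxed)) {
    mark_state.MarkBlack(p);
  }
  return p;
}

In the change itself this happens on two paths: a background LAB has its whole range marked black when it is handed out (CreateBlackAreaBackground), while objects allocated outside a LAB are marked individually (MarkBlackBackground).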

src/heap/concurrent-allocator-inl.h

@@ -8,8 +8,8 @@
#include "include/v8-internal.h"
#include "src/common/globals.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"
@@ -23,15 +23,7 @@ AllocationResult ConcurrentAllocator::Allocate(int object_size,
// TODO(dinfuehr): Add support for allocation observers
CHECK(FLAG_concurrent_allocation);
if (object_size > kMaxLabObjectSize) {
auto result = space_->SlowGetLinearAllocationAreaBackground(
local_heap_, object_size, object_size, alignment, origin);
if (result) {
HeapObject object = HeapObject::FromAddress(result->first);
return AllocationResult(object);
} else {
return AllocationResult::Retry(OLD_SPACE);
}
return AllocateOutsideLab(object_size, alignment, origin);
}
return AllocateInLab(object_size, alignment, origin);
@@ -69,6 +61,12 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
if (!result) return false;
if (local_heap_->heap()->incremental_marking()->black_allocation()) {
Address top = result->first;
Address limit = top + result->second;
Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
}
HeapObject object = HeapObject::FromAddress(result->first);
LocalAllocationBuffer saved_lab = std::move(lab_);
lab_ = LocalAllocationBuffer::FromResult(

src/heap/concurrent-allocator.cc

@@ -6,6 +6,7 @@
#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/local-heap.h"
#include "src/heap/marking.h"
namespace v8 {
namespace internal {
@@ -39,5 +40,43 @@ void ConcurrentAllocator::MakeLinearAllocationAreaIterable() {
lab_.MakeIterable();
}
void ConcurrentAllocator::MarkLinearAllocationAreaBlack() {
Address top = lab_.top();
Address limit = lab_.limit();
if (top != kNullAddress && top != limit) {
Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
}
}
void ConcurrentAllocator::UnmarkLinearAllocationArea() {
Address top = lab_.top();
Address limit = lab_.limit();
if (top != kNullAddress && top != limit) {
Page::FromAllocationAreaAddress(top)->DestroyBlackAreaBackground(top,
limit);
}
}
AllocationResult ConcurrentAllocator::AllocateOutsideLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
auto result = space_->SlowGetLinearAllocationAreaBackground(
local_heap_, object_size, object_size, alignment, origin);
if (result) {
HeapObject object = HeapObject::FromAddress(result->first);
if (local_heap_->heap()->incremental_marking()->black_allocation()) {
local_heap_->heap()->incremental_marking()->MarkBlackBackground(
object, object_size);
}
return AllocationResult(object);
} else {
return AllocationResult::Retry(OLD_SPACE);
}
}
} // namespace internal
} // namespace v8

src/heap/concurrent-allocator.h

@@ -36,6 +36,8 @@ class ConcurrentAllocator {
void FreeLinearAllocationArea();
void MakeLinearAllocationAreaIterable();
void MarkLinearAllocationAreaBlack();
void UnmarkLinearAllocationArea();
private:
inline bool EnsureLab(AllocationOrigin origin);
@@ -43,6 +45,9 @@
AllocationAlignment alignment,
AllocationOrigin origin);
V8_EXPORT_PRIVATE AllocationResult AllocateOutsideLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin);
V8_EXPORT_PRIVATE Address PerformCollectionAndAllocateAgain(
int object_size, AllocationAlignment alignment, AllocationOrigin origin);

src/heap/heap.cc

@@ -2073,9 +2073,6 @@ size_t Heap::PerformGarbageCollection(
base::Optional<SafepointScope> optional_safepoint_scope;
if (FLAG_local_heaps) {
optional_safepoint_scope.emplace(this);
// Fill and reset all LABs
safepoint()->IterateLocalHeaps(
[](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -4204,6 +4201,13 @@ void Heap::Verify() {
CHECK(HasBeenSetUp());
HandleScope scope(isolate());
if (FLAG_local_heaps) {
// Ensure heap is iterable
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->MakeLinearAllocationAreaIterable();
});
}
// We have to wait here for the sweeper threads to have an iterable heap.
mark_compact_collector()->EnsureSweepingCompleted();
array_buffer_sweeper()->EnsureFinished();

src/heap/incremental-marking.cc

@@ -93,6 +93,13 @@ void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
collector_->VisitObject(obj);
}
void IncrementalMarking::MarkBlackBackground(HeapObject obj, int object_size) {
MarkBit mark_bit = atomic_marking_state()->MarkBitFrom(obj);
Marking::MarkBlack<AccessMode::ATOMIC>(mark_bit);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
IncrementLiveBytesBackground(chunk, static_cast<intptr_t>(object_size));
}
void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
DCHECK(IsMarking());
DCHECK(MemoryChunk::FromHeapObject(from)->SweepingDone());
@@ -367,6 +374,11 @@ void IncrementalMarking::StartBlackAllocation() {
heap()->old_space()->MarkLinearAllocationAreaBlack();
heap()->map_space()->MarkLinearAllocationAreaBlack();
heap()->code_space()->MarkLinearAllocationAreaBlack();
if (FLAG_local_heaps) {
heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->MarkLinearAllocationAreaBlack();
});
}
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation started\n");
@@ -378,6 +390,11 @@ void IncrementalMarking::PauseBlackAllocation() {
heap()->old_space()->UnmarkLinearAllocationArea();
heap()->map_space()->UnmarkLinearAllocationArea();
heap()->code_space()->UnmarkLinearAllocationArea();
if (FLAG_local_heaps) {
heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->UnmarkLinearAllocationArea();
});
}
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation paused\n");
@@ -790,6 +807,20 @@ void IncrementalMarking::Stop() {
SetState(STOPPED);
is_compacting_ = false;
FinishBlackAllocation();
if (FLAG_local_heaps) {
// Merge live bytes counters of background threads
for (auto pair : background_live_bytes_) {
MemoryChunk* memory_chunk = pair.first;
intptr_t live_bytes = pair.second;
if (live_bytes) {
marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
}
}
background_live_bytes_.clear();
}
}
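The background_live_bytes_ map used above implements a merge-on-stop pattern: background threads accumulate per-chunk live-byte deltas under a mutex, and the main thread folds them into the canonical counters once marking stops. A self-contained sketch of the pattern, with stand-in types (Chunk, BackgroundCounters) rather than V8's:

#include <cstdint>
#include <mutex>
#include <unordered_map>

struct Chunk {
  std::intptr_t live_bytes = 0;  // canonical counter, owned by the main thread
};

class BackgroundCounters {
 public:
  // Called from background threads; taking a mutex is acceptable since
  // this sits on the slow (outside-LAB) allocation path.
  void Increment(Chunk* chunk, std::intptr_t by) {
    std::lock_guard<std::mutex> guard(mutex_);
    pending_[chunk] += by;
  }

  // Called on the main thread when marking stops, i.e. at a point where
  // no background thread can be allocating concurrently.
  void MergeAll() {
    std::lock_guard<std::mutex> guard(mutex_);
    for (auto& entry : pending_) entry.first->live_bytes += entry.second;
    pending_.clear();
  }

 private:
  std::mutex mutex_;
  std::unordered_map<Chunk*, std::intptr_t> pending_;
};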

src/heap/incremental-marking.h

@@ -5,6 +5,7 @@
#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
#define V8_HEAP_INCREMENTAL_MARKING_H_
#include "src/base/platform/mutex.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-job.h"
#include "src/heap/mark-compact.h"
@@ -206,6 +207,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
// the concurrent marker.
void MarkBlackAndVisitObjectDueToLayoutChange(HeapObject obj);
void MarkBlackBackground(HeapObject obj, int object_size);
bool IsCompacting() { return IsMarking() && is_compacting_; }
void ProcessBlackAllocatedObject(HeapObject obj);
@@ -236,6 +239,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
bool IsBelowActivationThresholds() const;
void IncrementLiveBytesBackground(MemoryChunk* chunk, intptr_t by) {
base::MutexGuard guard(&background_live_bytes_mutex_);
background_live_bytes_[chunk] += by;
}
private:
class Observer : public AllocationObserver {
public:
@@ -338,6 +346,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
AtomicMarkingState atomic_marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
base::Mutex background_live_bytes_mutex_;
std::unordered_map<MemoryChunk*, intptr_t> background_live_bytes_;
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
} // namespace internal

src/heap/local-heap.cc

@@ -107,5 +107,13 @@ void LocalHeap::MakeLinearAllocationAreaIterable() {
old_space_allocator_.MakeLinearAllocationAreaIterable();
}
void LocalHeap::MarkLinearAllocationAreaBlack() {
old_space_allocator_.MarkLinearAllocationAreaBlack();
}
void LocalHeap::UnmarkLinearAllocationArea() {
old_space_allocator_.UnmarkLinearAllocationArea();
}
} // namespace internal
} // namespace v8

src/heap/local-heap.h

@@ -48,6 +48,17 @@ class LocalHeap {
ConcurrentAllocator* old_space_allocator() { return &old_space_allocator_; }
// Mark/Unmark linear allocation areas black. Used for black allocation.
void MarkLinearAllocationAreaBlack();
void UnmarkLinearAllocationArea();
// Give up linear allocation areas. Used for mark-compact GC.
void FreeLinearAllocationArea();
// Create filler object in linear allocation areas. Verifying requires
// iterable heap.
void MakeLinearAllocationAreaIterable();
private:
enum class ThreadState {
// Threads in this state need to be stopped in a safepoint.
@@ -68,9 +79,6 @@
void EnterSafepoint();
void FreeLinearAllocationArea();
void MakeLinearAllocationAreaIterable();
Heap* heap_;
base::Mutex state_mutex_;

src/heap/mark-compact.cc

@@ -32,6 +32,7 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/safepoint.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
@@ -869,6 +870,13 @@ void MarkCompactCollector::Prepare() {
space = spaces.Next()) {
space->PrepareForMarkCompact();
}
if (FLAG_local_heaps) {
// Fill and reset all background thread LABs
heap_->safepoint()->IterateLocalHeaps(
[](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
}
heap()->account_external_memory_concurrently_freed();
}

src/heap/spaces.cc

@@ -226,6 +226,19 @@ void Page::CreateBlackArea(Address start, Address end) {
marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
}
void Page::CreateBlackAreaBackground(Address start, Address end) {
DCHECK(heap()->incremental_marking()->black_allocation());
DCHECK_EQ(Page::FromAddress(start), this);
DCHECK_LT(start, end);
DCHECK_EQ(Page::FromAddress(end - 1), this);
IncrementalMarking::AtomicMarkingState* marking_state =
heap()->incremental_marking()->atomic_marking_state();
marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
AddressToMarkbitIndex(end));
heap()->incremental_marking()->IncrementLiveBytesBackground(
this, static_cast<intptr_t>(end - start));
}
void Page::DestroyBlackArea(Address start, Address end) {
DCHECK(heap()->incremental_marking()->black_allocation());
DCHECK_EQ(Page::FromAddress(start), this);
@@ -238,6 +251,19 @@
marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
}
void Page::DestroyBlackAreaBackground(Address start, Address end) {
DCHECK(heap()->incremental_marking()->black_allocation());
DCHECK_EQ(Page::FromAddress(start), this);
DCHECK_LT(start, end);
DCHECK_EQ(Page::FromAddress(end - 1), this);
IncrementalMarking::AtomicMarkingState* marking_state =
heap()->incremental_marking()->atomic_marking_state();
marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
AddressToMarkbitIndex(end));
heap()->incremental_marking()->IncrementLiveBytesBackground(
this, -static_cast<intptr_t>(end - start));
}
// -----------------------------------------------------------------------------
// PagedSpace implementation
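CreateBlackAreaBackground and DestroyBlackAreaBackground above set and clear whole ranges of mark bits through the atomic marking state. A naive, self-contained illustration of such an atomic bitmap range update (cell layout and names are assumptions; V8's SetRange/ClearRange operate cell-at-a-time rather than bit-at-a-time):

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <vector>

class Bitmap {
 public:
  explicit Bitmap(std::size_t bits) : cells_((bits + 31) / 32) {}

  // Sets mark bits [start, end) with atomic read-modify-write so that
  // concurrent markers touching the same cells stay consistent.
  void SetRange(std::size_t start, std::size_t end) {
    for (std::size_t i = start; i < end; i++) {
      cells_[i / 32].fetch_or(1u << (i % 32), std::memory_order_relaxed);
    }
  }

  // Clears mark bits [start, end); used when black allocation is paused
  // and a previously blackened area must become white again.
  void ClearRange(std::size_t start, std::size_t end) {
    for (std::size_t i = start; i < end; i++) {
      cells_[i / 32].fetch_and(~(1u << (i % 32)), std::memory_order_relaxed);
    }
  }

 private:
  std::vector<std::atomic<std::uint32_t>> cells_;
};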

src/heap/spaces.h

@@ -629,7 +629,9 @@ class Page : public MemoryChunk {
size_t ShrinkToHighWaterMark();
V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
V8_EXPORT_PRIVATE void CreateBlackAreaBackground(Address start, Address end);
void DestroyBlackArea(Address start, Address end);
void DestroyBlackAreaBackground(Address start, Address end);
void InitializeFreeListCategories();
void AllocateFreeListCategories();
@@ -1274,6 +1276,9 @@ class LocalAllocationBuffer {
V8_EXPORT_PRIVATE LinearAllocationArea CloseAndMakeIterable();
void MakeIterable();
Address top() const { return allocation_info_.top(); }
Address limit() const { return allocation_info_.limit(); }
private:
V8_EXPORT_PRIVATE LocalAllocationBuffer(
Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT;

test/cctest/heap/test-concurrent-allocation.cc

@@ -17,15 +17,27 @@
#include "src/heap/local-heap.h"
#include "src/heap/safepoint.h"
#include "src/objects/heap-number.h"
#include "src/objects/heap-object.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
namespace v8 {
namespace internal {
void CreateFixedArray(Heap* heap, Address start, int size) {
HeapObject object = HeapObject::FromAddress(start);
object.set_map_after_allocation(ReadOnlyRoots(heap).fixed_array_map(),
SKIP_WRITE_BARRIER);
FixedArray array = FixedArray::cast(object);
int length = (size - FixedArray::kHeaderSize) / kTaggedSize;
array.set_length(length);
MemsetTagged(array.data_start(), ReadOnlyRoots(heap).undefined_value(),
length);
}
const int kNumIterations = 2000;
const int kObjectSize = 10 * kTaggedSize;
const int kLargeObjectSize = 8 * KB;
const int kSmallObjectSize = 10 * kTaggedSize;
const int kMediumObjectSize = 8 * KB;
class ConcurrentAllocationThread final : public v8::base::Thread {
public:
@@ -40,15 +52,13 @@ class ConcurrentAllocationThread final : public v8::base::Thread {
for (int i = 0; i < kNumIterations; i++) {
Address address = allocator->AllocateOrFail(
kObjectSize, AllocationAlignment::kWordAligned,
kSmallObjectSize, AllocationAlignment::kWordAligned,
AllocationOrigin::kRuntime);
heap_->CreateFillerObjectAt(address, kObjectSize,
ClearRecordedSlots::kNo);
address = allocator->AllocateOrFail(kLargeObjectSize,
CreateFixedArray(heap_, address, kSmallObjectSize);
address = allocator->AllocateOrFail(kMediumObjectSize,
AllocationAlignment::kWordAligned,
AllocationOrigin::kRuntime);
heap_->CreateFillerObjectAt(address, kLargeObjectSize,
ClearRecordedSlots::kNo);
CreateFixedArray(heap_, address, kMediumObjectSize);
if (i % 10 == 0) {
local_heap.Safepoint();
}
@@ -96,5 +106,90 @@ UNINITIALIZED_TEST(ConcurrentAllocationInOldSpace) {
isolate->Dispose();
}
const int kWhiteIterations = 1000;
class ConcurrentBlackAllocationThread final : public v8::base::Thread {
public:
explicit ConcurrentBlackAllocationThread(
Heap* heap, std::vector<Address>* objects, base::Semaphore* sema_white,
base::Semaphore* sema_marking_started)
: v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
heap_(heap),
objects_(objects),
sema_white_(sema_white),
sema_marking_started_(sema_marking_started) {}
void Run() override {
LocalHeap local_heap(heap_);
ConcurrentAllocator* allocator = local_heap.old_space_allocator();
for (int i = 0; i < kNumIterations; i++) {
if (i == kWhiteIterations) {
ParkedScope scope(&local_heap);
sema_white_->Signal();
sema_marking_started_->Wait();
}
Address address = allocator->AllocateOrFail(
kSmallObjectSize, AllocationAlignment::kWordAligned,
AllocationOrigin::kRuntime);
objects_->push_back(address);
CreateFixedArray(heap_, address, kSmallObjectSize);
address = allocator->AllocateOrFail(kMediumObjectSize,
AllocationAlignment::kWordAligned,
AllocationOrigin::kRuntime);
objects_->push_back(address);
CreateFixedArray(heap_, address, kMediumObjectSize);
}
}
Heap* heap_;
std::vector<Address>* objects_;
base::Semaphore* sema_white_;
base::Semaphore* sema_marking_started_;
};
UNINITIALIZED_TEST(ConcurrentBlackAllocation) {
FLAG_concurrent_allocation = true;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
Heap* heap = i_isolate->heap();
FLAG_local_heaps = true;
std::vector<Address> objects;
base::Semaphore sema_white(0);
base::Semaphore sema_marking_started(0);
auto thread = std::make_unique<ConcurrentBlackAllocationThread>(
heap, &objects, &sema_white, &sema_marking_started);
CHECK(thread->Start());
sema_white.Wait();
heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting);
sema_marking_started.Signal();
thread->Join();
const int kObjectsAllocatedPerIteration = 2;
for (int i = 0; i < kNumIterations * kObjectsAllocatedPerIteration; i++) {
Address address = objects[i];
HeapObject object = HeapObject::FromAddress(address);
if (i < kWhiteIterations * kObjectsAllocatedPerIteration) {
CHECK(heap->incremental_marking()->marking_state()->IsWhite(object));
} else {
CHECK(heap->incremental_marking()->marking_state()->IsBlack(object));
}
}
isolate->Dispose();
}
} // namespace internal
} // namespace v8