[heap] Use std::atomic for page flags.
Bug: chromium:852420
Change-Id: I873666df415c6b4919f8b3385494c9a08f105188
Reviewed-on: https://chromium-review.googlesource.com/1170700
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55055}
This commit is contained in: commit 7f3f7e8a95 (parent 17ad3ae47f)
@@ -156,7 +156,7 @@ HeapObject* Factory::AllocateRawArray(int size, PretenureFlag pretenure) {
       isolate()->heap()->AllocateRawWithRetryOrFail(size, space);
   if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
     MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
-    chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
+    chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
   }
   return result;
 }
@@ -376,7 +376,7 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(int length,
   if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
   if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
     MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
-    chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
+    chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
   }
   result->set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
   Handle<FixedArray> array(FixedArray::cast(result), isolate());
@@ -419,8 +419,8 @@ void MarkCompactCollector::RecordSlot(HeapObject* object,
                                       HeapObject* target) {
   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
   Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
-  if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
-      !source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
+  if (target_page->IsEvacuationCandidate() &&
+      !source_page->ShouldSkipEvacuationSlotRecording()) {
     RememberedSet<OLD_TO_OLD>::Insert(source_page,
                                       reinterpret_cast<Address>(slot));
   }
@@ -2176,7 +2176,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
     DCHECK_NOT_NULL(current_page);
     memory_chunk_list_.PushBack(current_page);
     marking_state->ClearLiveness(current_page);
-    current_page->SetFlags(first_page()->GetFlags(),
+    current_page->SetFlags(first_page()->flags(),
                            static_cast<uintptr_t>(Page::kCopyAllFlags));
     heap()->CreateFillerObjectAt(current_page->area_start(),
                                  static_cast<int>(current_page->area_size()),
@@ -2575,7 +2575,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
     memory_chunk_list_.PushBack(new_page);
     marking_state->ClearLiveness(new_page);
     // Duplicate the flags that was set on the old page.
-    new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
+    new_page->SetFlags(last_page()->flags(), Page::kCopyOnFlipFlagsMask);
   }
   AccountCommitted(delta);
   current_capacity_ = new_capacity;
@@ -2650,7 +2650,7 @@ void SemiSpace::RemovePage(Page* page) {
 }
 
 void SemiSpace::PrependPage(Page* page) {
-  page->SetFlags(current_page()->GetFlags(),
+  page->SetFlags(current_page()->flags(),
                  static_cast<uintptr_t>(Page::kCopyAllFlags));
   page->set_owner(this);
   memory_chunk_list_.PushFront(page);
@@ -2666,7 +2666,7 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
   DCHECK(from->first_page());
   DCHECK(to->first_page());
 
-  intptr_t saved_to_space_flags = to->current_page()->GetFlags();
+  intptr_t saved_to_space_flags = to->current_page()->flags();
 
   // We swap all properties but id_.
   std::swap(from->current_capacity_, to->current_capacity_);
@@ -563,36 +563,24 @@ class MemoryChunk {
     return this->address() + (index << kPointerSizeLog2);
   }
 
-  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
-  void SetFlag(Flag flag) {
-    if (access_mode == AccessMode::NON_ATOMIC) {
-      flags_ |= flag;
-    } else {
-      base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
-    }
-  }
+  void SetFlag(Flag flag) { SetFlags(flag, flag); }
 
-  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
-  bool IsFlagSet(Flag flag) {
-    return (GetFlags<access_mode>() & flag) != 0;
-  }
+  bool IsFlagSet(Flag flag) { return (flags() & flag) != 0; }
 
   void ClearFlag(Flag flag) { flags_ &= ~flag; }
   // Set or clear multiple flags at a time. The flags in the mask are set to
   // the value in "flags", the rest retain the current value in |flags_|.
   void SetFlags(uintptr_t flags, uintptr_t mask) {
-    flags_ = (flags_ & ~mask) | (flags & mask);
+    uintptr_t old_flags;
+    uintptr_t new_flags;
+    do {
+      old_flags = flags_;
+      new_flags = (old_flags & ~mask) | (flags & mask);
+    } while (!flags_.compare_exchange_weak(old_flags, new_flags));
   }
 
   // Return all current flags.
-  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
-  uintptr_t GetFlags() {
-    if (access_mode == AccessMode::NON_ATOMIC) {
-      return flags_;
-    } else {
-      return base::AsAtomicWord::Relaxed_Load(&flags_);
-    }
-  }
+  uintptr_t flags() { return flags_; }
 
   bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
 
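The masked update that replaces the plain assignment in SetFlags above is the usual compare-and-swap retry loop for a read-modify-write that std::atomic offers no single operation for. A minimal standalone sketch of the same pattern, with illustrative names rather than V8's:

#include <atomic>
#include <cstdint>

// Set the bits selected by `mask` to the values given in `bits`, leaving all
// other bits untouched, without losing concurrent updates to those other bits.
void SetMaskedBits(std::atomic<uintptr_t>& flags, uintptr_t bits,
                   uintptr_t mask) {
  uintptr_t old_flags = flags.load();
  uintptr_t new_flags;
  do {
    new_flags = (old_flags & ~mask) | (bits & mask);
    // compare_exchange_weak stores new_flags only if `flags` still holds
    // old_flags; on failure it refreshes old_flags with the current value and
    // the loop recomputes new_flags from that.
  } while (!flags.compare_exchange_weak(old_flags, new_flags));
}

Single-flag operations need no such loop: on an integral std::atomic, |= and &= are atomic fetch_or / fetch_and, which is why ClearFlag's `flags_ &= ~flag;` can stay unchanged in this diff.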
@@ -602,18 +590,15 @@ class MemoryChunk {
     return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
   }
 
-  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
   bool IsEvacuationCandidate() {
-    DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
-             IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
-    return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
+    DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
+    return IsFlagSet(EVACUATION_CANDIDATE);
   }
 
-  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
   bool ShouldSkipEvacuationSlotRecording() {
-    uintptr_t flags = GetFlags<access_mode>();
-    return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
-           ((flags & COMPACTION_WAS_ABORTED) == 0);
+    uintptr_t current_flags = flags();
+    return ((current_flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
+           ((current_flags & COMPACTION_WAS_ABORTED) == 0);
   }
 
   Executability executable() {
@@ -653,7 +638,7 @@ class MemoryChunk {
   VirtualMemory* reserved_memory() { return &reservation_; }
 
   size_t size_;
-  uintptr_t flags_;
+  std::atomic<uintptr_t> flags_;
 
   // Start and end of allocatable memory on this chunk.
   Address area_start_;
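A side note on the field change above: plain reads of a std::atomic through its implicit conversion (which is what the new flags() accessor relies on) default to sequentially consistent ordering, while the removed GetFlags<AccessMode::ATOMIC> path used base::AsAtomicWord::Relaxed_Load. A small illustrative snippet showing the two read styles, not code from this change:

#include <atomic>
#include <cstdint>

std::atomic<uintptr_t> flags{0};

// Equivalent to flags.load(std::memory_order_seq_cst); this is what the new
// `uintptr_t flags() { return flags_; }` accessor does via implicit conversion.
uintptr_t LoadDefault() { return flags; }

// Closer in spirit to the old Relaxed_Load-based atomic read path.
uintptr_t LoadRelaxed() { return flags.load(std::memory_order_relaxed); }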