Revert "[heap] Use std::atomic for page flags."
This reverts commit 7f3f7e8a95.
Reason for revert: performance
Original change's description:
> [heap] Use std::atomic for page flags.
>
> Bug: chromium:852420
> Change-Id: I873666df415c6b4919f8b3385494c9a08f105188
> Reviewed-on: https://chromium-review.googlesource.com/1170700
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Hannes Payer <hpayer@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#55055}
TBR=ulan@chromium.org,hpayer@chromium.org
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: chromium:852420
Change-Id: I89fcd085395fa372ae9fb254e55954ff3b7ca4d7
Reviewed-on: https://chromium-review.googlesource.com/1184982
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55329}
parent c17053c2c3
commit 26fcc4aaf5
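
For orientation before the diff: the reverted CL stored MemoryChunk::flags_ in a std::atomic<uintptr_t> and turned every SetFlags call into a compare-exchange loop, while the code being restored keeps a plain word and lets call sites opt into atomic access through an AccessMode template parameter (routed through base::AsAtomicWord inside V8). The standalone sketch below is not part of the diff; it uses illustrative names (AtomicFlags, PlainFlags, a local AccessMode enum) and C++20's std::atomic_ref purely as a stand-in for base::AsAtomicWord, to contrast the two designs.

// Standalone sketch (assumed names, not V8 code); requires C++20 for
// std::atomic_ref, which stands in for V8's base::AsAtomicWord helpers.
#include <atomic>
#include <cstdint>

enum class AccessMode { NON_ATOMIC, ATOMIC };

// Shape of the design being reverted: the flag word itself is std::atomic,
// so every multi-flag update goes through a compare-exchange loop.
struct AtomicFlags {
  std::atomic<uintptr_t> flags_{0};

  void SetFlags(uintptr_t flags, uintptr_t mask) {
    uintptr_t old_flags = flags_.load(std::memory_order_relaxed);
    uintptr_t new_flags;
    do {
      new_flags = (old_flags & ~mask) | (flags & mask);
    } while (!flags_.compare_exchange_weak(old_flags, new_flags));
  }
};

// Shape of the design being restored: a plain word, with atomic access only
// when a call site explicitly asks for AccessMode::ATOMIC.
struct PlainFlags {
  uintptr_t flags_ = 0;

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  void SetFlag(uintptr_t flag) {
    if (access_mode == AccessMode::NON_ATOMIC) {
      flags_ |= flag;  // plain read-modify-write on the common path
    } else {
      std::atomic_ref<uintptr_t>(flags_).fetch_or(flag,
                                                  std::memory_order_relaxed);
    }
  }
};

int main() {
  PlainFlags page;
  page.SetFlag(uintptr_t{1} << 3);                      // default NON_ATOMIC
  page.SetFlag<AccessMode::ATOMIC>(uintptr_t{1} << 4);  // explicit atomic path
  AtomicFlags chunk;
  chunk.SetFlags(uintptr_t{1} << 3, uintptr_t{1} << 3);  // always a CAS loop
}

A plausible reading of the "performance" reason for revert, given this contrast: with the std::atomic field, even single-threaded flag updates on hot paths pay for an atomic read-modify-write, whereas the restored design keeps the default NON_ATOMIC path to a plain bitwise operation.
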
@@ -157,7 +157,7 @@ HeapObject* Factory::AllocateRawArray(int size, PretenureFlag pretenure) {
       isolate()->heap()->AllocateRawWithRetryOrFail(size, space);
   if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
     MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
-    chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
+    chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
   }
   return result;
 }
@@ -377,7 +377,7 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(int length,
   if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
   if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
     MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
-    chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
+    chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
   }
   result->set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
   Handle<FixedArray> array(FixedArray::cast(result), isolate());
@@ -395,8 +395,8 @@ void MarkCompactCollector::RecordSlot(HeapObject* object,
                                       HeapObject* target) {
   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
   Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
-  if (target_page->IsEvacuationCandidate() &&
-      !source_page->ShouldSkipEvacuationSlotRecording()) {
+  if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
+      !source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
     RememberedSet<OLD_TO_OLD>::Insert(source_page,
                                       reinterpret_cast<Address>(slot));
   }
@@ -2234,7 +2234,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
       DCHECK_NOT_NULL(current_page);
       memory_chunk_list_.PushBack(current_page);
       marking_state->ClearLiveness(current_page);
-      current_page->SetFlags(first_page()->flags(),
+      current_page->SetFlags(first_page()->GetFlags(),
                              static_cast<uintptr_t>(Page::kCopyAllFlags));
       heap()->CreateFillerObjectAt(current_page->area_start(),
                                    static_cast<int>(current_page->area_size()),
@@ -2633,7 +2633,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
     memory_chunk_list_.PushBack(new_page);
     marking_state->ClearLiveness(new_page);
     // Duplicate the flags that was set on the old page.
-    new_page->SetFlags(last_page()->flags(), Page::kCopyOnFlipFlagsMask);
+    new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
   }
   AccountCommitted(delta);
   current_capacity_ = new_capacity;
@@ -2708,7 +2708,7 @@ void SemiSpace::RemovePage(Page* page) {
 }
 
 void SemiSpace::PrependPage(Page* page) {
-  page->SetFlags(current_page()->flags(),
+  page->SetFlags(current_page()->GetFlags(),
                  static_cast<uintptr_t>(Page::kCopyAllFlags));
   page->set_owner(this);
   memory_chunk_list_.PushFront(page);
@@ -2724,7 +2724,7 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
   DCHECK(from->first_page());
   DCHECK(to->first_page());
 
-  intptr_t saved_to_space_flags = to->current_page()->flags();
+  intptr_t saved_to_space_flags = to->current_page()->GetFlags();
 
   // We swap all properties but id_.
   std::swap(from->current_capacity_, to->current_capacity_);
@@ -566,24 +566,36 @@ class MemoryChunk {
     return this->address() + (index << kPointerSizeLog2);
   }
 
-  void SetFlag(Flag flag) { SetFlags(flag, flag); }
+  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+  void SetFlag(Flag flag) {
+    if (access_mode == AccessMode::NON_ATOMIC) {
+      flags_ |= flag;
+    } else {
+      base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
+    }
+  }
 
-  bool IsFlagSet(Flag flag) { return (flags() & flag) != 0; }
+  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+  bool IsFlagSet(Flag flag) {
+    return (GetFlags<access_mode>() & flag) != 0;
+  }
 
   void ClearFlag(Flag flag) { flags_ &= ~flag; }
   // Set or clear multiple flags at a time. The flags in the mask are set to
   // the value in "flags", the rest retain the current value in |flags_|.
   void SetFlags(uintptr_t flags, uintptr_t mask) {
-    uintptr_t old_flags;
-    uintptr_t new_flags;
-    do {
-      old_flags = flags_;
-      new_flags = (old_flags & ~mask) | (flags & mask);
-    } while (!flags_.compare_exchange_weak(old_flags, new_flags));
+    flags_ = (flags_ & ~mask) | (flags & mask);
   }
 
   // Return all current flags.
-  uintptr_t flags() { return flags_; }
+  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+  uintptr_t GetFlags() {
+    if (access_mode == AccessMode::NON_ATOMIC) {
+      return flags_;
+    } else {
+      return base::AsAtomicWord::Relaxed_Load(&flags_);
+    }
+  }
 
   bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
 
@@ -593,15 +605,18 @@ class MemoryChunk {
     return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
   }
 
+  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
   bool IsEvacuationCandidate() {
-    DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
-    return IsFlagSet(EVACUATION_CANDIDATE);
+    DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
+             IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
+    return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
   }
 
+  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
   bool ShouldSkipEvacuationSlotRecording() {
-    uintptr_t current_flags = flags();
-    return ((current_flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
-           ((current_flags & COMPACTION_WAS_ABORTED) == 0);
+    uintptr_t flags = GetFlags<access_mode>();
+    return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
+           ((flags & COMPACTION_WAS_ABORTED) == 0);
   }
 
   Executability executable() {
@@ -645,7 +660,7 @@ class MemoryChunk {
   VirtualMemory* reserved_memory() { return &reservation_; }
 
   size_t size_;
-  std::atomic<uintptr_t> flags_;
+  uintptr_t flags_;
 
   // Start and end of allocatable memory on this chunk.
   Address area_start_;