[zone] Switch AccountingAllocator to std::atomic

Instead of using our own atomic utils, use std::atomic.

R=mstarzinger@chromium.org

Bug: v8:8916, v8:8834
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel
Change-Id: I663d7f28dbaaa476a62407cf42dca1927c69f68b
Reviewed-on: https://chromium-review.googlesource.com/c/1491631
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59913}
This commit is contained in:
Clemens Hammacher 2019-02-27 16:52:22 +01:00 committed by Commit Bot
parent 6a5cd5987d
commit 087727d1fc
2 changed files with 28 additions and 34 deletions

View File

@@ -18,7 +18,6 @@ namespace internal {
AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
static const size_t kDefaultBucketMaxSize = 5;
memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets,
nullptr);
std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0);
@@ -30,7 +29,7 @@ AccountingAllocator::~AccountingAllocator() { ClearPool(); }
void AccountingAllocator::MemoryPressureNotification(
MemoryPressureLevel level) {
memory_pressure_level_.SetValue(level);
memory_pressure_level_.store(level);
if (level != MemoryPressureLevel::kNone) {
ClearPool();
@@ -86,11 +85,12 @@ Segment* AccountingAllocator::GetSegment(size_t bytes) {
Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
void* memory = AllocWithRetry(bytes);
if (memory != nullptr) {
base::AtomicWord current =
base::Relaxed_AtomicIncrement(&current_memory_usage_, bytes);
base::AtomicWord max = base::Relaxed_Load(&max_memory_usage_);
while (current > max) {
max = base::Relaxed_CompareAndSwap(&max_memory_usage_, max, current);
size_t current =
current_memory_usage_.fetch_add(bytes, std::memory_order_relaxed);
size_t max = max_memory_usage_.load(std::memory_order_relaxed);
while (current > max && !max_memory_usage_.compare_exchange_weak(
max, current, std::memory_order_relaxed)) {
// {max} was updated by {compare_exchange_weak}; retry.
}
}
return reinterpret_cast<Segment*>(memory);
@@ -99,7 +99,7 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
void AccountingAllocator::ReturnSegment(Segment* segment) {
segment->ZapContents();
if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
if (memory_pressure_level_.load() != MemoryPressureLevel::kNone) {
FreeSegment(segment);
} else if (!AddSegmentToPool(segment)) {
FreeSegment(segment);
@@ -107,24 +107,11 @@ void AccountingAllocator::ReturnSegment(Segment* segment) {
}
// Releases {memory} back to the OS and subtracts its size from the
// current-usage counter. Only the std::atomic fetch_sub form belongs here:
// keeping the removed base::Relaxed_AtomicIncrement call alongside it (as the
// marker-less diff rendering suggested) would decrement the counter twice.
void AccountingAllocator::FreeSegment(Segment* memory) {
  // Relaxed order suffices: the counter is a statistic only and is never
  // used for synchronization.
  current_memory_usage_.fetch_sub(memory->size(), std::memory_order_relaxed);
  memory->ZapHeader();
  free(memory);
}
// Returns the number of bytes currently allocated for zone segments.
// NOTE(review): pre-std::atomic implementation; this commit removes this
// out-of-line getter in favor of an inline std::atomic load in the header.
size_t AccountingAllocator::GetCurrentMemoryUsage() const {
return base::Relaxed_Load(&current_memory_usage_);
}
// Returns the high-water mark of the memory-usage counter.
// NOTE(review): pre-std::atomic implementation; this commit removes this
// out-of-line getter in favor of an inline std::atomic load in the header.
size_t AccountingAllocator::GetMaxMemoryUsage() const {
return base::Relaxed_Load(&max_memory_usage_);
}
// Returns the number of bytes currently held in the segment reuse pool.
// NOTE(review): pre-std::atomic implementation; this commit removes this
// out-of-line getter in favor of an inline std::atomic load in the header.
size_t AccountingAllocator::GetCurrentPoolSize() const {
return base::Relaxed_Load(&current_pool_size_);
}
Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
if (requested_size > (1 << kMaxSegmentSizePower)) {
return nullptr;
@@ -147,8 +134,7 @@ Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
segment->set_next(nullptr);
unused_segments_sizes_[power]--;
base::Relaxed_AtomicIncrement(
&current_pool_size_, -static_cast<base::AtomicWord>(segment->size()));
current_pool_size_.fetch_sub(segment->size(), std::memory_order_relaxed);
}
}
@@ -181,7 +167,7 @@ bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
segment->set_next(unused_segments_heads_[power]);
unused_segments_heads_[power] = segment;
base::Relaxed_AtomicIncrement(&current_pool_size_, size);
current_pool_size_.fetch_add(size, std::memory_order_relaxed);
unused_segments_sizes_[power]++;
}

View File

@@ -5,10 +5,10 @@
#ifndef V8_ZONE_ACCOUNTING_ALLOCATOR_H_
#define V8_ZONE_ACCOUNTING_ALLOCATOR_H_
#include <atomic>
#include "include/v8-platform.h"
#include "include/v8.h"
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
@@ -32,10 +32,17 @@ class V8_EXPORT_PRIVATE AccountingAllocator {
// them if the pool is already full or memory pressure is high.
virtual void ReturnSegment(Segment* memory);
// Statistics getters. All three counters are std::atomic and are read with
// relaxed ordering: they are monitoring values only, no synchronization is
// derived from them, so the returned value may be momentarily stale under
// concurrent allocation. (The old out-of-line declarations removed by this
// commit must not remain next to these inline definitions — an in-class
// redeclaration of the same member is ill-formed.)

// Bytes currently allocated for zone segments.
size_t GetCurrentMemoryUsage() const {
  return current_memory_usage_.load(std::memory_order_relaxed);
}

// High-water mark of GetCurrentMemoryUsage().
size_t GetMaxMemoryUsage() const {
  return max_memory_usage_.load(std::memory_order_relaxed);
}

// Bytes currently held in the segment reuse pool.
size_t GetCurrentPoolSize() const {
  return current_pool_size_.load(std::memory_order_relaxed);
}
void MemoryPressureNotification(MemoryPressureLevel level);
// Configures the zone segment pool size limits so the pool does not
@@ -77,11 +84,12 @@ class V8_EXPORT_PRIVATE AccountingAllocator {
base::Mutex unused_segments_mutex_;
// Memory accounting counters, updated with relaxed std::atomic operations
// (statistics only; never used for synchronization). The superseded
// base::AtomicWord / base::AtomicValue members removed by this commit must
// not remain alongside these — duplicate member names are ill-formed.
std::atomic<size_t> current_memory_usage_{0};
std::atomic<size_t> max_memory_usage_{0};
std::atomic<size_t> current_pool_size_{0};

std::atomic<MemoryPressureLevel> memory_pressure_level_{
    MemoryPressureLevel::kNone};
DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
};