[zone] Get rid of the Zone's segment pool
It's unclear that this helps performance. Let's see what the bots say.

Change-Id: Ic28783c90495f6ce01b4980d84794d394f941a4f
Reviewed-on: https://chromium-review.googlesource.com/c/1346331
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57696}
Commit: 74038c86e9 (parent: 621de4bd2c)
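Context for the diffs below (my summary, not part of the CL): V8 Zones obtain their backing Segments from the isolate's AccountingAllocator. Until this CL, segments returned by dying zones were parked in a bounded per-size-class free list so that later requests of the same class could skip malloc; the CL deletes that layer, reducing GetSegment/ReturnSegment to malloc/free plus usage accounting. A toy model of the deleted behavior, assuming the bucket layout described in the header diff further down:

#include <cstddef>
#include <cstdlib>
#include <mutex>
#include <vector>

// Toy model (mine, not V8 code) of the behavior this CL deletes: freed
// segments are parked in a bounded per-size-class free list so a later
// request of the same class can skip malloc.
class ToyPool {
 public:
  ~ToyPool() {
    for (auto& bucket : free_)
      for (void* p : bucket) std::free(p);
  }
  void* Get(size_t bucket) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (!free_[bucket].empty()) {  // pool hit: reuse without malloc
      void* p = free_[bucket].back();
      free_[bucket].pop_back();
      return p;
    }
    return std::malloc(size_t(1) << (bucket + 13));  // pool miss
  }
  void Return(void* p, size_t bucket) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (free_[bucket].size() < kMaxPerBucket) {
      free_[bucket].push_back(p);  // park for reuse
    } else {
      std::free(p);  // bucket full: release to the OS allocator
    }
  }

 private:
  static const size_t kMaxPerBucket = 5;  // cf. kDefaultBucketMaxSize below
  std::mutex mutex_;
  std::vector<void*> free_[6];  // size classes 2^13 .. 2^18, as in the CL
};

int main() {
  ToyPool pool;
  void* p = pool.Get(0);  // 8 KiB request: malloc (pool empty)
  pool.Return(p, 0);      // parked in bucket 0
  void* q = pool.Get(0);  // reused without a second malloc
  pool.Return(q, 0);
}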
src/api.cc

@@ -943,7 +943,6 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
   set_max_semi_space_size_in_kb(
       i::Heap::ComputeMaxSemiSpaceSize(physical_memory));
   set_max_old_space_size(i::Heap::ComputeMaxOldGenerationSize(physical_memory));
-  set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSize);

   if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
     // Reserve no more than 1/8 of the memory for the code range, but at most
@@ -959,12 +958,10 @@ void SetResourceConstraints(i::Isolate* isolate,
   size_t semi_space_size = constraints.max_semi_space_size_in_kb();
   size_t old_space_size = constraints.max_old_space_size();
   size_t code_range_size = constraints.code_range_size();
-  size_t max_pool_size = constraints.max_zone_pool_size();
   if (semi_space_size != 0 || old_space_size != 0 || code_range_size != 0) {
     isolate->heap()->ConfigureHeap(semi_space_size, old_space_size,
                                    code_range_size);
   }
-  isolate->allocator()->ConfigureSegmentPool(max_pool_size);

   if (constraints.stack_limit() != nullptr) {
     uintptr_t limit = reinterpret_cast<uintptr_t>(constraints.stack_limit());
@@ -8460,8 +8457,8 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
       isolate->wasm_engine()->allocator()->GetCurrentMemoryUsage();
   heap_statistics->external_memory_ = isolate->heap()->external_memory();
   heap_statistics->peak_malloced_memory_ =
-      isolate->allocator()->GetMaxMemoryUsage() +
-      isolate->wasm_engine()->allocator()->GetMaxMemoryUsage();
+      isolate->allocator()->GetPeakMemoryUsage() +
+      isolate->wasm_engine()->allocator()->GetPeakMemoryUsage();
   heap_statistics->number_of_native_contexts_ = heap->NumberOfNativeContexts();
   heap_statistics->number_of_detached_contexts_ =
       heap->NumberOfDetachedContexts();
@@ -8750,7 +8747,6 @@ void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
           ? isolate->thread_manager()->IsLockedByCurrentThread()
           : i::ThreadId::Current().Equals(isolate->thread_id());
   isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
-  isolate->allocator()->MemoryPressureNotification(level);
 }

 void Isolate::EnableMemorySavingsMode() {
src/heap/heap.cc

@@ -4040,7 +4040,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
       memory_allocator()->Size() + memory_allocator()->Available();
   *stats->os_error = base::OS::GetLastError();
   *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
-  *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
+  *stats->malloced_peak_memory = isolate_->allocator()->GetPeakMemoryUsage();
   if (take_snapshot) {
     HeapIterator iterator(this);
     for (HeapObject* obj = iterator.next(); obj != nullptr;
src/isolate.cc

@@ -2641,26 +2641,20 @@ void Isolate::ThreadDataTable::RemoveAllThreads() {

 class VerboseAccountingAllocator : public AccountingAllocator {
  public:
-  VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes,
-                             size_t pool_sample_bytes)
+  VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes)
       : heap_(heap),
         last_memory_usage_(0),
-        last_pool_size_(0),
         nesting_deepth_(0),
-        allocation_sample_bytes_(allocation_sample_bytes),
-        pool_sample_bytes_(pool_sample_bytes) {}
+        allocation_sample_bytes_(allocation_sample_bytes) {}

   v8::internal::Segment* GetSegment(size_t size) override {
     v8::internal::Segment* memory = AccountingAllocator::GetSegment(size);
     if (memory) {
       size_t malloced_current = GetCurrentMemoryUsage();
-      size_t pooled_current = GetCurrentPoolSize();

-      if (last_memory_usage_ + allocation_sample_bytes_ < malloced_current ||
-          last_pool_size_ + pool_sample_bytes_ < pooled_current) {
-        PrintMemoryJSON(malloced_current, pooled_current);
+      if (last_memory_usage_ + allocation_sample_bytes_ < malloced_current) {
+        PrintMemoryJSON(malloced_current);
         last_memory_usage_ = malloced_current;
-        last_pool_size_ = pooled_current;
       }
     }
     return memory;
@@ -2669,13 +2663,10 @@ class VerboseAccountingAllocator : public AccountingAllocator {
   void ReturnSegment(v8::internal::Segment* memory) override {
     AccountingAllocator::ReturnSegment(memory);
     size_t malloced_current = GetCurrentMemoryUsage();
-    size_t pooled_current = GetCurrentPoolSize();

-    if (malloced_current + allocation_sample_bytes_ < last_memory_usage_ ||
-        pooled_current + pool_sample_bytes_ < last_pool_size_) {
-      PrintMemoryJSON(malloced_current, pooled_current);
+    if (malloced_current + allocation_sample_bytes_ < last_memory_usage_) {
+      PrintMemoryJSON(malloced_current);
       last_memory_usage_ = malloced_current;
-      last_pool_size_ = pooled_current;
     }
   }

@@ -2707,7 +2698,7 @@ class VerboseAccountingAllocator : public AccountingAllocator {
               zone->allocation_size(), nesting_deepth_.load());
   }

-  void PrintMemoryJSON(size_t malloced, size_t pooled) {
+  void PrintMemoryJSON(size_t malloced) {
     // Note: Neither isolate, nor heap is locked, so be careful with accesses
     // as the allocator is potentially used on a concurrent thread.
     double time = heap_->isolate()->time_millis_since_init();
@@ -2716,17 +2707,14 @@ class VerboseAccountingAllocator : public AccountingAllocator {
         "\"type\": \"zone\", "
         "\"isolate\": \"%p\", "
         "\"time\": %f, "
-        "\"allocated\": %" PRIuS
-        ","
-        "\"pooled\": %" PRIuS "}\n",
-        reinterpret_cast<void*>(heap_->isolate()), time, malloced, pooled);
+        "\"allocated\": %" PRIuS "}\n",
+        reinterpret_cast<void*>(heap_->isolate()), time, malloced);
   }

   Heap* heap_;
   std::atomic<size_t> last_memory_usage_;
-  std::atomic<size_t> last_pool_size_;
   std::atomic<size_t> nesting_deepth_;
-  size_t allocation_sample_bytes_, pool_sample_bytes_;
+  size_t allocation_sample_bytes_;
 };

 #ifdef DEBUG
@@ -2794,9 +2782,9 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
     : isolate_allocator_(std::move(isolate_allocator)),
       id_(base::Relaxed_AtomicIncrement(&isolate_counter_, 1)),
       stack_guard_(this),
-      allocator_(FLAG_trace_zone_stats ? new VerboseAccountingAllocator(
-                                             &heap_, 256 * KB, 128 * KB)
-                                       : new AccountingAllocator()),
+      allocator_(FLAG_trace_zone_stats
+                     ? new VerboseAccountingAllocator(&heap_, 256 * KB)
+                     : new AccountingAllocator()),
       builtins_(this),
       rail_mode_(PERFORMANCE_ANIMATION),
       code_event_dispatcher_(new CodeEventDispatcher()),
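With --trace_zone_stats, the verbose allocator now samples only malloced zone memory (every 256 KB of growth, per the constructor above), and the "pooled" field disappears from the trace line. Illustrative output, with invented values, following the PrintMemoryJSON format string in the diff:

{"type": "zone", "isolate": "0x55f0d2a4c000", "time": 1234.567000, "allocated": 2621440}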
src/zone/accounting-allocator.cc

@@ -15,71 +15,9 @@
 namespace v8 {
 namespace internal {

-AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
-  static const size_t kDefaultBucketMaxSize = 5;
-
-  memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
-  std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets,
-            nullptr);
-  std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0);
-  std::fill(unused_segments_max_sizes_,
-            unused_segments_max_sizes_ + kNumberBuckets, kDefaultBucketMaxSize);
-}
-
-AccountingAllocator::~AccountingAllocator() { ClearPool(); }
-
-void AccountingAllocator::MemoryPressureNotification(
-    MemoryPressureLevel level) {
-  memory_pressure_level_.SetValue(level);
-
-  if (level != MemoryPressureLevel::kNone) {
-    ClearPool();
-  }
-}
-
-void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
-  // The sum of the bytes of one segment of each size.
-  static const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) -
-                                  (size_t(1) << kMinSegmentSizePower);
-  size_t fits_fully = max_pool_size / full_size;
-
-  base::MutexGuard lock_guard(&unused_segments_mutex_);
-
-  // We assume few zones (less than 'fits_fully' many) to be active at the same
-  // time. When zones grow regularly, they will keep requesting segments of
-  // increasing size each time. Therefore we try to get as many segments with an
-  // equal number of segments of each size as possible.
-  // The remaining space is used to make more room for an 'incomplete set' of
-  // segments beginning with the smaller ones.
-  // This code will work best if the max_pool_size is a multiple of the
-  // full_size. If max_pool_size is no sum of segment sizes the actual pool
-  // size might be smaller then max_pool_size. Note that no actual memory gets
-  // wasted though.
-  // TODO(heimbuef): Determine better strategy generating a segment sizes
-  // distribution that is closer to real/benchmark usecases and uses the given
-  // max_pool_size more efficiently.
-  size_t total_size = fits_fully * full_size;
-
-  for (size_t power = 0; power < kNumberBuckets; ++power) {
-    if (total_size + (size_t(1) << (power + kMinSegmentSizePower)) <=
-        max_pool_size) {
-      unused_segments_max_sizes_[power] = fits_fully + 1;
-      total_size += size_t(1) << power;
-    } else {
-      unused_segments_max_sizes_[power] = fits_fully;
-    }
-  }
-}
-
 Segment* AccountingAllocator::GetSegment(size_t bytes) {
-  Segment* result = GetSegmentFromPool(bytes);
-  if (result == nullptr) {
-    result = AllocateSegment(bytes);
-    if (result != nullptr) {
-      result->Initialize(bytes);
-    }
-  }
-
+  Segment* result = AllocateSegment(bytes);
+  if (result != nullptr) result->Initialize(bytes);
   return result;
 }

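For concreteness, here is the arithmetic the removed ConfigureSegmentPool() performs, as a stand-alone sketch (mine, not V8 code). The constants come from accounting-allocator.h below; the 8 MiB budget is an invented example value:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t kMinSegmentSizePower = 13;  // smallest pooled segment: 8 KiB
  const size_t kMaxSegmentSizePower = 18;  // largest pooled segment: 256 KiB
  // One segment of every size class: 2^13 + 2^14 + ... + 2^18
  // = 2^19 - 2^13 = 516096 bytes (~504 KiB).
  const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) -
                           (size_t(1) << kMinSegmentSizePower);
  const size_t max_pool_size = 8u * 1024 * 1024;  // hypothetical budget
  // 8 MiB / 516096 = 16 complete "sets", so every bucket is capped at 16
  // segments; the ~128 KiB remainder then admits one extra segment per
  // bucket, smallest first, while it still fits under the budget.
  const size_t fits_fully = max_pool_size / full_size;
  std::printf("full_size = %zu, fits_fully = %zu\n", full_size, fits_fully);
  return 0;
}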
@@ -88,9 +26,9 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
   if (memory != nullptr) {
     base::AtomicWord current =
         base::Relaxed_AtomicIncrement(&current_memory_usage_, bytes);
-    base::AtomicWord max = base::Relaxed_Load(&max_memory_usage_);
-    while (current > max) {
-      max = base::Relaxed_CompareAndSwap(&max_memory_usage_, max, current);
+    base::AtomicWord peak = base::Relaxed_Load(&peak_memory_usage_);
+    while (current > peak) {
+      peak = base::Relaxed_CompareAndSwap(&peak_memory_usage_, peak, current);
     }
   }
   return reinterpret_cast<Segment*>(memory);
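The accounting rename (GetMaxMemoryUsage to GetPeakMemoryUsage, max_memory_usage_ to peak_memory_usage_) keeps the usual lock-free "atomic max" idiom shown above. A stand-alone equivalent with std::atomic (my sketch, not V8's base:: primitives):

#include <atomic>
#include <cstddef>
#include <cstdio>

std::atomic<size_t> current_usage{0};
std::atomic<size_t> peak_usage{0};

void OnAllocate(size_t bytes) {
  size_t now =
      current_usage.fetch_add(bytes, std::memory_order_relaxed) + bytes;
  size_t peak = peak_usage.load(std::memory_order_relaxed);
  // Retry until we either publish the new maximum or observe that another
  // thread already recorded a peak at least as large.
  while (now > peak &&
         !peak_usage.compare_exchange_weak(peak, now,
                                           std::memory_order_relaxed)) {
  }
}

int main() {
  OnAllocate(4096);
  OnAllocate(8192);
  std::printf("peak = %zu\n", peak_usage.load());  // prints 12288
}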
@@ -98,12 +36,7 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {

 void AccountingAllocator::ReturnSegment(Segment* segment) {
   segment->ZapContents();
-
-  if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
-    FreeSegment(segment);
-  } else if (!AddSegmentToPool(segment)) {
-    FreeSegment(segment);
-  }
+  FreeSegment(segment);
 }

 void AccountingAllocator::FreeSegment(Segment* memory) {
@@ -113,95 +46,13 @@ void AccountingAllocator::FreeSegment(Segment* memory) {
   free(memory);
 }

+size_t AccountingAllocator::GetPeakMemoryUsage() const {
+  return base::Relaxed_Load(&peak_memory_usage_);
+}
+
 size_t AccountingAllocator::GetCurrentMemoryUsage() const {
   return base::Relaxed_Load(&current_memory_usage_);
 }

-size_t AccountingAllocator::GetMaxMemoryUsage() const {
-  return base::Relaxed_Load(&max_memory_usage_);
-}
-
-size_t AccountingAllocator::GetCurrentPoolSize() const {
-  return base::Relaxed_Load(&current_pool_size_);
-}
-
-Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
-  if (requested_size > (1 << kMaxSegmentSizePower)) {
-    return nullptr;
-  }
-
-  size_t power = kMinSegmentSizePower;
-  while (requested_size > (static_cast<size_t>(1) << power)) power++;
-
-  DCHECK_GE(power, kMinSegmentSizePower + 0);
-  power -= kMinSegmentSizePower;
-
-  Segment* segment;
-  {
-    base::MutexGuard lock_guard(&unused_segments_mutex_);
-
-    segment = unused_segments_heads_[power];
-
-    if (segment != nullptr) {
-      unused_segments_heads_[power] = segment->next();
-      segment->set_next(nullptr);
-
-      unused_segments_sizes_[power]--;
-      base::Relaxed_AtomicIncrement(
-          &current_pool_size_, -static_cast<base::AtomicWord>(segment->size()));
-    }
-  }
-
-  if (segment) {
-    DCHECK_GE(segment->size(), requested_size);
-  }
-  return segment;
-}
-
-bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
-  size_t size = segment->size();
-
-  if (size >= (1 << (kMaxSegmentSizePower + 1))) return false;
-
-  if (size < (1 << kMinSegmentSizePower)) return false;
-
-  size_t power = kMaxSegmentSizePower;
-
-  while (size < (static_cast<size_t>(1) << power)) power--;
-
-  DCHECK_GE(power, kMinSegmentSizePower + 0);
-  power -= kMinSegmentSizePower;
-
-  {
-    base::MutexGuard lock_guard(&unused_segments_mutex_);
-
-    if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
-      return false;
-    }
-
-    segment->set_next(unused_segments_heads_[power]);
-    unused_segments_heads_[power] = segment;
-    base::Relaxed_AtomicIncrement(&current_pool_size_, size);
-    unused_segments_sizes_[power]++;
-  }
-
-  return true;
-}
-
-void AccountingAllocator::ClearPool() {
-  base::MutexGuard lock_guard(&unused_segments_mutex_);
-
-  for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
-       power++) {
-    Segment* current = unused_segments_heads_[power];
-    while (current) {
-      Segment* next = current->next();
-      FreeSegment(current);
-      current = next;
-    }
-    unused_segments_heads_[power] = nullptr;
-  }
-}
-
 }  // namespace internal
 }  // namespace v8
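The free list deleted above was keyed by size class: the smallest power of two that covers the request, offset by the minimum power. A stand-alone restatement of that bucket computation (my sketch, using the constants shown in the diff):

#include <cstddef>
#include <cstdio>

// Bucket selection as in the removed GetSegmentFromPool(): the smallest
// power of two >= requested_size, offset by the minimum class (2^13 = 8 KiB).
size_t BucketFor(size_t requested_size) {
  const size_t kMinSegmentSizePower = 13;
  size_t power = kMinSegmentSizePower;
  while (requested_size > (static_cast<size_t>(1) << power)) power++;
  return power - kMinSegmentSizePower;
}

int main() {
  // A 10000-byte request maps to bucket 1, the 16 KiB (2^14) class:
  // the smallest class that fits it.
  std::printf("BucketFor(10000) = %zu\n", BucketFor(10000));
}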
src/zone/accounting-allocator.h

@@ -21,67 +21,25 @@ namespace internal {

 class V8_EXPORT_PRIVATE AccountingAllocator {
  public:
-  static const size_t kMaxPoolSize = 8ul * KB;
-
-  AccountingAllocator();
-  virtual ~AccountingAllocator();
+  AccountingAllocator() {}
+  virtual ~AccountingAllocator() {}

-  // Gets an empty segment from the pool or creates a new one.
   virtual Segment* GetSegment(size_t bytes);
-  // Return unneeded segments to either insert them into the pool or release
-  // them if the pool is already full or memory pressure is high.
   virtual void ReturnSegment(Segment* memory);

+  size_t GetPeakMemoryUsage() const;
   size_t GetCurrentMemoryUsage() const;
-  size_t GetMaxMemoryUsage() const;
-
-  size_t GetCurrentPoolSize() const;
-
-  void MemoryPressureNotification(MemoryPressureLevel level);
-  // Configures the zone segment pool size limits so the pool does not
-  // grow bigger than max_pool_size.
-  // TODO(heimbuef): Do not accept segments to pool that are larger than
-  // their size class requires. Sometimes the zones generate weird segments.
-  void ConfigureSegmentPool(const size_t max_pool_size);

   virtual void ZoneCreation(const Zone* zone) {}
   virtual void ZoneDestruction(const Zone* zone) {}

  private:
-  FRIEND_TEST(Zone, SegmentPoolConstraints);
-
-  static const size_t kMinSegmentSizePower = 13;
-  static const size_t kMaxSegmentSizePower = 18;
-
-  STATIC_ASSERT(kMinSegmentSizePower <= kMaxSegmentSizePower);
-
-  static const size_t kNumberBuckets =
-      1 + kMaxSegmentSizePower - kMinSegmentSizePower;
-
   // Allocates a new segment. Returns nullptr on failed allocation.
   Segment* AllocateSegment(size_t bytes);
   void FreeSegment(Segment* memory);

-  // Returns a segment from the pool of at least the requested size.
-  Segment* GetSegmentFromPool(size_t requested_size);
-  // Trys to add a segment to the pool. Returns false if the pool is full.
-  bool AddSegmentToPool(Segment* segment);
-
-  // Empties the pool and puts all its contents onto the garbage stack.
-  void ClearPool();
-
-  Segment* unused_segments_heads_[kNumberBuckets];
-
-  size_t unused_segments_sizes_[kNumberBuckets];
-  size_t unused_segments_max_sizes_[kNumberBuckets];
-
-  base::Mutex unused_segments_mutex_;
-
+  base::AtomicWord peak_memory_usage_ = 0;
   base::AtomicWord current_memory_usage_ = 0;
-  base::AtomicWord max_memory_usage_ = 0;
-  base::AtomicWord current_pool_size_ = 0;
-
-  base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;

   DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
 };
test/unittests/BUILD.gn

@@ -212,7 +212,6 @@ v8_source_set("unittests_sources") {
     "wasm/wasm-macro-gen-unittest.cc",
     "wasm/wasm-module-builder-unittest.cc",
     "wasm/wasm-opcodes-unittest.cc",
-    "zone/segmentpool-unittest.cc",
     "zone/zone-allocator-unittest.cc",
     "zone/zone-chunk-list-unittest.cc",
     "zone/zone-unittest.cc",
test/unittests/zone/segmentpool-unittest.cc (deleted)

@@ -1,32 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/zone/accounting-allocator.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace v8 {
-namespace internal {
-
-TEST(Zone, SegmentPoolConstraints) {
-  size_t sizes[]{
-      0,  // Corner case
-      AccountingAllocator::kMaxPoolSize,
-      GB  // Something really large
-  };
-
-  AccountingAllocator allocator;
-  for (size_t size : sizes) {
-    allocator.ConfigureSegmentPool(size);
-    size_t total_size = 0;
-    for (size_t power = 0; power < AccountingAllocator::kNumberBuckets;
-         ++power) {
-      total_size +=
-          allocator.unused_segments_max_sizes_[power] * (size_t(1) << power);
-    }
-    EXPECT_LE(total_size, size);
-  }
-}
-
-}  // namespace internal
-}  // namespace v8