Revert of "Pool implementation for zone segments" (patchset #9 id:420001 of https://codereview.chromium.org/2335343007/)
Reason for revert:
Blocks Roll https://codereview.chromium.org/2366733002/

Original issue's description:
> Pool implementation for zone segments
>
> BUG=v8:5409
>
> Committed: https://crrev.com/37c688a24578e787d3d8941093563ed049c3497e
> Cr-Commit-Position: refs/heads/master@{#39631}

TBR=jkummerow@chromium.org,jochen@chromium.org,verwaest@chromium.org,heimbuef@google.com
# Skipping CQ checks because original CL landed less than 1 days ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=v8:5409

Review-Url: https://codereview.chromium.org/2365843002
Cr-Commit-Position: refs/heads/master@{#39652}
parent b88a848faf
commit a717fb7c01
BUILD.gn
@@ -1704,7 +1704,6 @@ v8_source_set("v8_base") {
     "src/zone/zone-allocator.h",
     "src/zone/zone-containers.h",
-    "src/zone/zone-segment.cc",
     "src/zone/zone-segment.h",
     "src/zone/zone.cc",
     "src/zone/zone.h",
src/api.cc
@@ -8322,7 +8322,6 @@ void Isolate::IsolateInBackgroundNotification() {
 
 void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
   isolate->heap()->MemoryPressureNotification(level, Locker::IsLocked(this));
-  isolate->allocator()->MemoryPressureNotification(level);
 }
 
 void Isolate::SetRAILMode(RAILMode rail_mode) {
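The removed call is the only place where embedder memory-pressure signals reached the segment pool. For illustration, a hypothetical embedder-side hook (the handler name is invented; the v8::Isolate API shown is the public one this hunk touches):

#include "include/v8.h"

// Hypothetical embedder hook: forward an OS low-memory warning to V8.
// While the pool CL was in, this call also flushed pooled zone segments;
// after the revert, only the heap reacts to the notification.
void OnLowMemoryWarning(v8::Isolate* isolate) {
  isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical);
}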
src/isolate.cc
@@ -1903,8 +1903,8 @@ class VerboseAccountingAllocator : public AccountingAllocator {
   VerboseAccountingAllocator(Heap* heap, size_t sample_bytes)
       : heap_(heap), last_memory_usage_(0), sample_bytes_(sample_bytes) {}
 
-  v8::internal::Segment* GetSegment(size_t size) override {
-    v8::internal::Segment* memory = AccountingAllocator::GetSegment(size);
+  v8::internal::Segment* AllocateSegment(size_t size) override {
+    v8::internal::Segment* memory = AccountingAllocator::AllocateSegment(size);
     if (memory) {
       size_t current = GetCurrentMemoryUsage();
       if (last_memory_usage_.Value() + sample_bytes_ < current) {
@@ -1915,8 +1915,8 @@ class VerboseAccountingAllocator : public AccountingAllocator {
     return memory;
   }
 
-  void ReturnSegment(v8::internal::Segment* memory) override {
-    AccountingAllocator::ReturnSegment(memory);
+  void FreeSegment(v8::internal::Segment* memory) override {
+    AccountingAllocator::FreeSegment(memory);
     size_t current = GetCurrentMemoryUsage();
     if (current + sample_bytes_ < last_memory_usage_.Value()) {
       PrintJSON(current);
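Both overrides follow the same rate-limited sampling idea: emit a JSON sample only once usage has drifted by at least sample_bytes_ since the last report, rising in the allocation hook and falling in the free hook. A minimal standalone sketch of that pattern (names are illustrative, and plain std::atomic stands in for V8's base:: primitives):

#include <atomic>
#include <cstdio>

// Illustrative sketch: report a counter only when it has drifted by
// at least `sample_bytes_` since the last report.
class SampledReporter {
 public:
  explicit SampledReporter(size_t sample_bytes)
      : sample_bytes_(sample_bytes) {}

  void OnUsageChanged(size_t current) {
    size_t last = last_reported_.load(std::memory_order_relaxed);
    // Report on a rise (allocation) or fall (free) of at least sample_bytes_.
    if (last + sample_bytes_ < current || current + sample_bytes_ < last) {
      last_reported_.store(current, std::memory_order_relaxed);
      std::printf("{\"zone_usage_bytes\": %zu}\n", current);
    }
  }

 private:
  const size_t sample_bytes_;
  std::atomic<size_t> last_reported_{0};
};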
@@ -2169,6 +2169,9 @@ void Isolate::SetIsolateThreadLocals(Isolate* isolate,
 Isolate::~Isolate() {
   TRACE_ISOLATE(destructor);
 
+  // Has to be called while counters_ are still alive
+  runtime_zone_->DeleteKeptSegment();
+
   // The entry stack must be empty when we get here.
   DCHECK(entry_stack_ == NULL || entry_stack_->previous_item == NULL);
src/v8.gyp
@@ -1274,8 +1274,6 @@
       'zone/zone.cc',
       'zone/zone.h',
-      'zone/zone-segment.cc',
-      'zone/zone-segment.h',
       'zone/zone-allocator.h',
       'zone/zone-containers.h',
     ],
src/zone/accounting-allocator.cc
@@ -13,38 +13,6 @@
 namespace v8 {
 namespace internal {
 
-AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
-  memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
-  std::fill(unused_segments_heads_,
-            unused_segments_heads_ +
-                (1 + kMaxSegmentSizePower - kMinSegmentSizePower),
-            nullptr);
-  std::fill(
-      unused_segments_sizes,
-      unused_segments_sizes + (1 + kMaxSegmentSizePower - kMinSegmentSizePower),
-      0);
-}
-
-AccountingAllocator::~AccountingAllocator() { ClearPool(); }
-
-void AccountingAllocator::MemoryPressureNotification(
-    MemoryPressureLevel level) {
-  memory_pressure_level_.SetValue(level);
-
-  if (level != MemoryPressureLevel::kNone) {
-    ClearPool();
-  }
-}
-
-Segment* AccountingAllocator::GetSegment(size_t bytes) {
-  Segment* result = GetSegmentFromPool(bytes);
-  if (result == nullptr) {
-    result = AllocateSegment(bytes);
-  }
-
-  return result;
-}
-
 Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
   void* memory = malloc(bytes);
   if (memory) {
@@ -58,19 +26,9 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
   return reinterpret_cast<Segment*>(memory);
 }
 
-void AccountingAllocator::ReturnSegment(Segment* segment) {
-  segment->ZapContents();
-  if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
-    FreeSegment(segment);
-  } else if (!AddSegmentToPool(segment)) {
-    FreeSegment(segment);
-  }
-}
-
 void AccountingAllocator::FreeSegment(Segment* memory) {
   base::NoBarrier_AtomicIncrement(
       &current_memory_usage_, -static_cast<base::AtomicWord>(memory->size()));
-  memory->ZapHeader();
   free(memory);
 }
@@ -82,82 +40,5 @@ size_t AccountingAllocator::GetMaxMemoryUsage() const {
   return base::NoBarrier_Load(&max_memory_usage_);
 }
 
-Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
-  if (requested_size > (1 << kMaxSegmentSizePower)) {
-    return nullptr;
-  }
-
-  uint8_t power = kMinSegmentSizePower;
-  while (requested_size > static_cast<size_t>(1 << power)) power++;
-
-  DCHECK_GE(power, kMinSegmentSizePower + 0);
-  power -= kMinSegmentSizePower;
-
-  Segment* segment;
-  {
-    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
-
-    segment = unused_segments_heads_[power];
-
-    if (segment != nullptr) {
-      unused_segments_heads_[power] = segment->next();
-      segment->set_next(nullptr);
-
-      unused_segments_sizes[power]--;
-      unused_segments_size_ -= segment->size();
-    }
-  }
-
-  if (segment) {
-    DCHECK_GE(segment->size(), requested_size);
-  }
-  return segment;
-}
-
-bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
-  size_t size = segment->size();
-
-  if (size >= (1 << (kMaxSegmentSizePower + 1))) return false;
-
-  if (size < (1 << kMinSegmentSizePower)) return false;
-
-  uint8_t power = kMaxSegmentSizePower;
-
-  while (size < static_cast<size_t>(1 << power)) power--;
-
-  DCHECK_GE(power, kMinSegmentSizePower + 0);
-  power -= kMinSegmentSizePower;
-
-  {
-    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
-
-    if (unused_segments_sizes[power] >= kMaxSegmentsPerBucket) {
-      return false;
-    }
-
-    segment->set_next(unused_segments_heads_[power]);
-    unused_segments_heads_[power] = segment;
-    unused_segments_size_ += size;
-    unused_segments_sizes[power]++;
-  }
-
-  return true;
-}
-
-void AccountingAllocator::ClearPool() {
-  base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
-
-  for (uint8_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
-       power++) {
-    Segment* current = unused_segments_heads_[power];
-    while (current) {
-      Segment* next = current->next();
-      FreeSegment(current);
-      current = next;
-    }
-    unused_segments_heads_[power] = nullptr;
-  }
-}
-
 }  // namespace internal
 }  // namespace v8
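The deleted pool keys its free lists by power-of-two size class: GetSegmentFromPool rounds a request up to the next power of two, while AddSegmentToPool rounds a returned segment down, so any pooled segment handed out is at least as large as the request it serves (hence the DCHECK_GE above). A self-contained sketch of that bucket arithmetic, with the constants copied from the deleted header (the free-standing function names are illustrative):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Size-class arithmetic from the deleted pool, extracted for illustration.
constexpr uint8_t kMinSegmentSizePower = 13;  // 8 KB
constexpr uint8_t kMaxSegmentSizePower = 18;  // 256 KB

// Bucket for a *request*: round up, so the pooled segment is big enough.
int RequestBucket(size_t requested_size) {
  if (requested_size > (size_t{1} << kMaxSegmentSizePower)) return -1;
  uint8_t power = kMinSegmentSizePower;
  while (requested_size > (size_t{1} << power)) power++;
  return power - kMinSegmentSizePower;
}

// Bucket for a *returned* segment: round down, so every segment filed in
// bucket b is at least 2^(kMinSegmentSizePower + b) bytes.
int ReturnBucket(size_t size) {
  if (size < (size_t{1} << kMinSegmentSizePower)) return -1;
  if (size >= (size_t{1} << (kMaxSegmentSizePower + 1))) return -1;
  uint8_t power = kMaxSegmentSizePower;
  while (size < (size_t{1} << power)) power--;
  return power - kMinSegmentSizePower;
}

int main() {
  // A 10000-byte request rounds up to the 16 KB class (bucket 1)...
  assert(RequestBucket(10000) == 1);
  // ...and a returned 20000-byte segment rounds down to the same class,
  // so reusing it to serve the request above is always safe.
  assert(ReturnBucket(20000) == 1);
  return 0;
}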
src/zone/accounting-allocator.h
@@ -19,55 +19,19 @@ namespace internal {
 
 class AccountingAllocator {
  public:
-  AccountingAllocator();
-  virtual ~AccountingAllocator();
+  AccountingAllocator() = default;
+  virtual ~AccountingAllocator() = default;
 
-  // Gets an empty segment from the pool or creates a new one.
-  virtual Segment* GetSegment(size_t bytes);
-  // Returns unneeded segments to either insert them into the pool or release
-  // them if the pool is already full or memory pressure is high.
-  virtual void ReturnSegment(Segment* memory);
+  virtual Segment* AllocateSegment(size_t bytes);
+  virtual void FreeSegment(Segment* memory);
 
   size_t GetCurrentMemoryUsage() const;
   size_t GetMaxMemoryUsage() const;
 
-  size_t GetCurrentPoolSize() const;
-
-  void MemoryPressureNotification(MemoryPressureLevel level);
-
  private:
-  static const uint8_t kMinSegmentSizePower = 13;
-  static const uint8_t kMaxSegmentSizePower = 18;
-  static const uint8_t kMaxSegmentsPerBucket = 5;
-
-  STATIC_ASSERT(kMinSegmentSizePower <= kMaxSegmentSizePower);
-
-  // Allocates a new segment. Returns nullptr on failed allocation.
-  Segment* AllocateSegment(size_t bytes);
-  void FreeSegment(Segment* memory);
-
-  // Returns a segment from the pool of at least the requested size.
-  Segment* GetSegmentFromPool(size_t requested_size);
-  // Tries to add a segment to the pool. Returns false if the pool is full.
-  bool AddSegmentToPool(Segment* segment);
-
-  // Empties the pool and puts all its contents onto the garbage stack.
-  void ClearPool();
-
-  Segment*
-      unused_segments_heads_[1 + kMaxSegmentSizePower - kMinSegmentSizePower];
-
-  size_t unused_segments_sizes[1 + kMaxSegmentSizePower - kMinSegmentSizePower];
-
-  size_t unused_segments_size_ = 0;
-
-  base::Mutex unused_segments_mutex_;
-
   base::AtomicWord current_memory_usage_ = 0;
   base::AtomicWord max_memory_usage_ = 0;
 
-  base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
-
   DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
 };
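The deleted constants also bound how much memory the pool could retain: six buckets (2^13 through 2^18 bytes) holding at most kMaxSegmentsPerBucket = 5 segments each. Counting one class size per slot gives 5 × (2^13 + 2^14 + … + 2^18) = 5 × (2^19 − 2^13) = 2,580,480 bytes, roughly 2.5 MB; this is an estimate, since a segment filed in a bucket may be almost twice its class size. A quick constexpr check of that arithmetic:

#include <cstddef>

// Worst-case pooled bytes under the deleted constants, counting one
// class size (2^power bytes) per pooled slot.
constexpr int kMinPower = 13;
constexpr int kMaxPower = 18;
constexpr int kMaxPerBucket = 5;

constexpr size_t PoolBoundBytes() {
  size_t sum = 0;
  for (int p = kMinPower; p <= kMaxPower; p++) sum += size_t{1} << p;
  return kMaxPerBucket * sum;
}

static_assert(PoolBoundBytes() == 2580480, "5 * (2^19 - 2^13) bytes");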
src/zone/zone-segment.cc (deleted)
@@ -1,23 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/zone/zone-segment.h"
-
-namespace v8 {
-namespace internal {
-
-void Segment::ZapContents() {
-#ifdef DEBUG
-  memset(start(), kZapDeadByte, capacity());
-#endif
-}
-
-void Segment::ZapHeader() {
-#ifdef DEBUG
-  memset(this, kZapDeadByte, sizeof(Segment));
-#endif
-}
-
-}  // namespace internal
-}  // namespace v8
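The deleted Zap helpers exist to make use-after-free of zone memory fail loudly in debug builds: freed bytes are overwritten with 0xcd, so a stale pointer or length read from a zapped segment shows a telltale 0xcdcdcdcd pattern in a debugger or crash dump instead of silently plausible data. A tiny illustration of the pattern (not V8 code):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // Debug-mode zapping: fill freed memory with a distinctive byte.
  unsigned char block[sizeof(uint32_t)];
  std::memset(block, 0xcd, sizeof(block));

  // Any stale value read back out of zapped memory is immediately
  // recognizable as dead.
  uint32_t stale;
  std::memcpy(&stale, block, sizeof(stale));
  std::printf("0x%08x\n", stale);  // prints 0xcdcdcdcd
  return 0;
}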
src/zone/zone-segment.h
@@ -38,17 +38,7 @@ class Segment {
   Address start() const { return address(sizeof(Segment)); }
   Address end() const { return address(size_); }
 
-  // Zaps the contents of the segment (but not the header).
-  void ZapContents();
-  // Zaps the header and makes the segment unusable this way.
-  void ZapHeader();
-
  private:
-#ifdef DEBUG
-  // Constant byte value used for zapping dead memory in debug mode.
-  static const unsigned char kZapDeadByte = 0xcd;
-#endif
-
   // Computes the address of the nth byte in this segment.
   Address address(size_t n) const { return Address(this) + n; }
src/zone/zone.cc
@@ -51,6 +51,7 @@ Zone::Zone(AccountingAllocator* allocator)
 
 Zone::~Zone() {
   DeleteAll();
+  DeleteKeptSegment();
 
   DCHECK(segment_bytes_allocated_ == 0);
 }
@@ -91,30 +92,85 @@ void* Zone::New(size_t size) {
 }
 
 void Zone::DeleteAll() {
-  // Traverse the chained list of segments and return them all to the allocator.
+#ifdef DEBUG
+  // Constant byte value used for zapping dead memory in debug mode.
+  static const unsigned char kZapDeadByte = 0xcd;
+#endif
+
+  // Find a segment with a suitable size to keep around.
+  Segment* keep = nullptr;
+  // Traverse the chained list of segments, zapping (in debug mode)
+  // and freeing every segment except the one we wish to keep.
   for (Segment* current = segment_head_; current;) {
     Segment* next = current->next();
-    size_t size = current->size();
-
-    // Un-poison the segment content so we can re-use or zap it later.
-    ASAN_UNPOISON_MEMORY_REGION(current->start(), current->capacity());
-
-    segment_bytes_allocated_ -= size;
-    allocator_->ReturnSegment(current);
+    if (!keep && current->size() <= kMaximumKeptSegmentSize) {
+      // Unlink the segment we wish to keep from the list.
+      keep = current;
+      keep->set_next(nullptr);
+    } else {
+      size_t size = current->size();
+#ifdef DEBUG
+      // Un-poison first so the zapping doesn't trigger ASan complaints.
+      ASAN_UNPOISON_MEMORY_REGION(current, size);
+      // Zap the entire current segment (including the header).
+      memset(current, kZapDeadByte, size);
+#endif
+      segment_bytes_allocated_ -= size;
+      allocator_->FreeSegment(current);
+    }
     current = next;
   }
 
-  position_ = limit_ = 0;
+  // If we have found a segment we want to keep, we must recompute the
+  // variables 'position' and 'limit' to prepare for future allocate
+  // attempts. Otherwise, we must clear the position and limit to
+  // force a new segment to be allocated on demand.
+  if (keep) {
+    Address start = keep->start();
+    position_ = RoundUp(start, kAlignment);
+    limit_ = keep->end();
+    // Un-poison so we can re-use the segment later.
+    ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
+#ifdef DEBUG
+    // Zap the contents of the kept segment (but not the header).
+    memset(start, kZapDeadByte, keep->capacity());
+#endif
+  } else {
+    position_ = limit_ = 0;
+  }
 
   allocation_size_ = 0;
-  segment_head_ = nullptr;
+  // Update the head segment to be the kept segment (if any).
+  segment_head_ = keep;
 }
 
+void Zone::DeleteKeptSegment() {
+#ifdef DEBUG
+  // Constant byte value used for zapping dead memory in debug mode.
+  static const unsigned char kZapDeadByte = 0xcd;
+#endif
+
+  DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr);
+  if (segment_head_ != nullptr) {
+    size_t size = segment_head_->size();
+#ifdef DEBUG
+    // Un-poison first so the zapping doesn't trigger ASan complaints.
+    ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
+    // Zap the entire kept segment (including the header).
+    memset(segment_head_, kZapDeadByte, size);
+#endif
+    segment_bytes_allocated_ -= size;
+    allocator_->FreeSegment(segment_head_);
+    segment_head_ = nullptr;
+  }
+
+  DCHECK(segment_bytes_allocated_ == 0);
+}
+
 // Creates a new segment, sets its size, and pushes it to the front
 // of the segment chain. Returns the new segment.
 Segment* Zone::NewSegment(size_t size) {
-  Segment* result = allocator_->GetSegment(size);
+  Segment* result = allocator_->AllocateSegment(size);
   segment_bytes_allocated_ += size;
   if (result != nullptr) {
     result->Initialize(segment_head_, size, this);
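One subtlety in the restored DeleteAll(): position_ is recomputed as RoundUp(start, kAlignment) because the kept segment's payload begins right after the Segment header, which need not fall on an allocation boundary. A sketch of the rounding arithmetic, assuming the usual power-of-two alignment (Zone's actual kAlignment is pointer-sized):

#include <cassert>
#include <cstdint>

// Round `value` up to the next multiple of power-of-two `alignment`;
// the same arithmetic Zone uses to place position_ inside a segment.
uintptr_t RoundUp(uintptr_t value, uintptr_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  assert(RoundUp(41, 8) == 48);  // a mid-slot address moves up...
  assert(RoundUp(48, 8) == 48);  // ...an already-aligned one stays put.
  return 0;
}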
src/zone/zone.h
@@ -25,7 +25,7 @@ namespace internal {
 //
 // Note: There is no need to initialize the Zone; the first time an
 // allocation is attempted, a segment of memory will be requested
-// through the allocator.
+// through a call to malloc().
 //
 // Note: The implementation is inherently not thread safe. Do not use
 // from multi-threaded code.
@@ -44,9 +44,14 @@ class Zone final {
     return static_cast<T*>(New(length * sizeof(T)));
   }
 
-  // Deletes all objects and frees all memory allocated in the Zone.
+  // Deletes all objects and frees all memory allocated in the Zone. Keeps one
+  // small (size <= kMaximumKeptSegmentSize) segment around if it finds one.
   void DeleteAll();
 
+  // Deletes the last small segment kept around by DeleteAll(). You
+  // may no longer allocate in the Zone after a call to this method.
+  void DeleteKeptSegment();
+
   // Returns true if more memory has been allocated in zones than
   // the limit allows.
   bool excess_allocation() const {
@@ -74,6 +79,9 @@ class Zone final {
   // Never allocate segments larger than this size in bytes.
   static const size_t kMaximumSegmentSize = 1 * MB;
 
+  // Never keep segments larger than this size in bytes around.
+  static const size_t kMaximumKeptSegmentSize = 64 * KB;
+
   // Report zone excess when allocation exceeds this limit.
   static const size_t kExcessLimit = 256 * MB;