[zone] Further simplify zone expansion, use single default page size

Change-Id: Ibe539f0c90fdcd93ba5da40240c6325138a05bac
Reviewed-on: https://chromium-review.googlesource.com/c/1347480
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57732}
This commit is contained in:
Toon Verwaest 2018-11-22 12:50:42 +01:00 committed by Commit Bot
parent 4e3a17d040
commit 92e34290d4
3 changed files with 14 additions and 54 deletions

View File

@@ -22,9 +22,6 @@ class Segment {
public:
void Initialize(size_t size) { size_ = size; }
Zone* zone() const { return zone_; }
void set_zone(Zone* const zone) { zone_ = zone; }
Segment* next() const { return next_; }
void set_next(Segment* const next) { next_ = next; }
@@ -50,7 +47,6 @@ class Segment {
return reinterpret_cast<Address>(this) + n;
}
Zone* zone_;
Segment* next_;
size_t size_;
};

View File

@@ -99,71 +99,42 @@ void Zone::DeleteAll() {
segment_head_ = nullptr;
}
// Creates a new segment, sets its size, and pushes it to the front
// of the segment chain. Returns the new segment, or nullptr when the
// underlying allocator cannot satisfy the request.
// NOTE(review): in this diff, this helper is the pre-change code that the
// commit removes; its bookkeeping is inlined into NewExpand instead.
Segment* Zone::NewSegment(size_t requested_size) {
Segment* result = allocator_->GetSegment(requested_size);
if (result != nullptr) {
// The allocator may hand back a larger segment than requested; account
// for the actual size, not the requested one.
DCHECK_GE(result->size(), requested_size);
segment_bytes_allocated_ += result->size();
result->set_zone(this);
// Push the new segment onto the front of the chain.
result->set_next(segment_head_);
segment_head_ = result;
}
return result;
}
// NOTE(review): this span is a rendered diff hunk — for several statements
// both the pre-change and post-change versions appear on adjacent lines
// (e.g. the DCHECK / DCHECK_LT pair below), so the text is not compilable
// as-is. Comments below flag the old/new pairs.
// Expands the Zone by allocating a fresh segment when the current one has
// insufficient room for an allocation of `size` bytes; returns the start
// of the newly reserved, aligned region.
Address Zone::NewExpand(size_t size) {
// Make sure the requested size is already properly aligned and that
// there isn't enough room in the Zone to satisfy the request.
DCHECK_EQ(size, RoundDown(size, kAlignmentInBytes));
// Pre-change assertion; the DCHECK_LT on the next line is its replacement.
DCHECK(limit_ - position_ < size);
DCHECK_LT(limit_ - position_, size);
// Commit the allocation_size_ of segment_head_ if any.
allocation_size_ = allocation_size();
// Pre-change comment and sizing code below describe the removed
// 'high water mark' growth strategy; the commit replaces it with a single
// default segment size (kDefaultSegmentSize).
// Compute the new segment size. We use a 'high water mark'
// strategy, where we increase the segment size every time we expand
// except that we employ a maximum segment size when we delete. This
// is to avoid excessive malloc() and free() overhead.
Segment* head = segment_head_;
const size_t old_size = (head == nullptr) ? 0 : head->size();
static const size_t kSegmentOverhead = sizeof(Segment) + kAlignmentInBytes;
const size_t new_size_no_overhead = size + (old_size << 1);
size_t new_size = kSegmentOverhead + new_size_no_overhead;
// Old name (min_new_size) next to its post-change counterpart (min_size);
// both denote the smallest segment that can hold the request plus overhead.
const size_t min_new_size = kSegmentOverhead + size;
const size_t min_size = kSegmentOverhead + size;
// Guard against integer overflow.
// The next two `if` lines are the old and new forms of the same guard;
// the IsInRange form also subsumes the separate INT_MAX check further down.
if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
if (V8_UNLIKELY(!IsInRange(min_size, size, static_cast<size_t>(INT_MAX)))) {
V8::FatalProcessOutOfMemory(nullptr, "Zone");
return kNullAddress;
}
// Pre-change clamping of new_size to [kMinimumSegmentSize,
// kMaximumSegmentSize] — removed by this commit.
if (new_size < kMinimumSegmentSize) {
new_size = kMinimumSegmentSize;
} else if (new_size >= kMaximumSegmentSize) {
// Limit the size of new segments to avoid growing the segment size
// exponentially, thus putting pressure on contiguous virtual address space.
// All the while making sure to allocate a segment large enough to hold the
// requested size.
new_size = Max(min_new_size, kMaximumSegmentSize);
}
if (new_size > INT_MAX) {
V8::FatalProcessOutOfMemory(nullptr, "Zone");
return kNullAddress;
}
// Pre-change allocation through the (removed) NewSegment helper, followed
// by the post-change path that calls allocator_->GetSegment directly with
// Max(min_size, kDefaultSegmentSize).
Segment* segment = NewSegment(new_size);
if (segment == nullptr) {
const size_t requested_size = Max(min_size, kDefaultSegmentSize);
Segment* segment = allocator_->GetSegment(requested_size);
if (V8_UNLIKELY(segment == nullptr)) {
V8::FatalProcessOutOfMemory(nullptr, "Zone");
return kNullAddress;
}
DCHECK_GE(segment->size(), requested_size);
// Bookkeeping formerly done inside NewSegment, now inlined here: track
// total allocated bytes and push the segment onto the front of the chain.
segment_bytes_allocated_ += segment->size();
segment->set_next(segment_head_);
segment_head_ = segment;
// Recompute 'top' and 'limit' based on the new segment.
Address result = RoundUp(segment->start(), kAlignmentInBytes);
position_ = result + size;
// Check for address overflow.
// (Should not happen since the segment is guaranteed to accommodate
// size bytes + header and alignment padding)
// Old/new forms of the same overflow assertion.
DCHECK(position_ >= result);
DCHECK_LE(result, position_);
limit_ = segment->end();
// Old/new forms of the same bounds assertion.
DCHECK(position_ <= limit_);
DCHECK_LE(position_, limit_);
return result;
}

View File

@@ -96,10 +96,7 @@ class V8_EXPORT_PRIVATE Zone final {
static const size_t kAlignmentInBytes = 8;
// Never allocate segments smaller than this size in bytes.
static const size_t kMinimumSegmentSize = 8 * KB;
// Never allocate segments larger than this size in bytes.
static const size_t kMaximumSegmentSize = 32 * KB;
static const size_t kDefaultSegmentSize = 32 * KB;
// Report zone excess when allocation exceeds this limit.
static const size_t kExcessLimit = 256 * MB;
@@ -118,10 +115,6 @@ class V8_EXPORT_PRIVATE Zone final {
// room in the Zone already.
Address NewExpand(size_t size);
// Creates a new segment, sets its size, and pushes it to the front
// of the segment chain. Returns the new segment.
inline Segment* NewSegment(size_t requested_size);
// The free region in the current (front) segment is represented as
// the half-open interval [position, limit). The 'position' variable
// is guaranteed to be aligned as dictated by kAlignment.