Clean up StoreBuffer::EnsureSpace.

BUG=
R=mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/16690006

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15085 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
hpayer@chromium.org 2013-06-12 13:14:35 +00:00
parent cc27c4c41b
commit a11d33967f
3 changed files with 11 additions and 15 deletions
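
Taken together, the three files below replace several hand-rolled capacity checks, and the indirect CheckForFullBuffer() path, with a single explicit predicate. A minimal sketch of the resulting shape, assuming simplified pointer fields (the real StoreBuffer uses V8-internal types):

    #include <cstdint>

    // Sketch only: condensed from the diff below, not the full V8 class.
    class StoreBufferSketch {
      char* old_start_;  // start of the old (deduplicated) buffer
      char* old_top_;    // next free slot
      char* old_limit_;  // current end of usable space

     public:
      // The new helper: one definition of "enough room left".
      bool SpaceAvailable(intptr_t space_needed) {
        return old_limit_ - old_top_ >= space_needed;
      }

      void EnsureSpace(intptr_t space_needed) {
        if (SpaceAvailable(space_needed)) return;
        // ... grow the buffer, filter entries, or exempt pages,
        // re-checking SpaceAvailable(space_needed) after each step ...
      }
    };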

src/heap.cc

@@ -1212,7 +1212,7 @@ void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
     // Store Buffer overflowed while scanning promoted objects. These are not
     // in any particular page, though they are likely to be clustered by the
     // allocation routines.
-    store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
+    store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
   } else {
     // Store Buffer overflowed while scanning a particular old space page for
     // pointers to new space.

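The halved request above is not arbitrary. The early exits this commit deletes returned once more than half of the buffer was free, while the new exits test SpaceAvailable(space_needed) directly; passing kStoreBufferSize / 2 keeps the old "at least half free" criterion but states it at the call site. A short derivation, under the assumption that the old buffer spans exactly kStoreBufferSize bytes from old_start_ to old_limit_:

    // free = old_limit_ - old_top_, used = old_top_ - old_start_
    // Old check:  free > used
    //         <=> free > (free + used) / 2
    //         <=> free > kStoreBufferSize / 2        (by the assumption)
    // New check:  SpaceAvailable(kStoreBufferSize / 2), i.e.
    //             free >= kStoreBufferSize / 2       (off by at most one byte)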
src/store-buffer.cc

@@ -142,6 +142,11 @@ void StoreBuffer::Uniq() {
 }
 
 
+bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
+  return old_limit_ - old_top_ >= space_needed;
+}
+
+
 void StoreBuffer::EnsureSpace(intptr_t space_needed) {
   while (old_limit_ - old_top_ < space_needed &&
          old_limit_ < old_reserved_limit_) {
@@ -152,7 +157,7 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
     old_limit_ += grow;
   }
 
-  if (old_limit_ - old_top_ >= space_needed) return;
+  if (SpaceAvailable(space_needed)) return;
 
   if (old_buffer_is_filtered_) return;
   ASSERT(may_move_store_buffer_entries_);
@@ -171,9 +176,7 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
     Filter(MemoryChunk::SCAN_ON_SCAVENGE);
   }
 
-  // If filtering out the entries from scan_on_scavenge pages got us down to
-  // less than half full, then we are satisfied with that.
-  if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+  if (SpaceAvailable(space_needed)) return;
 
   // Sample 1 entry in 97 and filter out the pages where we estimate that more
   // than 1 in 8 pointers are to new space.
@@ -192,7 +195,7 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
     ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
     // As a last resort we mark all pages as being exempt from the store buffer.
     ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
-    if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+    if (SpaceAvailable(space_needed)) return;
   }
   UNREACHABLE();
 }
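
The comment kept in the hunk above ("Sample 1 entry in 97 ...") compresses the last-resort strategy: walk the buffer with a prime stride so roughly one entry in 97 is inspected, and exempt pages whose sampled density suggests more than one in eight of their pointers lead to new space. A rough, self-contained illustration of prime-stride sampling; CountSampledHits and the callback are hypothetical names, not V8 API:

    #include <cstddef>
    #include <cstdint>

    // Illustrative only: a prime stride visits entries spread across the
    // buffer even when writes cluster, keeping the sample representative.
    size_t CountSampledHits(const uintptr_t* entries, size_t length,
                            size_t prime_sample_step,
                            bool (*belongs_to_page)(uintptr_t)) {
      size_t hits = 0;
      for (size_t i = 0; i < length; i += prime_sample_step) {
        if (belongs_to_page(entries[i])) hits++;
      }
      return hits;  // each hit stands in for ~prime_sample_step entries
    }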
@@ -294,7 +297,7 @@ bool StoreBuffer::PrepareForIteration() {
 void StoreBuffer::Clean() {
   ClearFilteringHashSets();
   Uniq();  // Also removes things that no longer point to new space.
-  CheckForFullBuffer();
+  EnsureSpace(kStoreBufferSize / 2);
 }
 
 
@@ -687,12 +690,6 @@ void StoreBuffer::Compact() {
     ASSERT(old_top_ <= old_limit_);
   }
   heap_->isolate()->counters()->store_buffer_compactions()->Increment();
-  CheckForFullBuffer();
 }
 
-
-void StoreBuffer::CheckForFullBuffer() {
-  EnsureSpace(kStoreBufferSize * 2);
-}
-
 } }  // namespace v8::internal
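
With CheckForFullBuffer() gone, Compact() and the StoreBufferRebuildScope destructor (see the header diff below) no longer end in an implicit growth pass that always demanded kStoreBufferSize * 2; the callers that actually need headroom now say how much. A condensed before/after of the call-site pattern, using only names from the hunks above:

    // Before: every compaction funneled into an unconditional request.
    //   Compact() -> CheckForFullBuffer() -> EnsureSpace(kStoreBufferSize * 2)
    //
    // After: the requirement is stated, right-sized, at the call site.
    void StoreBuffer::Clean() {
      ClearFilteringHashSets();
      Uniq();  // Also removes things that no longer point to new space.
      EnsureSpace(kStoreBufferSize / 2);  // explicit half-buffer headroom
    }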

src/store-buffer.h

@@ -160,7 +160,7 @@ class StoreBuffer {
 
   void ClearFilteringHashSets();
 
-  void CheckForFullBuffer();
+  bool SpaceAvailable(intptr_t space_needed);
   void Uniq();
   void ExemptPopularPages(int prime_sample_step, int threshold);
@@ -223,7 +223,6 @@ class StoreBufferRebuildScope {
   ~StoreBufferRebuildScope() {
     store_buffer_->callback_ = stored_callback_;
     store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
-    store_buffer_->CheckForFullBuffer();
   }
 
  private: