Revert of Concurrently unmap free pages. (patchset #4 id:60001 of https://codereview.chromium.org/1303263002/ )

Reason for revert:
Several tests on the V8 Linux64 TSAN bot are broken due to data races between allocation & GC.

A bisect points to this CL, and the CL description sounds pertinent to the observed breakage.
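
For readers unfamiliar with the failure mode, the short program below is a minimal standalone sketch (not V8 code; all names are placeholders) of the kind of unsynchronized access ThreadSanitizer reports when a background free/unmap task and the allocating main thread touch shared heap bookkeeping without synchronization.

// Minimal standalone sketch of a data race TSAN would flag; "page_counter"
// stands in for any heap bookkeeping shared between the allocation path and
// a background unmapping task. Illustrative only, not V8 code.
#include <cstdio>
#include <thread>

static long page_counter = 0;  // shared, intentionally unsynchronized

int main() {
  std::thread background_unmap([] {
    for (int i = 0; i < 100000; ++i) --page_counter;  // "GC/unmap" side
  });
  for (int i = 0; i < 100000; ++i) ++page_counter;    // "allocation" side
  background_unmap.join();
  std::printf("page_counter = %ld\n", page_counter);  // result is unpredictable
  return 0;
}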

Original issue's description:
> Concurrently unmap free pages.
>
> BUG=
>
> Committed: https://crrev.com/d1aeb45d96123d47023066b244c0f450fbe57d2d
> Cr-Commit-Position: refs/heads/master@{#30306}

TBR=mlippautz@chromium.org,hpayer@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review URL: https://codereview.chromium.org/1306213002

Cr-Commit-Position: refs/heads/master@{#30310}
vogelheim 2015-08-21 09:20:50 -07:00 committed by Commit bot
parent 201706bc91
commit 218948e5f2
3 changed files with 5 additions and 55 deletions

src/heap/heap.cc

@@ -135,7 +135,6 @@ Heap::Heap()
current_gc_flags_(Heap::kNoGCFlags),
external_string_table_(this),
chunks_queued_for_free_(NULL),
pending_unmap_job_semaphore_(0),
gc_callbacks_depth_(0),
deserialization_complete_(false),
concurrent_sweeping_enabled_(false),
@@ -6515,33 +6514,6 @@ void ExternalStringTable::TearDown() {
}
class Heap::UnmapFreeMemoryTask : public v8::Task {
public:
UnmapFreeMemoryTask(Heap* heap, MemoryChunk* head)
: heap_(heap), head_(head) {}
virtual ~UnmapFreeMemoryTask() {}
private:
// v8::Task overrides.
void Run() override {
heap_->FreeQueuedChunks(head_);
heap_->pending_unmap_job_semaphore_.Signal();
}
Heap* heap_;
MemoryChunk* head_;
DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};
void Heap::WaitUntilUnmappingOfFreeChunksCompleted() {
// We start an unmap job after sweeping and after compaction.
pending_unmap_job_semaphore_.Wait();
pending_unmap_job_semaphore_.Wait();
}
void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
chunk->set_next_chunk(chunks_queued_for_free_);
chunks_queued_for_free_ = chunk;
@@ -6556,32 +6528,19 @@ void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
next = chunk->next_chunk();
chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
}
store_buffer()->Compact();
store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
isolate_->heap()->store_buffer()->Compact();
isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
}
void Heap::FreeQueuedChunks() {
if (chunks_queued_for_free_ != NULL) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
v8::Platform::kShortRunningTask);
chunks_queued_for_free_ = NULL;
} else {
// If we do not have anything to unmap, we just signal the semaphore
// that we are done.
pending_unmap_job_semaphore_.Signal();
}
}
void Heap::FreeQueuedChunks(MemoryChunk* list_head) {
MemoryChunk* next;
MemoryChunk* chunk;
for (chunk = list_head; chunk != NULL; chunk = next) {
for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
next = chunk->next_chunk();
isolate_->memory_allocator()->Free(chunk);
}
chunks_queued_for_free_ = NULL;
}
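
Taken together, the removed heap.cc code implements a hand-off: FreeQueuedChunks() either posts an UnmapFreeMemoryTask to a background thread or, when the queue is empty, signals the semaphore immediately, and WaitUntilUnmappingOfFreeChunksCompleted() performs one Wait() per job because an unmap job is started after sweeping and after compaction. The following is a minimal standalone sketch of that signal-per-job / wait-per-job handshake, using C++20 std::counting_semaphore and std::thread in place of V8's base::Semaphore and platform task runner; the function bodies and the std::vector<int> stand-in for the chunk list are illustrative assumptions, not the original implementation.

// Standalone sketch (not V8 code) of the synchronization pattern removed by
// this revert. Compile with -std=c++20.
#include <cstdio>
#include <semaphore>
#include <thread>
#include <utility>
#include <vector>

// One permit is released per "unmap job", whether or not work was queued.
std::counting_semaphore<2> pending_unmap_jobs(0);
std::vector<std::thread> background_tasks;

void FreeQueuedChunks(std::vector<int>* queued_chunks) {
  if (!queued_chunks->empty()) {
    // Hand the queued list to a background task, as CallOnBackgroundThread
    // does in the removed code; the task signals once everything is freed.
    background_tasks.emplace_back([chunks = std::move(*queued_chunks)]() mutable {
      chunks.clear();                // stand-in for unmapping each chunk
      pending_unmap_jobs.release();  // job done
    });
    queued_chunks->clear();
  } else {
    // Nothing to unmap: signal right away so the waiter is not blocked.
    pending_unmap_jobs.release();
  }
}

void WaitUntilUnmappingOfFreeChunksCompleted() {
  // One Wait() per job: a job is started after sweeping and after compaction.
  pending_unmap_jobs.acquire();
  pending_unmap_jobs.acquire();
}

int main() {
  std::vector<int> after_sweeping{1, 2, 3};
  std::vector<int> after_compaction;  // empty: exercises the signal-only path
  FreeQueuedChunks(&after_sweeping);
  FreeQueuedChunks(&after_compaction);
  WaitUntilUnmappingOfFreeChunksCompleted();
  std::printf("both unmap jobs accounted for\n");
  for (std::thread& task : background_tasks) task.join();
  return 0;
}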

src/heap/heap.h

@@ -1423,9 +1423,7 @@ class Heap {
void QueueMemoryChunkForFree(MemoryChunk* chunk);
void FilterStoreBufferEntriesOnAboutToBeFreedPages();
void FreeQueuedChunks(MemoryChunk* list_head);
void FreeQueuedChunks();
void WaitUntilUnmappingOfFreeChunksCompleted();
int gc_count() const { return gc_count_; }
@@ -1602,8 +1600,6 @@ class Heap {
bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
private:
class UnmapFreeMemoryTask;
static const int kInitialStringTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;
@@ -2281,8 +2277,6 @@ class Heap {
MemoryChunk* chunks_queued_for_free_;
base::Semaphore pending_unmap_job_semaphore_;
base::Mutex relocation_mutex_;
int gc_callbacks_depth_;

src/heap/mark-compact.cc

@@ -519,15 +519,12 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
}
// Wait twice for both jobs.
if (heap()->concurrent_sweeping_enabled()) {
pending_sweeper_jobs_semaphore_.Wait();
pending_sweeper_jobs_semaphore_.Wait();
pending_sweeper_jobs_semaphore_.Wait();
}
heap()->WaitUntilUnmappingOfFreeChunksCompleted();
ParallelSweepSpacesComplete();
sweeping_in_progress_ = false;
RefillFreeList(heap()->paged_space(OLD_SPACE));
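
The restored mark-compact.cc code relies on a simple invariant: each concurrent sweeper job signals pending_sweeper_jobs_semaphore_ exactly once, and EnsureSweepingCompleted() performs exactly one Wait() per job (three here, matching the three Wait() calls above), with the extra wait for the unmap job dropped by this revert. Below is a minimal standalone sketch of that one-signal-per-job, one-wait-per-job rendezvous, written with C++20 std::latch rather than V8's semaphore; the space names and the loop body are illustrative only.

// Standalone sketch (not V8 code) of waiting for a fixed number of
// background sweeper jobs. Compile with -std=c++20.
#include <cstdio>
#include <latch>
#include <thread>
#include <vector>

int main() {
  // Three concurrent sweeper jobs, mirroring the three Wait() calls above.
  std::latch sweepers_done(3);
  std::vector<std::thread> sweeper_jobs;
  for (const char* space : {"OLD_SPACE", "CODE_SPACE", "MAP_SPACE"}) {
    sweeper_jobs.emplace_back([space, &sweepers_done] {
      std::printf("swept %s\n", space);  // stand-in for the sweeping work
      sweepers_done.count_down();        // analogous to one semaphore Signal()
    });
  }
  // Analogous to the three pending_sweeper_jobs_semaphore_.Wait() calls: do
  // not declare sweeping complete until every job has signalled.
  sweepers_done.wait();
  std::printf("sweeping complete\n");
  for (std::thread& job : sweeper_jobs) job.join();
  return 0;
}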