Revert of Concurrently unmap free pages. (patchset #4 id:60001 of https://codereview.chromium.org/1303263002/ )
Reason for revert:
Several tests on V8 Linux64 TSAN bot are broken, due to data races between allocation & GC.
A bisect points to this CL, and the CL description sounds pertinent to the observed breakage.

Original issue's description:
> Concurrently unmap free pages.
>
> BUG=
>
> Committed: https://crrev.com/d1aeb45d96123d47023066b244c0f450fbe57d2d
> Cr-Commit-Position: refs/heads/master@{#30306}

TBR=mlippautz@chromium.org,hpayer@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review URL: https://codereview.chromium.org/1306213002

Cr-Commit-Position: refs/heads/master@{#30310}
commit 218948e5f2
parent 201706bc91
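For context before the diff: the reverted CL queued dead MemoryChunks during GC, handed the whole list to a background task that unmapped it and signaled a semaphore, and had the main thread wait twice (once for the job posted after sweeping, once for the job posted after compaction). The following is a minimal stand-alone sketch of that handshake, not V8 code: Chunk, Heap, QueueChunkForFree and the other names are illustrative, std::thread stands in for Platform::CallOnBackgroundThread, and std::counting_semaphore stands in for base::Semaphore.

// Minimal sketch (not V8 code) of the background-unmap handshake removed by
// this revert. Requires C++20 for <semaphore>; compile with -pthread.
#include <cstdio>
#include <semaphore>
#include <thread>
#include <vector>

struct Chunk {
  Chunk* next = nullptr;
};

class Heap {
 public:
  // Mirrors Heap::QueueMemoryChunkForFree: push onto an intrusive list.
  void QueueChunkForFree(Chunk* chunk) {
    chunk->next = queued_for_free_;
    queued_for_free_ = chunk;
  }

  // Mirrors Heap::FreeQueuedChunks: hand the whole list to a background
  // thread, or signal immediately if there is nothing to do, so that the
  // later Wait() calls never block forever.
  void FreeQueuedChunks() {
    Chunk* head = queued_for_free_;
    queued_for_free_ = nullptr;
    if (head != nullptr) {
      workers_.emplace_back([this, head] {
        FreeList(head);
        unmap_done_.release();  // one signal per posted job
      });
    } else {
      unmap_done_.release();  // nothing to unmap: signal right away
    }
  }

  // Mirrors Heap::WaitUntilUnmappingOfFreeChunksCompleted: one job is
  // started after sweeping and one after compaction, so wait twice.
  void WaitUntilUnmappingCompleted() {
    unmap_done_.acquire();
    unmap_done_.acquire();
  }

  ~Heap() {
    for (auto& t : workers_) t.join();
  }

 private:
  static void FreeList(Chunk* head) {
    while (head != nullptr) {
      Chunk* next = head->next;
      delete head;  // stands in for MemoryAllocator::Free(chunk)
      head = next;
    }
  }

  Chunk* queued_for_free_ = nullptr;
  std::counting_semaphore<2> unmap_done_{0};
  std::vector<std::thread> workers_;
};

int main() {
  Heap heap;
  heap.QueueChunkForFree(new Chunk());
  heap.FreeQueuedChunks();            // "after sweeping": posts a background job
  heap.FreeQueuedChunks();            // "after compaction": empty queue, signals directly
  heap.WaitUntilUnmappingCompleted();
  std::printf("all queued chunks unmapped\n");
}

The else branch that signals even when the queue is empty matters: WaitUntilUnmappingCompleted always performs two waits, so every call to FreeQueuedChunks must produce exactly one signal.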
src/heap/heap.cc
@@ -135,7 +135,6 @@ Heap::Heap()
       current_gc_flags_(Heap::kNoGCFlags),
       external_string_table_(this),
       chunks_queued_for_free_(NULL),
-      pending_unmap_job_semaphore_(0),
       gc_callbacks_depth_(0),
       deserialization_complete_(false),
       concurrent_sweeping_enabled_(false),
@@ -6515,33 +6514,6 @@ void ExternalStringTable::TearDown() {
 }
 
 
-class Heap::UnmapFreeMemoryTask : public v8::Task {
- public:
-  UnmapFreeMemoryTask(Heap* heap, MemoryChunk* head)
-      : heap_(heap), head_(head) {}
-  virtual ~UnmapFreeMemoryTask() {}
-
- private:
-  // v8::Task overrides.
-  void Run() override {
-    heap_->FreeQueuedChunks(head_);
-    heap_->pending_unmap_job_semaphore_.Signal();
-  }
-
-  Heap* heap_;
-  MemoryChunk* head_;
-
-  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
-};
-
-
-void Heap::WaitUntilUnmappingOfFreeChunksCompleted() {
-  // We start an unmap job after sweeping and after compaction.
-  pending_unmap_job_semaphore_.Wait();
-  pending_unmap_job_semaphore_.Wait();
-}
-
-
 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
   chunk->set_next_chunk(chunks_queued_for_free_);
   chunks_queued_for_free_ = chunk;
@@ -6556,32 +6528,19 @@ void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages()
     next = chunk->next_chunk();
     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
   }
-  store_buffer()->Compact();
-  store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
+  isolate_->heap()->store_buffer()->Compact();
+  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
 }
 
 
 void Heap::FreeQueuedChunks() {
-  if (chunks_queued_for_free_ != NULL) {
-    V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
-        v8::Platform::kShortRunningTask);
-    chunks_queued_for_free_ = NULL;
-  } else {
-    // If we do not have anything to unmap, we just signal the semaphore
-    // that we are done.
-    pending_unmap_job_semaphore_.Signal();
-  }
-}
-
-
-void Heap::FreeQueuedChunks(MemoryChunk* list_head) {
   MemoryChunk* next;
   MemoryChunk* chunk;
-  for (chunk = list_head; chunk != NULL; chunk = next) {
+  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
     next = chunk->next_chunk();
     isolate_->memory_allocator()->Free(chunk);
   }
+  chunks_queued_for_free_ = NULL;
 }
 
 
src/heap/heap.h
@@ -1423,9 +1423,7 @@ class Heap {
 
   void QueueMemoryChunkForFree(MemoryChunk* chunk);
   void FilterStoreBufferEntriesOnAboutToBeFreedPages();
-  void FreeQueuedChunks(MemoryChunk* list_head);
   void FreeQueuedChunks();
-  void WaitUntilUnmappingOfFreeChunksCompleted();
 
   int gc_count() const { return gc_count_; }
 
@@ -1602,8 +1600,6 @@ class Heap {
   bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
 
  private:
-  class UnmapFreeMemoryTask;
-
   static const int kInitialStringTableSize = 2048;
   static const int kInitialEvalCacheSize = 64;
   static const int kInitialNumberStringCacheSize = 256;
@@ -2281,8 +2277,6 @@ class Heap {
 
   MemoryChunk* chunks_queued_for_free_;
 
-  base::Semaphore pending_unmap_job_semaphore_;
-
   base::Mutex relocation_mutex_;
 
   int gc_callbacks_depth_;
src/heap/mark-compact.cc
@@ -519,15 +519,12 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
     SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
     SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
   }
-
+  // Wait twice for both jobs.
   if (heap()->concurrent_sweeping_enabled()) {
     pending_sweeper_jobs_semaphore_.Wait();
     pending_sweeper_jobs_semaphore_.Wait();
     pending_sweeper_jobs_semaphore_.Wait();
   }
-
-  heap()->WaitUntilUnmappingOfFreeChunksCompleted();
-
   ParallelSweepSpacesComplete();
   sweeping_in_progress_ = false;
   RefillFreeList(heap()->paged_space(OLD_SPACE));
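The reason for revert cites data races between allocation and GC reported by the TSAN bot. The exact V8 race is not reproduced here; the contrived example below only illustrates the class of problem ThreadSanitizer flags when a mutator thread and a background GC task touch the same field without synchronization. All names in it are hypothetical.

// Contrived example (not the actual V8 failure) of the kind of report a
// TSAN build produces when an allocation path and a background GC task
// update shared state with no ordering between them.
#include <cstddef>
#include <cstdio>
#include <thread>

struct PageList {
  std::size_t free_bytes = 0;  // shared, but accessed without synchronization
};

int main() {
  PageList list;

  // "Allocation" on the mutator thread.
  std::thread mutator([&list] {
    for (int i = 0; i < 1000; i++) list.free_bytes += 8;
  });

  // "GC" on a background thread returning memory to the pool.
  std::thread collector([&list] {
    for (int i = 0; i < 1000; i++) list.free_bytes += 16;
  });

  mutator.join();
  collector.join();
  std::printf("free_bytes = %zu\n", list.free_bytes);
}

Built with something like clang++ -fsanitize=thread -pthread, this typically produces a data-race report on free_bytes; making the field a std::atomic<std::size_t>, or guarding it with a mutex, removes the report.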