Do not dereference handles during relocation.
R=hpayer@chromium.org
BUG=

Review URL: https://chromiumcodereview.appspot.com/13982023
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14445 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

Parent: d0f9b7386b
Commit: 49ce7642be
src/handles-inl.h
@@ -91,6 +91,10 @@ bool Handle<T>::IsDereferenceAllowed(bool allow_deferred) const {
       handle < roots_array_start + Heap::kStrongRootListLength) {
     return true;
   }
+  if (isolate->optimizing_compiler_thread()->IsOptimizerThread() &&
+      !Heap::RelocationLock::IsLockedByOptimizerThread(isolate->heap())) {
+    return false;
+  }
   switch (isolate->HandleDereferenceGuardState()) {
     case HandleDereferenceGuard::ALLOW:
       return true;
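
Taken together with the RelocationLock machinery added below, this check enforces a simple protocol: the GC takes the relocation lock while it moves objects, and the optimizer thread may only dereference handles while it holds that same lock. What follows is a self-contained sketch of the protocol in plain C++11; it is an illustration under assumed names, not code from this patch. MiniHeap, flag_parallel_recompilation, and the std::thread-based thread identity stand in for V8's Heap, FLAG_parallel_recompilation, and OptimizingCompilerThread::IsOptimizerThread().

  #include <mutex>
  #include <thread>

  static bool flag_parallel_recompilation = true;  // stand-in for the V8 flag

  class MiniHeap {
   public:
    explicit MiniHeap(std::thread::id optimizer_thread_id)
        : optimizer_thread_id_(optimizer_thread_id),
          locked_by_optimizer_thread_(false) {}

    // Mirrors Heap::RelocationLock: a no-op unless parallel recompilation
    // is enabled; records whether the optimizer thread is the holder.
    class RelocationLock {
     public:
      explicit RelocationLock(MiniHeap* heap) : heap_(heap) {
        if (flag_parallel_recompilation) {
          heap_->relocation_mutex_.lock();
          heap_->locked_by_optimizer_thread_ =
              std::this_thread::get_id() == heap_->optimizer_thread_id_;
        }
      }
      ~RelocationLock() {
        if (flag_parallel_recompilation) {
          heap_->locked_by_optimizer_thread_ = false;
          heap_->relocation_mutex_.unlock();
        }
      }
      static bool IsLockedByOptimizerThread(MiniHeap* heap) {
        return heap->locked_by_optimizer_thread_;
      }
     private:
      MiniHeap* heap_;
    };

    // Mirrors the check added above: on the optimizer thread, handle
    // dereference is allowed only while that thread holds the lock.
    bool IsDereferenceAllowed() {
      if (std::this_thread::get_id() == optimizer_thread_id_ &&
          !RelocationLock::IsLockedByOptimizerThread(this)) {
        return false;
      }
      return true;
    }

   private:
    std::thread::id optimizer_thread_id_;
    bool locked_by_optimizer_thread_;
    std::mutex relocation_mutex_;
  };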

src/heap-inl.h
@@ -211,6 +211,7 @@ MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
 MaybeObject* Heap::AllocateRaw(int size_in_bytes,
                                AllocationSpace space,
                                AllocationSpace retry_space) {
+  SLOW_ASSERT(!isolate_->optimizing_compiler_thread()->IsOptimizerThread());
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
   ASSERT(space != NEW_SPACE ||
          retry_space == OLD_POINTER_SPACE ||

src/heap.cc
@@ -1300,6 +1300,8 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {


 void Heap::Scavenge() {
+  RelocationLock relocation_lock(this);
+
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
 #endif
@@ -6635,6 +6637,11 @@ bool Heap::SetUp() {

   store_buffer()->SetUp();

+  if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
+#ifdef DEBUG
+  relocation_mutex_locked_by_optimizer_thread_ = false;
+#endif  // DEBUG
+
   return true;
 }
@@ -6737,6 +6744,8 @@ void Heap::TearDown() {
   incremental_marking()->TearDown();

   isolate_->memory_allocator()->TearDown();
+
+  delete relocation_mutex_;
 }
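
A hazard worth flagging here: the delete is unconditional, while the SetUp() hunk above assigns relocation_mutex_ only when FLAG_parallel_recompilation is set. Deleting a null pointer is a safe no-op in C++, but deleting an uninitialized pointer is undefined behavior, so this code depends on the member being null-initialized somewhere this diff does not show, for example a hypothetical constructor initializer-list entry:

  relocation_mutex_(NULL),  // hypothetical; makes the unconditional delete safe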

@@ -7866,4 +7875,15 @@ void Heap::CheckpointObjectStats() {
   ClearObjectStats();
 }

+
+Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
+  if (FLAG_parallel_recompilation) {
+    heap_->relocation_mutex_->Lock();
+#ifdef DEBUG
+    heap_->relocation_mutex_locked_by_optimizer_thread_ =
+        heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
+#endif  // DEBUG
+  }
+}
+
 } }  // namespace v8::internal

src/heap.h
@@ -1858,6 +1858,31 @@ class Heap {

   void CheckpointObjectStats();

+  // We don't use a ScopedLock here since we want to lock the heap
+  // only when FLAG_parallel_recompilation is true.
+  class RelocationLock {
+   public:
+    explicit RelocationLock(Heap* heap);
+
+    ~RelocationLock() {
+      if (FLAG_parallel_recompilation) {
+#ifdef DEBUG
+        heap_->relocation_mutex_locked_by_optimizer_thread_ = false;
+#endif  // DEBUG
+        heap_->relocation_mutex_->Unlock();
+      }
+    }
+
+#ifdef DEBUG
+    static bool IsLockedByOptimizerThread(Heap* heap) {
+      return heap->relocation_mutex_locked_by_optimizer_thread_;
+    }
+#endif  // DEBUG
+
+   private:
+    Heap* heap_;
+  };
+
  private:
  Heap();
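
The comment on the class spells out the design choice: the existing ScopedLock always takes its mutex, while this lock must be a complete no-op unless FLAG_parallel_recompilation is on, hence the hand-rolled RAII type. A sketch of a call site, with an assumed function name and body, not code from this patch:

  void InstallRecompiledCode(Heap* heap) {
    Heap::RelocationLock relocation_lock(heap);  // locks only if FLAG_parallel_recompilation
    // ... publish code pointers; relocation cannot run concurrently ...
  }  // ~RelocationLock() unlocks here, if the constructor locked at all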

@@ -2332,6 +2357,11 @@ class Heap {

   MemoryChunk* chunks_queued_for_free_;

+  Mutex* relocation_mutex_;
+#ifdef DEBUG
+  bool relocation_mutex_locked_by_optimizer_thread_;
+#endif  // DEBUG
+
   friend class Factory;
   friend class GCTracer;
   friend class DisallowAllocationFailure;

src/hydrogen.cc
@@ -511,6 +511,7 @@ class ReachabilityAnalyzer BASE_EMBEDDED {


 void HGraph::Verify(bool do_full_verify) const {
+  Heap::RelocationLock(isolate()->heap());
   ALLOW_HANDLE_DEREF(isolate(), "debug mode verification");
   for (int i = 0; i < blocks_.length(); i++) {
     HBasicBlock* block = blocks_.at(i);
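
One pitfall to flag in this hunk: Heap::RelocationLock(isolate()->heap()); constructs an unnamed temporary, and C++ destroys temporaries at the end of the full-expression, so the mutex is released on the same statement that acquires it and the verification loop below runs unprotected. Holding the lock for the whole function requires a named local:

  Heap::RelocationLock(isolate()->heap());                  // unlocked again at the ';'
  Heap::RelocationLock relocation_lock(isolate()->heap());  // held to end of scope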

src/mark-compact.cc
@@ -3125,6 +3125,8 @@ void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {


 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+  Heap::RelocationLock relocation_lock(heap());
+
   bool code_slots_filtering_required;
   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
     code_slots_filtering_required = MarkInvalidatedCode();

src/optimizing-compiler-thread.cc
@@ -88,7 +88,9 @@ void OptimizingCompilerThread::CompileNext() {
   // The function may have already been optimized by OSR.  Simply continue.
   // Mark it for installing before queuing so that we can be sure of the write
   // order: marking first and (after being queued) installing code second.
-  optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
+  { Heap::RelocationLock relocation_lock(isolate_->heap());
+    optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
+  }
   output_queue_.Enqueue(optimizing_compiler);
 }
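
Note the deliberate brace scope: the closure is marked while the relocation lock is held, so the mark cannot race with a moving GC, and the lock is released again before the compiler is enqueued. That preserves the ordering promised by the comment, marking first, with installation happening later, on the main thread, second.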