Revert of [heap] Parallel newspace evacuation, semispace copy, and compaction \o/ (patchset #16 id:620001 of https://codereview.chromium.org/1577853007/ )

Reason for revert:
[Sheriff] Leads to crashes on all WebRTC Chromium testers, e.g.:
https://build.chromium.org/p/chromium.webrtc/builders/Mac%20Tester/builds/49664

Original issue's description:
> [heap] Parallel newspace evacuation, semispace copy, and compaction \o/
>
> All parallelism can be turned off using --predictable or --noparallel-compaction.
>
> This patch completely parallelizes
>  - semispace copy: from space -> to space (within newspace)
>  - newspace evacuation: newspace -> oldspace
>  - oldspace compaction: oldspace -> oldspace
>
> Previously, newspace was handled sequentially (semispace copy, newspace
> evacuation) before compacting oldspace in parallel. However, at a high level
> there are no dependencies between those two actions, so we parallelize them
> altogether. We base the number of evacuation tasks on the overall set of
> to-be-processed pages (newspace + oldspace compaction pages).
>
> Some low-level details:
>  - The hard cap on the number of tasks has been lifted.
>  - We cache store buffer entries locally before merging them back into the global
>    StoreBuffer in a finalization phase.
>  - We cache AllocationSite operations locally before merging them back into the
>    global pretenuring storage in a finalization phase.
>  - AllocationSites might be compacted while they are still needed for newspace
>    evacuation. To mitigate any problems, we defer checking allocation sites for
>    newspace until merging the locally buffered data.
>
> CQ_EXTRA_TRYBOTS=tryserver.v8:v8_linux_arm64_gc_stress_dbg,v8_linux_gc_stress_dbg,v8_mac_gc_stress_dbg,v8_linux64_asan_rel,v8_linux64_tsan_rel,v8_mac64_asan_rel
> BUG=chromium:524425
> LOG=N
> R=hpayer@chromium.org, ulan@chromium.org
>
> Committed: https://crrev.com/8f0fd8c0370ae8c5aab56491b879d7e30c329062
> Cr-Commit-Position: refs/heads/master@{#33523}

TBR=hpayer@chromium.org,ulan@chromium.org,mlippautz@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:524425

Review URL: https://codereview.chromium.org/1643473002

Cr-Commit-Position: refs/heads/master@{#33539}
machenbach 2016-01-27 01:11:26 -08:00 committed by Commit bot
parent a2baaaac93
commit 85ba94f28c
18 changed files with 304 additions and 462 deletions
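
The reverted CL sizes the number of evacuation tasks from the overall set of to-be-processed pages (newspace plus oldspace compaction pages), as described above and visible in the NumberOfParallelCompactionTasks hunks below. A minimal sketch of that kind of heuristic, assuming a profiled compaction speed in bytes/ms; the names are illustrative and this is not the CL's actual code:

#include <algorithm>
#include <cstdint>
#include <thread>

// Editor's illustrative sketch (not V8 code).
// pages = newspace pages + oldspace compaction pages.
int NumberOfEvacuationTasks(int pages, int64_t live_bytes,
                            int64_t compaction_speed_bytes_per_ms) {
  if (pages == 0) return 1;
  const double kTargetCompactionTimeInMs = 1;
  // With a profiled compaction speed, size the task count so the estimated
  // work fits into the target time; otherwise fall back to one task per page.
  int tasks = pages;
  if (compaction_speed_bytes_per_ms > 0) {
    tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
                                 compaction_speed_bytes_per_ms /
                                 kTargetCompactionTimeInMs);
  }
  // Cap by the number of pages and leave one core for the main thread; there
  // is deliberately no hard task cap here, matching the CL's description.
  const int cores =
      std::max(1, static_cast<int>(std::thread::hardware_concurrency()) - 1);
  return std::min(cores, std::min(pages, tasks));
}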

@ -1317,7 +1317,6 @@ source_set("v8_base") {
"src/unicode-cache.h",
"src/unicode-decoder.cc",
"src/unicode-decoder.h",
"src/utils-inl.h",
"src/utils.cc",
"src/utils.h",
"src/v8.cc",

@ -1058,14 +1058,6 @@ inline FunctionKind WithObjectLiteralBit(FunctionKind kind) {
DCHECK(IsValidFunctionKind(kind));
return kind;
}
inline uint32_t ObjectHash(Address address) {
// All objects are at least pointer aligned, so we can remove the trailing
// zeros.
return static_cast<uint32_t>(bit_cast<uintptr_t>(address) >>
kPointerSizeLog2);
}
} // namespace internal
} // namespace v8

@ -77,7 +77,6 @@ void ArrayBufferTracker::Unregister(JSArrayBuffer* buffer) {
void ArrayBufferTracker::MarkLive(JSArrayBuffer* buffer) {
base::LockGuard<base::Mutex> guard(&mutex_);
void* data = buffer->backing_store();
// ArrayBuffer might be in the middle of being constructed.
@ -124,8 +123,6 @@ void ArrayBufferTracker::PrepareDiscoveryInNewSpace() {
void ArrayBufferTracker::Promote(JSArrayBuffer* buffer) {
base::LockGuard<base::Mutex> guard(&mutex_);
if (buffer->is_external()) return;
void* data = buffer->backing_store();
if (!data) return;

@ -7,7 +7,6 @@
#include <map>
#include "src/base/platform/mutex.h"
#include "src/globals.h"
namespace v8 {
@ -48,7 +47,6 @@ class ArrayBufferTracker {
void Promote(JSArrayBuffer* buffer);
private:
base::Mutex mutex_;
Heap* heap_;
// |live_array_buffers_| maps externally allocated memory used as backing

@ -467,7 +467,7 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
}
}
template <Heap::FindMementoMode mode>
AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
// Check if there is potentially a memento behind the object. If
// the last word of the memento is on another page we return
@ -476,43 +476,34 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
Address memento_address = object_address + object->Size();
Address last_memento_word_address = memento_address + kPointerSize;
if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
return nullptr;
return NULL;
}
HeapObject* candidate = HeapObject::FromAddress(memento_address);
Map* candidate_map = candidate->map();
// This fast check may peek at an uninitialized word. However, the slow check
// below (memento_address == top) ensures that this is safe. Mark the word as
// initialized to silence MemorySanitizer warnings.
MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
if (candidate_map != allocation_memento_map()) {
return nullptr;
}
AllocationMemento* memento_candidate = AllocationMemento::cast(candidate);
if (candidate_map != allocation_memento_map()) return NULL;
// Depending on what the memento is used for, we might need to perform
// additional checks.
Address top;
switch (mode) {
case Heap::kForGC:
return memento_candidate;
case Heap::kForRuntime:
if (memento_candidate == nullptr) return nullptr;
// Either the object is the last object in the new space, or there is
// another object of at least word size (the header map word) following
// it, so suffices to compare ptr and top here.
top = NewSpaceTop();
// Either the object is the last object in the new space, or there is another
// object of at least word size (the header map word) following it, so
// suffices to compare ptr and top here. Note that technically we do not have
// to compare with the current top pointer of the from space page during GC,
// since we always install filler objects above the top pointer of a from
// space page when performing a garbage collection. However, always performing
// the test makes it possible to have a single, unified version of
// FindAllocationMemento that is used both by the GC and the mutator.
Address top = NewSpaceTop();
DCHECK(memento_address == top ||
memento_address + HeapObject::kHeaderSize <= top ||
!NewSpacePage::OnSamePage(memento_address, top - 1));
if ((memento_address != top) && memento_candidate->IsValid()) {
return memento_candidate;
}
return nullptr;
default:
UNREACHABLE();
}
UNREACHABLE();
return nullptr;
if (memento_address == top) return NULL;
AllocationMemento* memento = AllocationMemento::cast(candidate);
if (!memento->IsValid()) return NULL;
return memento;
}
@ -522,28 +513,24 @@ void Heap::UpdateAllocationSite(HeapObject* object,
if (!FLAG_allocation_site_pretenuring ||
!AllocationSite::CanTrack(object->map()->instance_type()))
return;
AllocationMemento* memento_candidate = FindAllocationMemento<kForGC>(object);
if (memento_candidate == nullptr) return;
AllocationMemento* memento = FindAllocationMemento(object);
if (memento == nullptr) return;
AllocationSite* key = memento->GetAllocationSite();
DCHECK(!key->IsZombie());
if (pretenuring_feedback == global_pretenuring_feedback_) {
// Entering global pretenuring feedback is only used in the scavenger, where
// we are allowed to actually touch the allocation site.
if (!memento_candidate->IsValid()) return;
AllocationSite* site = memento_candidate->GetAllocationSite();
DCHECK(!site->IsZombie());
// For inserting in the global pretenuring storage we need to first
// increment the memento found count on the allocation site.
if (site->IncrementMementoFoundCount()) {
global_pretenuring_feedback_->LookupOrInsert(site,
ObjectHash(site->address()));
if (key->IncrementMementoFoundCount()) {
global_pretenuring_feedback_->LookupOrInsert(
key, static_cast<uint32_t>(bit_cast<uintptr_t>(key)));
}
} else {
// Entering cached feedback is used in the parallel case. We are not allowed
// to dereference the allocation site and rather have to postpone all checks
// till actually merging the data.
Address key = memento_candidate->GetAllocationSiteUnchecked();
HashMap::Entry* e =
pretenuring_feedback->LookupOrInsert(key, ObjectHash(key));
// Any other pretenuring storage than the global one is used as a cache,
// where the count is later on merged into the allocation site.
HashMap::Entry* e = pretenuring_feedback->LookupOrInsert(
key, static_cast<uint32_t>(bit_cast<uintptr_t>(key)));
DCHECK(e != nullptr);
(*bit_cast<intptr_t*>(&e->value))++;
}

@ -518,19 +518,17 @@ void Heap::MergeAllocationSitePretenuringFeedback(
if (map_word.IsForwardingAddress()) {
site = AllocationSite::cast(map_word.ToForwardingAddress());
}
// We have not validated the allocation site yet, since we have not
// dereferenced the site during collecting information.
// This is an inlined check of AllocationMemento::IsValid.
if (!site->IsAllocationSite() || site->IsZombie()) continue;
DCHECK(site->IsAllocationSite());
int value =
static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
DCHECK_GT(value, 0);
{
// TODO(mlippautz): For parallel processing we need synchronization here.
if (site->IncrementMementoFoundCount(value)) {
global_pretenuring_feedback_->LookupOrInsert(site,
ObjectHash(site->address()));
global_pretenuring_feedback_->LookupOrInsert(
site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
}
}
}
}
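
The MergeAllocationSitePretenuringFeedback hunk above merges locally collected counts back into the global pretenuring storage and defers validation of possibly-moved allocation sites to that merge step. A minimal, self-contained sketch of this cache-then-merge pattern; the types and helper names here are illustrative assumptions, not V8's:

#include <cstdint>
#include <unordered_map>

// Editor's illustrative sketch (not V8 code).
using Address = uintptr_t;

// Per-task cache, keyed by the raw allocation-site address; the site itself is
// never dereferenced while tasks run, since it may be mid-compaction.
using PretenuringFeedback = std::unordered_map<Address, int>;

void RecordMementoFound(PretenuringFeedback* local, Address site_address) {
  ++(*local)[site_address];
}

// Finalization phase, main thread only: validity checks are postponed until
// here, after all sites have stopped moving.
void MergeFeedback(const PretenuringFeedback& local,
                   PretenuringFeedback* global,
                   bool (*is_valid_site)(Address)) {
  for (const auto& entry : local) {
    if (!is_valid_site(entry.first)) continue;
    (*global)[entry.first] += entry.second;
  }
}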

@ -450,8 +450,6 @@ class Heap {
enum PretenuringFeedbackInsertionMode { kCached, kGlobal };
enum FindMementoMode { kForRuntime, kForGC };
enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
// Taking this lock prevents the GC from entering a phase that relocates
@ -741,7 +739,6 @@ class Heap {
// If an object has an AllocationMemento trailing it, return it, otherwise
// return NULL;
template <FindMementoMode mode>
inline AllocationMemento* FindAllocationMemento(HeapObject* object);
// Returns false if not able to reserve.
@ -1222,13 +1219,13 @@ class Heap {
void UpdateSurvivalStatistics(int start_new_space_size);
inline void IncrementPromotedObjectsSize(intptr_t object_size) {
inline void IncrementPromotedObjectsSize(int object_size) {
DCHECK_GE(object_size, 0);
promoted_objects_size_ += object_size;
}
inline intptr_t promoted_objects_size() { return promoted_objects_size_; }
inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) {
inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
DCHECK_GE(object_size, 0);
semi_space_copied_object_size_ += object_size;
}
@ -1246,8 +1243,8 @@ class Heap {
inline void IncrementNodesPromoted() { nodes_promoted_++; }
inline void IncrementYoungSurvivorsCounter(intptr_t survived) {
DCHECK_GE(survived, 0);
inline void IncrementYoungSurvivorsCounter(int survived) {
DCHECK(survived >= 0);
survived_last_scavenge_ = survived;
survived_since_last_expansion_ += survived;
}
@ -1996,10 +1993,10 @@ class Heap {
// For keeping track of how much data has survived
// scavenge since last new space expansion.
intptr_t survived_since_last_expansion_;
int survived_since_last_expansion_;
// ... and since the last scavenge.
intptr_t survived_last_scavenge_;
int survived_last_scavenge_;
// This is not the depth of nested AlwaysAllocateScope's but rather a single
// count, as scopes can be acquired from multiple tasks (read: threads).

@ -19,14 +19,13 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/slots-buffer.h"
#include "src/heap/spaces-inl.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
#include "src/utils-inl.h"
#include "src/v8.h"
namespace v8 {
@ -321,7 +320,9 @@ void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
for (Page* p : evacuation_candidates_) {
int number_of_pages = evacuation_candidates_.length();
for (int i = 0; i < number_of_pages; i++) {
Page* p = evacuation_candidates_[i];
SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
}
}
@ -477,6 +478,30 @@ void MarkCompactCollector::ClearMarkbits() {
}
class MarkCompactCollector::CompactionTask : public CancelableTask {
public:
explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
: CancelableTask(heap->isolate()), spaces_(spaces) {}
virtual ~CompactionTask() {}
private:
// v8::internal::CancelableTask overrides.
void RunInternal() override {
MarkCompactCollector* mark_compact =
isolate()->heap()->mark_compact_collector();
SlotsBuffer* evacuation_slots_buffer = nullptr;
mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
mark_compact->pending_compaction_tasks_semaphore_.Signal();
}
CompactionSpaceCollection* spaces_;
DISALLOW_COPY_AND_ASSIGN(CompactionTask);
};
class MarkCompactCollector::SweeperTask : public v8::Task {
public:
SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
@ -806,7 +831,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
void MarkCompactCollector::AbortCompaction() {
if (compacting_) {
for (Page* p : evacuation_candidates_) {
int npages = evacuation_candidates_.length();
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
p->ClearEvacuationCandidate();
p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
@ -1521,13 +1548,8 @@ class MarkCompactCollector::HeapObjectVisitor {
class MarkCompactCollector::EvacuateVisitorBase
: public MarkCompactCollector::HeapObjectVisitor {
public:
EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer)
: heap_(heap),
evacuation_slots_buffer_(evacuation_slots_buffer),
compaction_spaces_(compaction_spaces),
local_store_buffer_(local_store_buffer) {}
EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
: heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
HeapObject** target_object) {
@ -1537,7 +1559,7 @@ class MarkCompactCollector::EvacuateVisitorBase
if (allocation.To(target_object)) {
heap_->mark_compact_collector()->MigrateObject(
*target_object, object, size, target_space->identity(),
evacuation_slots_buffer_, local_store_buffer_);
evacuation_slots_buffer_);
return true;
}
return false;
@ -1546,8 +1568,6 @@ class MarkCompactCollector::EvacuateVisitorBase
protected:
Heap* heap_;
SlotsBuffer** evacuation_slots_buffer_;
CompactionSpaceCollection* compaction_spaces_;
LocalStoreBuffer* local_store_buffer_;
};
@ -1558,12 +1578,9 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
static const intptr_t kMaxLabObjectSize = 256;
explicit EvacuateNewSpaceVisitor(Heap* heap,
CompactionSpaceCollection* compaction_spaces,
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer,
HashMap* local_pretenuring_feedback)
: EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
local_store_buffer),
: EvacuateVisitorBase(heap, evacuation_slots_buffer),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
space_to_allocate_(NEW_SPACE),
promoted_size_(0),
@ -1575,8 +1592,7 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
int size = object->Size();
HeapObject* target_object = nullptr;
if (heap_->ShouldBePromoted(object->address(), size) &&
TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
&target_object)) {
TryEvacuateObject(heap_->old_space(), object, &target_object)) {
// If we end up needing more special cases, we should factor this out.
if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
heap_->array_buffer_tracker()->Promote(
@ -1589,8 +1605,7 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
AllocationSpace space = AllocateTargetObject(object, &target);
heap_->mark_compact_collector()->MigrateObject(
HeapObject::cast(target), object, size, space,
(space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_,
(space == NEW_SPACE) ? nullptr : local_store_buffer_);
(space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
}
@ -1663,8 +1678,7 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
inline AllocationResult AllocateInOldSpace(int size_in_bytes,
AllocationAlignment alignment) {
AllocationResult allocation =
compaction_spaces_->Get(OLD_SPACE)->AllocateRaw(size_in_bytes,
alignment);
heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
if (allocation.IsRetry()) {
FatalProcessOutOfMemory(
"MarkCompactCollector: semi-space copy, fallback in old gen\n");
@ -1710,10 +1724,9 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
public:
EvacuateOldSpaceVisitor(Heap* heap,
CompactionSpaceCollection* compaction_spaces,
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer)
: EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
local_store_buffer) {}
SlotsBuffer** evacuation_slots_buffer)
: EvacuateVisitorBase(heap, evacuation_slots_buffer),
compaction_spaces_(compaction_spaces) {}
bool Visit(HeapObject* object) override {
CompactionSpace* target_space = compaction_spaces_->Get(
@ -1725,6 +1738,9 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
}
return false;
}
private:
CompactionSpaceCollection* compaction_spaces_;
};
@ -2532,14 +2548,14 @@ void MarkCompactCollector::AbortTransitionArrays() {
heap()->set_encountered_transition_arrays(Smi::FromInt(0));
}
void MarkCompactCollector::RecordMigratedSlot(
Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer) {
Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
// When parallel compaction is in progress, store and slots buffer entries
// require synchronization.
if (heap_->InNewSpace(value)) {
if (compaction_in_progress_) {
local_store_buffer->Record(slot);
heap_->store_buffer()->MarkSynchronized(slot);
} else {
heap_->store_buffer()->Mark(slot);
}
@ -2621,23 +2637,19 @@ void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
class RecordMigratedSlotVisitor final : public ObjectVisitor {
public:
RecordMigratedSlotVisitor(MarkCompactCollector* collector,
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer)
SlotsBuffer** evacuation_slots_buffer)
: collector_(collector),
evacuation_slots_buffer_(evacuation_slots_buffer),
local_store_buffer_(local_store_buffer) {}
evacuation_slots_buffer_(evacuation_slots_buffer) {}
V8_INLINE void VisitPointer(Object** p) override {
collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
evacuation_slots_buffer_,
local_store_buffer_);
evacuation_slots_buffer_);
}
V8_INLINE void VisitPointers(Object** start, Object** end) override {
while (start < end) {
collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
evacuation_slots_buffer_,
local_store_buffer_);
evacuation_slots_buffer_);
++start;
}
}
@ -2653,7 +2665,6 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
private:
MarkCompactCollector* collector_;
SlotsBuffer** evacuation_slots_buffer_;
LocalStoreBuffer* local_store_buffer_;
};
@ -2671,10 +2682,9 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
// pointer iteration. This is an issue if the store buffer overflows and we
// have to scan the entire old space, including dead objects, looking for
// pointers to new space.
void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
int size, AllocationSpace dest,
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer) {
void MarkCompactCollector::MigrateObject(
HeapObject* dst, HeapObject* src, int size, AllocationSpace dest,
SlotsBuffer** evacuation_slots_buffer) {
Address dst_addr = dst->address();
Address src_addr = src->address();
DCHECK(heap()->AllowedToBeMigrated(src, dest));
@ -2685,8 +2695,7 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
DCHECK(IsAligned(size, kPointerSize));
heap()->MoveBlock(dst->address(), src->address(), size);
RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer,
local_store_buffer);
RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer);
dst->IterateBody(&visitor);
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
@ -3048,18 +3057,54 @@ void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
void MarkCompactCollector::EvacuateNewSpacePrologue() {
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. But since we are already in
// a mark-sweep allocation, there is no sense in trying to trigger one.
AlwaysAllocateScope scope(isolate());
NewSpace* new_space = heap()->new_space();
NewSpacePageIterator it(new_space->bottom(), new_space->top());
// Append the list of new space pages to be processed.
// Store allocation range before flipping semispaces.
Address from_bottom = new_space->bottom();
Address from_top = new_space->top();
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space->Flip();
new_space->ResetAllocationInfo();
newspace_evacuation_candidates_.Clear();
NewSpacePageIterator it(from_bottom, from_top);
while (it.has_next()) {
newspace_evacuation_candidates_.Add(it.next());
}
new_space->Flip();
new_space->ResetAllocationInfo();
}
void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
newspace_evacuation_candidates_.Rewind(0);
HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() {
HashMap* local_pretenuring_feedback = new HashMap(
HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity);
EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_,
local_pretenuring_feedback);
// First pass: traverse all objects in inactive semispace, remove marks,
// migrate live objects and write forwarding addresses. This stage puts
// new entries in the store buffer and may cause some pages to be marked
// scan-on-scavenge.
for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) {
NewSpacePage* p =
reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]);
bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
USE(ok);
DCHECK(ok);
}
heap_->IncrementPromotedObjectsSize(
static_cast<int>(new_space_visitor.promoted_size()));
heap_->IncrementSemiSpaceCopiedObjectSize(
static_cast<int>(new_space_visitor.semispace_copied_size()));
heap_->IncrementYoungSurvivorsCounter(
static_cast<int>(new_space_visitor.promoted_size()) +
static_cast<int>(new_space_visitor.semispace_copied_size()));
return local_pretenuring_feedback;
}
@ -3069,168 +3114,8 @@ void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
evacuation_slots_buffers_.Add(evacuation_slots_buffer);
}
class MarkCompactCollector::Evacuator : public Malloced {
public:
Evacuator(MarkCompactCollector* collector,
const List<Page*>& evacuation_candidates,
const List<NewSpacePage*>& newspace_evacuation_candidates)
: collector_(collector),
evacuation_candidates_(evacuation_candidates),
newspace_evacuation_candidates_(newspace_evacuation_candidates),
compaction_spaces_(collector->heap()),
local_slots_buffer_(nullptr),
local_store_buffer_(),
local_pretenuring_feedback_(HashMap::PointersMatch,
kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(collector->heap(), &compaction_spaces_,
&local_slots_buffer_, &local_store_buffer_,
&local_pretenuring_feedback_),
old_space_visitor_(collector->heap(), &compaction_spaces_,
&local_slots_buffer_, &local_store_buffer_),
duration_(0.0),
bytes_compacted_(0),
task_id_(0) {}
// Evacuate the configured set of pages in parallel.
inline void EvacuatePages();
// Merge back locally cached info sequentially. Note that this method needs
// to be called from the main thread.
inline void Finalize();
CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
uint32_t task_id() { return task_id_; }
void set_task_id(uint32_t id) { task_id_ = id; }
private:
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
Heap* heap() { return collector_->heap(); }
void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
duration_ += duration;
bytes_compacted_ += bytes_compacted;
}
inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
MarkCompactCollector* collector_;
// Pages to process.
const List<Page*>& evacuation_candidates_;
const List<NewSpacePage*>& newspace_evacuation_candidates_;
// Locally cached collector data.
CompactionSpaceCollection compaction_spaces_;
SlotsBuffer* local_slots_buffer_;
LocalStoreBuffer local_store_buffer_;
HashMap local_pretenuring_feedback_;
// Visitors for the corresponding spaces.
EvacuateNewSpaceVisitor new_space_visitor_;
EvacuateOldSpaceVisitor old_space_visitor_;
// Book keeping info.
double duration_;
intptr_t bytes_compacted_;
// Task id, if this evacuator is executed on a background task instead of
// the main thread. Can be used to try to abort the task currently scheduled
// to executed to evacuate pages.
uint32_t task_id_;
};
bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
MemoryChunk* p, HeapObjectVisitor* visitor) {
bool success = true;
if (p->parallel_compaction_state().TrySetValue(
MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
if (p->IsEvacuationCandidate() || p->InNewSpace()) {
DCHECK_EQ(p->parallel_compaction_state().Value(),
MemoryChunk::kCompactingInProgress);
int saved_live_bytes = p->LiveBytes();
double evacuation_time;
{
AlwaysAllocateScope always_allocate(heap()->isolate());
TimedScope timed_scope(&evacuation_time);
success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
}
if (success) {
ReportCompactionProgress(evacuation_time, saved_live_bytes);
p->parallel_compaction_state().SetValue(
MemoryChunk::kCompactingFinalize);
} else {
p->parallel_compaction_state().SetValue(
MemoryChunk::kCompactingAborted);
}
} else {
// There could be popular pages in the list of evacuation candidates
// which we do not compact.
p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
}
}
return success;
}
void MarkCompactCollector::Evacuator::EvacuatePages() {
for (NewSpacePage* p : newspace_evacuation_candidates_) {
DCHECK(p->InNewSpace());
DCHECK_EQ(p->concurrent_sweeping_state().Value(),
NewSpacePage::kSweepingDone);
bool success = EvacuateSinglePage(p, &new_space_visitor_);
DCHECK(success);
USE(success);
}
for (Page* p : evacuation_candidates_) {
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION));
DCHECK_EQ(p->concurrent_sweeping_state().Value(), Page::kSweepingDone);
EvacuateSinglePage(p, &old_space_visitor_);
}
}
void MarkCompactCollector::Evacuator::Finalize() {
heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
heap()->code_space()->MergeCompactionSpace(
compaction_spaces_.Get(CODE_SPACE));
heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
heap()->IncrementSemiSpaceCopiedObjectSize(
new_space_visitor_.semispace_copied_size());
heap()->IncrementYoungSurvivorsCounter(
new_space_visitor_.promoted_size() +
new_space_visitor_.semispace_copied_size());
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
local_store_buffer_.Process(heap()->store_buffer());
collector_->AddEvacuationSlotsBufferSynchronized(local_slots_buffer_);
}
class MarkCompactCollector::CompactionTask : public CancelableTask {
public:
explicit CompactionTask(Heap* heap, Evacuator* evacuator)
: CancelableTask(heap->isolate()), heap_(heap), evacuator_(evacuator) {
evacuator->set_task_id(id());
}
virtual ~CompactionTask() {}
private:
// v8::internal::CancelableTask overrides.
void RunInternal() override {
evacuator_->EvacuatePages();
heap_->mark_compact_collector()
->pending_compaction_tasks_semaphore_.Signal();
}
Heap* heap_;
Evacuator* evacuator_;
DISALLOW_COPY_AND_ASSIGN(CompactionTask);
};
int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
intptr_t live_bytes) {
int MarkCompactCollector::NumberOfParallelCompactionTasks() {
if (!FLAG_parallel_compaction) return 1;
// Compute the number of needed tasks based on a target compaction time, the
// profiled compaction speed and marked live memory.
@ -3238,83 +3123,83 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
// The number of parallel compaction tasks is limited by:
// - #evacuation pages
// - (#cores - 1)
// - a hard limit
const double kTargetCompactionTimeInMs = 1;
const int kNumSweepingTasks = 3;
const int kMaxCompactionTasks = 8;
intptr_t compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
if (compaction_speed == 0) return 1;
const int available_cores =
Max(1, base::SysInfo::NumberOfProcessors() - kNumSweepingTasks - 1);
int tasks;
if (compaction_speed > 0) {
tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
compaction_speed / kTargetCompactionTimeInMs);
} else {
tasks = pages;
intptr_t live_bytes = 0;
for (Page* page : evacuation_candidates_) {
live_bytes += page->LiveBytes();
}
const int tasks_capped_pages = Min(pages, tasks);
return Min(available_cores, tasks_capped_pages);
const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1);
const int tasks =
1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed /
kTargetCompactionTimeInMs);
const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks);
const int tasks_capped_cores = Min(cores, tasks_capped_pages);
const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores);
return tasks_capped_hard;
}
void MarkCompactCollector::EvacuatePagesInParallel() {
int num_pages = 0;
intptr_t live_bytes = 0;
for (Page* page : evacuation_candidates_) {
num_pages++;
live_bytes += page->LiveBytes();
}
for (NewSpacePage* page : newspace_evacuation_candidates_) {
num_pages++;
live_bytes += page->LiveBytes();
}
DCHECK_GE(num_pages, 1);
const int num_pages = evacuation_candidates_.length();
if (num_pages == 0) return;
// Used for trace summary.
intptr_t live_bytes = 0;
intptr_t compaction_speed = 0;
if (FLAG_trace_fragmentation) {
for (Page* page : evacuation_candidates_) {
live_bytes += page->LiveBytes();
}
compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
}
const int num_tasks = NumberOfParallelCompactionTasks(num_pages, live_bytes);
const int num_tasks = NumberOfParallelCompactionTasks();
// Set up compaction spaces.
Evacuator** evacuators = new Evacuator*[num_tasks];
CompactionSpaceCollection** compaction_spaces_for_tasks =
new CompactionSpaceCollection*[num_tasks];
for (int i = 0; i < num_tasks; i++) {
evacuators[i] = new Evacuator(this, evacuation_candidates_,
newspace_evacuation_candidates_);
compaction_spaces_for_tasks[i] = evacuators[i]->compaction_spaces();
compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
}
heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
num_tasks);
heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
num_tasks);
delete[] compaction_spaces_for_tasks;
uint32_t* task_ids = new uint32_t[num_tasks - 1];
// Kick off parallel tasks.
StartParallelCompaction(evacuators, num_tasks);
StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks);
// Wait for unfinished and not-yet-started tasks.
WaitUntilCompactionCompleted(&evacuators[1], num_tasks - 1);
WaitUntilCompactionCompleted(task_ids, num_tasks - 1);
delete[] task_ids;
// Finalize local evacuators by merging back all locally cached data.
double compaction_duration = 0.0;
intptr_t compacted_memory = 0;
// Merge back memory (compacted and unused) from compaction spaces.
for (int i = 0; i < num_tasks; i++) {
evacuators[i]->Finalize();
delete evacuators[i];
}
delete[] evacuators;
// Finalize pages sequentially.
for (NewSpacePage* p : newspace_evacuation_candidates_) {
DCHECK_EQ(p->parallel_compaction_state().Value(),
MemoryChunk::kCompactingFinalize);
p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
heap()->old_space()->MergeCompactionSpace(
compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
heap()->code_space()->MergeCompactionSpace(
compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted();
compaction_duration += compaction_spaces_for_tasks[i]->duration();
delete compaction_spaces_for_tasks[i];
}
delete[] compaction_spaces_for_tasks;
heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory);
// Finalize sequentially.
int abandoned_pages = 0;
for (Page* p : evacuation_candidates_) {
for (int i = 0; i < num_pages; i++) {
Page* p = evacuation_candidates_[i];
switch (p->parallel_compaction_state().Value()) {
case MemoryChunk::ParallelCompactingState::kCompactingAborted:
// We have partially compacted the page, i.e., some objects may have
@ -3347,7 +3232,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
break;
default:
// MemoryChunk::kCompactingInProgress.
// We should not observe kCompactingInProgress, or kCompactingDone.
UNREACHABLE();
}
p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
@ -3364,28 +3249,31 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
}
void MarkCompactCollector::StartParallelCompaction(Evacuator** evacuators,
void MarkCompactCollector::StartParallelCompaction(
CompactionSpaceCollection** compaction_spaces, uint32_t* task_ids,
int len) {
compaction_in_progress_ = true;
for (int i = 1; i < len; i++) {
CompactionTask* task = new CompactionTask(heap(), evacuators[i]);
CompactionTask* task = new CompactionTask(heap(), compaction_spaces[i]);
task_ids[i - 1] = task->id();
V8::GetCurrentPlatform()->CallOnBackgroundThread(
task, v8::Platform::kShortRunningTask);
}
// Contribute on main thread.
evacuators[0]->EvacuatePages();
// Contribute in main thread.
EvacuatePages(compaction_spaces[0], &migration_slots_buffer_);
}
void MarkCompactCollector::WaitUntilCompactionCompleted(Evacuator** evacuators,
void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
int len) {
// Try to cancel compaction tasks that have not been run (as they might be
// stuck in a worker queue). Tasks that cannot be canceled, have either
// already completed or are still running, hence we need to wait for their
// semaphore signal.
for (int i = 0; i < len; i++) {
if (!heap()->isolate()->cancelable_task_manager()->TryAbort(
evacuators[i]->task_id())) {
if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) {
pending_compaction_tasks_semaphore_.Wait();
}
}
@ -3393,6 +3281,43 @@ void MarkCompactCollector::WaitUntilCompactionCompleted(Evacuator** evacuators,
}
void MarkCompactCollector::EvacuatePages(
CompactionSpaceCollection* compaction_spaces,
SlotsBuffer** evacuation_slots_buffer) {
EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
evacuation_slots_buffer);
for (int i = 0; i < evacuation_candidates_.length(); i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
DCHECK(p->SweepingDone());
if (p->parallel_compaction_state().TrySetValue(
MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
if (p->IsEvacuationCandidate()) {
DCHECK_EQ(p->parallel_compaction_state().Value(),
MemoryChunk::kCompactingInProgress);
double start = heap()->MonotonicallyIncreasingTimeInMs();
intptr_t live_bytes = p->LiveBytes();
AlwaysAllocateScope always_allocate(isolate());
if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
p->parallel_compaction_state().SetValue(
MemoryChunk::kCompactingFinalize);
compaction_spaces->ReportCompactionProgress(
heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
} else {
p->parallel_compaction_state().SetValue(
MemoryChunk::kCompactingAborted);
}
} else {
// There could be popular pages in the list of evacuation candidates
// which we do not compact.
p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
}
}
}
}
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
public:
virtual Object* RetainAs(Object* object) {
@ -3535,7 +3460,9 @@ void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
Address end_slot) {
// Remove entries by replacing them with an old-space slot containing a smi
// that is located in an unmovable page.
for (Page* p : evacuation_candidates_) {
int npages = evacuation_candidates_.length();
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
if (p->IsEvacuationCandidate()) {
@ -3615,7 +3542,8 @@ void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
void MarkCompactCollector::SweepAbortedPages() {
// Second pass on aborted pages.
for (Page* p : evacuation_candidates_) {
for (int i = 0; i < evacuation_candidates_.length(); i++) {
Page* p = evacuation_candidates_[i];
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
@ -3647,15 +3575,26 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
Heap::RelocationLock relocation_lock(heap());
HashMap* local_pretenuring_feedback = nullptr;
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
EvacuationScope evacuation_scope(this);
EvacuateNewSpacePrologue();
local_pretenuring_feedback = EvacuateNewSpaceInParallel();
heap_->new_space()->set_age_mark(heap_->new_space()->top());
}
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_CANDIDATES);
EvacuationScope evacuation_scope(this);
EvacuatePagesInParallel();
EvacuateNewSpaceEpilogue();
heap()->new_space()->set_age_mark(heap()->new_space()->top());
}
{
heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback);
delete local_pretenuring_feedback;
}
UpdatePointersAfterEvacuation();
@ -3732,11 +3671,13 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
}
int npages = evacuation_candidates_.length();
{
GCTracer::Scope gc_scope(
heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
for (Page* p : evacuation_candidates_) {
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
@ -3811,7 +3752,9 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
void MarkCompactCollector::ReleaseEvacuationCandidates() {
for (Page* p : evacuation_candidates_) {
int npages = evacuation_candidates_.length();
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
if (!p->IsEvacuationCandidate()) continue;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->Free(p->area_start(), p->area_size());
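
Both the EvacuateSinglePage and EvacuatePages variants above claim pages by moving parallel_compaction_state() from kCompactingDone to kCompactingInProgress and then recording kCompactingFinalize or kCompactingAborted for the sequential finalization pass. A minimal sketch of that claiming protocol using std::atomic; the types and names are illustrative, not V8's:

#include <atomic>

// Editor's illustrative sketch (not V8 code).
enum class CompactingState { kDone, kInProgress, kFinalize, kAborted };

struct PageLike {
  std::atomic<CompactingState> state{CompactingState::kDone};
};

// Returns true if this task won the race to process the page; the state left
// behind tells the main thread how to finalize the page afterwards.
bool ClaimAndProcess(PageLike* page, bool (*evacuate)(PageLike*)) {
  CompactingState expected = CompactingState::kDone;
  if (!page->state.compare_exchange_strong(expected,
                                           CompactingState::kInProgress)) {
    return false;  // Another task already claimed (or skipped) this page.
  }
  page->state.store(evacuate(page) ? CompactingState::kFinalize
                                   : CompactingState::kAborted);
  return true;
}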

@ -7,7 +7,6 @@
#include "src/base/bits.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
namespace v8 {
namespace internal {
@ -407,8 +406,7 @@ class MarkCompactCollector {
void MigrateObject(HeapObject* dst, HeapObject* src, int size,
AllocationSpace to_old_space,
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer);
SlotsBuffer** evacuation_slots_buffer);
void InvalidateCode(Code* code);
@ -511,12 +509,13 @@ class MarkCompactCollector {
class EvacuateNewSpaceVisitor;
class EvacuateOldSpaceVisitor;
class EvacuateVisitorBase;
class Evacuator;
class HeapObjectVisitor;
class SweeperTask;
typedef std::vector<Page*> SweepingList;
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
explicit MarkCompactCollector(Heap* heap);
bool WillBeDeoptimized(Code* code);
@ -705,18 +704,25 @@ class MarkCompactCollector {
void SweepSpaces();
void EvacuateNewSpacePrologue();
void EvacuateNewSpaceEpilogue();
// Returns local pretenuring feedback.
HashMap* EvacuateNewSpaceInParallel();
void AddEvacuationSlotsBufferSynchronized(
SlotsBuffer* evacuation_slots_buffer);
void EvacuatePages(CompactionSpaceCollection* compaction_spaces,
SlotsBuffer** evacuation_slots_buffer);
void EvacuatePagesInParallel();
// The number of parallel compaction tasks, including the main thread.
int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
int NumberOfParallelCompactionTasks();
void StartParallelCompaction(Evacuator** evacuators, int len);
void WaitUntilCompactionCompleted(Evacuator** evacuators, int len);
void StartParallelCompaction(CompactionSpaceCollection** compaction_spaces,
uint32_t* task_ids, int len);
void WaitUntilCompactionCompleted(uint32_t* task_ids, int len);
void EvacuateNewSpaceAndCandidates();
@ -745,8 +751,7 @@ class MarkCompactCollector {
// Updates store buffer and slot buffer for a pointer in a migrating object.
void RecordMigratedSlot(Object* value, Address slot,
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer);
SlotsBuffer** evacuation_slots_buffer);
// Adds the code entry slot to the slots buffer.
void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
@ -772,7 +777,8 @@ class MarkCompactCollector {
bool have_code_to_deoptimize_;
List<Page*> evacuation_candidates_;
List<NewSpacePage*> newspace_evacuation_candidates_;
List<MemoryChunk*> newspace_evacuation_candidates_;
// The evacuation_slots_buffers_ are used by the compaction threads.
// When a compaction task finishes, it uses

@ -2917,7 +2917,9 @@ class CompactionSpaceCollection : public Malloced {
public:
explicit CompactionSpaceCollection(Heap* heap)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
code_space_(heap, CODE_SPACE, Executability::EXECUTABLE),
duration_(0.0),
bytes_compacted_(0) {}
CompactionSpace* Get(AllocationSpace space) {
switch (space) {
@ -2932,9 +2934,21 @@ class CompactionSpaceCollection : public Malloced {
return nullptr;
}
void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
duration_ += duration;
bytes_compacted_ += bytes_compacted;
}
double duration() const { return duration_; }
intptr_t bytes_compacted() const { return bytes_compacted_; }
private:
CompactionSpace old_space_;
CompactionSpace code_space_;
// Book keeping.
double duration_;
intptr_t bytes_compacted_;
};

@ -26,6 +26,12 @@ void StoreBuffer::Mark(Address addr) {
}
inline void StoreBuffer::MarkSynchronized(Address addr) {
base::LockGuard<base::Mutex> lock_guard(&mutex_);
Mark(addr);
}
void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
if (store_buffer_rebuilding_enabled_) {
SLOW_DCHECK(!heap_->code_space()->Contains(addr) &&
@ -42,22 +48,6 @@ void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
}
}
}
void LocalStoreBuffer::Record(Address addr) {
if (top_->is_full()) top_ = new Node(top_);
top_->buffer[top_->count++] = addr;
}
void LocalStoreBuffer::Process(StoreBuffer* store_buffer) {
Node* current = top_;
while (current != nullptr) {
for (int i = 0; i < current->count; i++) {
store_buffer->Mark(current->buffer[i]);
}
current = current->next;
}
}
} // namespace internal
} // namespace v8

@ -33,6 +33,10 @@ class StoreBuffer {
// This is used to add addresses to the store buffer non-concurrently.
inline void Mark(Address addr);
// This is used to add addresses to the store buffer when multiple threads
// may operate on the store buffer.
inline void MarkSynchronized(Address addr);
// This is used by the heap traversal to enter the addresses into the store
// buffer that should still be in the store buffer after GC. It enters
// addresses directly into the old buffer because the GC starts by wiping the
@ -212,39 +216,6 @@ class DontMoveStoreBufferEntriesScope {
StoreBuffer* store_buffer_;
bool stored_state_;
};
class LocalStoreBuffer BASE_EMBEDDED {
public:
LocalStoreBuffer() : top_(new Node(nullptr)) {}
~LocalStoreBuffer() {
Node* current = top_;
while (current != nullptr) {
Node* tmp = current->next;
delete current;
current = tmp;
}
}
inline void Record(Address addr);
inline void Process(StoreBuffer* store_buffer);
private:
static const int kBufferSize = 16 * KB;
struct Node : Malloced {
explicit Node(Node* next_node) : next(next_node), count(0) {}
inline bool is_full() { return count == kBufferSize; }
Node* next;
Address buffer[kBufferSize];
int count;
};
Node* top_;
};
} // namespace internal
} // namespace v8
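
This file's hunks show the two synchronization strategies involved in this revert: a mutex-guarded StoreBuffer::MarkSynchronized entry point, and a per-task LocalStoreBuffer that is drained into the global buffer in a finalization phase. A simplified sketch of both, using a vector instead of the chunked Node buffers of the real LocalStoreBuffer; types and names are illustrative:

#include <cstdint>
#include <mutex>
#include <vector>

// Editor's illustrative sketch (not V8 code).
using Address = uintptr_t;

class GlobalStoreBuffer {
 public:
  void Mark(Address slot) { slots_.push_back(slot); }
  // Locked variant for phases where multiple threads record directly.
  void MarkSynchronized(Address slot) {
    std::lock_guard<std::mutex> guard(mutex_);
    Mark(slot);
  }
 private:
  std::mutex mutex_;
  std::vector<Address> slots_;
};

// Per-task buffer: record without locking, then drain once on the main thread.
class LocalSlotBuffer {
 public:
  void Record(Address slot) { slots_.push_back(slot); }
  void Process(GlobalStoreBuffer* global) {
    for (Address slot : slots_) global->Mark(slot);
    slots_.clear();
  }
 private:
  std::vector<Address> slots_;
};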

@ -1855,9 +1855,6 @@ AllocationSite* AllocationMemento::GetAllocationSite() {
return AllocationSite::cast(allocation_site());
}
Address AllocationMemento::GetAllocationSiteUnchecked() {
return reinterpret_cast<Address>(allocation_site());
}
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
JSObject::ValidateElements(object);

@ -16044,8 +16044,7 @@ void JSObject::UpdateAllocationSite(Handle<JSObject> object,
{
DisallowHeapAllocation no_allocation;
AllocationMemento* memento =
heap->FindAllocationMemento<Heap::kForRuntime>(*object);
AllocationMemento* memento = heap->FindAllocationMemento(*object);
if (memento == NULL) return;
// Walk through to the Allocation Site

@ -8289,7 +8289,6 @@ class AllocationMemento: public Struct {
inline bool IsValid();
inline AllocationSite* GetAllocationSite();
inline Address GetAllocationSiteUnchecked();
DECLARE_PRINTER(AllocationMemento)
DECLARE_VERIFIER(AllocationMemento)

@ -1,37 +0,0 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_UTILS_INL_H_
#define V8_UTILS_INL_H_
#include "src/utils.h"
#include "include/v8-platform.h"
#include "src/base/platform/time.h"
#include "src/v8.h"
namespace v8 {
namespace internal {
class TimedScope {
public:
explicit TimedScope(double* result)
: start_(TimestampMs()), result_(result) {}
~TimedScope() { *result_ = TimestampMs() - start_; }
private:
static inline double TimestampMs() {
return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
static_cast<double>(base::Time::kMillisecondsPerSecond);
}
double start_;
double* result_;
};
} // namespace internal
} // namespace v8
#endif // V8_UTILS_INL_H_

@ -3514,13 +3514,6 @@ TEST(ReleaseOverReservedPages) {
// The optimizer can allocate stuff, messing up the test.
i::FLAG_crankshaft = false;
i::FLAG_always_opt = false;
// Parallel compaction increases fragmentation, depending on how existing
// memory is distributed. Since this is non-deterministic because of
// concurrent sweeping, we disable it for this test.
i::FLAG_parallel_compaction = false;
// Concurrent sweeping adds non determinism, depending on when memory is
// available for further reuse.
i::FLAG_concurrent_sweeping = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();

@ -1080,7 +1080,6 @@
'../../src/unicode-cache.h',
'../../src/unicode-decoder.cc',
'../../src/unicode-decoder.h',
'../../src/utils-inl.h',
'../../src/utils.cc',
'../../src/utils.h',
'../../src/v8.cc',