[heap] Add GC accounting to slow allocation and incremental marking job

BUG=v8:6343

Review-Url: https://codereview.chromium.org/2861763002
Cr-Commit-Position: refs/heads/master@{#45073}
Author: mlippautz
Date: 2017-05-03 13:59:28 -07:00
Committed by: Commit bot
Parent: 644379eedc
Commit: 8ab39ebcf9
5 changed files with 25 additions and 4 deletions

File: src/counters.h

@@ -716,6 +716,8 @@ class RuntimeCallTimer final {
   V(FunctionCallback)                      \
   V(GC)                                    \
   V(GC_AllAvailableGarbage)                \
+  V(GC_IncrementalMarkingJob)              \
+  V(GC_SlowAllocateRaw)                    \
   V(GCEpilogueCallback)                    \
   V(GCPrologueCallback)                    \
   V(GenericNamedPropertyDefinerCallback)   \
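
Note: each V(Name) entry in this list is an X-macro; the counter machinery expands the list once per use site to declare, name, and report a counter, so adding the two GC entries above is all that is needed to get new counters. A minimal sketch of the pattern with simplified stand-in types, not V8's actual RuntimeCallStats definitions:

// X-macro counter sketch (stand-in types; illustrative only).
#include <cstdint>
#include <cstdio>

#define FOR_EACH_GC_COUNTER(V) \
  V(GC)                        \
  V(GC_IncrementalMarkingJob)  \
  V(GC_SlowAllocateRaw)

struct Counter {
  const char* name;
  uint64_t count = 0;
};

struct Stats {
  // Expands to: Counter GC{"GC"}; Counter GC_IncrementalMarkingJob{...}; ...
#define DEFINE_COUNTER(Name) Counter Name{#Name};
  FOR_EACH_GC_COUNTER(DEFINE_COUNTER)
#undef DEFINE_COUNTER

  void Print() const {
#define PRINT_COUNTER(Name) \
  std::printf("%s: %llu\n", Name.name, (unsigned long long)Name.count);
    FOR_EACH_GC_COUNTER(PRINT_COUNTER)
#undef PRINT_COUNTER
  }
};

int main() {
  Stats stats;
  stats.GC_SlowAllocateRaw.count++;  // what the new GC_SlowAllocateRaw entry enables
  stats.Print();
}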

File: src/heap/heap.cc

@@ -978,7 +978,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
                           const char* collector_reason,
                           const v8::GCCallbackFlags gc_callback_flags) {
   // The VM is in the GC state until exiting this function.
-  VMState<GC> state(isolate_);
+  VMState<GC> state(isolate());
   RuntimeCallTimerScope(isolate(), &RuntimeCallStats::GC);
 #ifdef DEBUG
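
Note: the accounting pattern throughout this commit is RAII: VMState<GC> marks the isolate as being in the GC state (for the sampling profiler, per the comment above), and RuntimeCallTimerScope charges elapsed time to the named counter when the scope unwinds. A rough sketch of the assumed timer-scope semantics, simplified and not V8's implementation:

// RAII timer sketch (assumed semantics of a RuntimeCallTimerScope-like class).
#include <chrono>
#include <cstdio>

class ScopedTimer {
 public:
  explicit ScopedTimer(const char* counter)
      : counter_(counter), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {  // on scope exit, charge elapsed time to the counter
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_)
                  .count();
    std::printf("%s: %lld us\n", counter_, static_cast<long long>(us));
  }

 private:
  const char* counter_;
  std::chrono::steady_clock::time_point start_;
};

void CollectGarbage() {
  ScopedTimer timer("GC");  // named local: lives until CollectGarbage returns
  // ... collection work happens here ...
}

int main() { CollectGarbage(); }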

File: src/heap/incremental-marking-job.cc

@@ -10,6 +10,7 @@
 #include "src/heap/incremental-marking.h"
 #include "src/isolate.h"
 #include "src/v8.h"
+#include "src/vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -42,6 +43,9 @@ void IncrementalMarkingJob::Task::Step(Heap* heap) {
 }
 
 void IncrementalMarkingJob::Task::RunInternal() {
+  VMState<GC> state(isolate());
+  RuntimeCallTimerScope(isolate(),
+                        &RuntimeCallStats::GC_IncrementalMarkingJob);
   Heap* heap = isolate()->heap();
   job_->NotifyTask();
   IncrementalMarking* incremental_marking = heap->incremental_marking();
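
Note: one C++ subtlety worth flagging. If the lines above are verbatim, RuntimeCallTimerScope(isolate(), ...) with no variable name creates an unnamed temporary that is destroyed at the end of that statement, not at the end of the function, so the timer would bracket almost none of the work. This is a general language rule, not a claim about V8's API; a tiny demonstration:

// Temporary-lifetime demo (general C++, unrelated to V8 internals).
#include <cstdio>

struct Scope {
  Scope() { std::puts("enter"); }
  ~Scope() { std::puts("exit"); }
};

void F() {
  Scope named;  // destroyed when F returns: brackets the whole function
  Scope();      // unnamed temporary: its "exit" prints before the work below
  std::puts("work");
}

int main() { F(); }  // prints: enter, enter, exit, work, exit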

File: src/heap/spaces.cc

@@ -20,6 +20,7 @@
 #include "src/objects-inl.h"
 #include "src/snapshot/snapshot.h"
 #include "src/v8.h"
+#include "src/vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -2896,11 +2897,21 @@ HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
 }
 
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+  VMState<GC> state(heap()->isolate());
+  RuntimeCallTimerScope(heap()->isolate(),
+                        &RuntimeCallStats::GC_SlowAllocateRaw);
+  return RawSlowAllocateRaw(size_in_bytes);
+}
+
+HeapObject* CompactionSpace::SlowAllocateRaw(int size_in_bytes) {
+  return RawSlowAllocateRaw(size_in_bytes);
+}
+
+HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
+  // Allocation in this space has failed.
   DCHECK_GE(size_in_bytes, 0);
   const int kMaxPagesToSweep = 1;
-  // Allocation in this space has failed.
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   // Sweeping is still in progress.
   if (collector->sweeping_in_progress()) {
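
Note: this hunk splits the slow path in two: PagedSpace::SlowAllocateRaw becomes the timed public entry point, RawSlowAllocateRaw carries the shared untimed body, and CompactionSpace overrides the entry point to skip the accounting, presumably because compaction spaces run inside the GC where time is already attributed to a GC counter. A compilable sketch of the shape, with placeholder types standing in for the real heap classes:

// Timed-wrapper split sketch (placeholder types, not the real V8 classes).
#include <cstdio>

struct PagedSpace {
  virtual ~PagedSpace() = default;
  // Timed public entry point (stand-in for VMState + RuntimeCallTimerScope).
  virtual void* SlowAllocateRaw(int size_in_bytes) {
    std::puts("accounting on");
    return RawSlowAllocateRaw(size_in_bytes);
  }

 protected:
  void* RawSlowAllocateRaw(int size_in_bytes) {
    (void)size_in_bytes;  // shared untimed slow path would live here
    return nullptr;
  }
};

struct CompactionSpace : public PagedSpace {
  void* SlowAllocateRaw(int size_in_bytes) override {
    // No accounting: compaction already runs inside a GC scope.
    return RawSlowAllocateRaw(size_in_bytes);
  }
};

int main() {
  CompactionSpace compaction;
  PagedSpace* space = &compaction;
  space->SlowAllocateRaw(64);  // dispatches to the untimed override
}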

File: src/heap/spaces.h

@@ -2183,7 +2183,9 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
       int size_in_bytes);
 
   // Slow path of AllocateRaw. This function is space-dependent.
-  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
+  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
+
+  MUST_USE_RESULT HeapObject* RawSlowAllocateRaw(int size_in_bytes);
 
   size_t area_size_;
@@ -2742,6 +2744,8 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
   MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
       int size_in_bytes) override;
 
+  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes) override;
+
 };
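
Note: making the base declaration virtual is what lets the CompactionSpace override take effect even when the call is made through a PagedSpace pointer; the cost is one virtual dispatch on a path that is, by definition, already slow. MUST_USE_RESULT stays on all three declarations since dropping the returned HeapObject* would silently discard what is presumably the allocation-failure signal.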