[profiler] proper observation of old space inline allocations

Bug: chromium:633920
Change-Id: I9a2f4a89f6b9c0f63cb3b166b06a88a12f0a203c
Reviewed-on: https://chromium-review.googlesource.com/631696
Commit-Queue: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48043}
Parent: 5719ca6eea
Commit: 672a41c3ca
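
Context for the change (editor's summary): before this patch only NewSpace lowered its allocation limit for AllocationObservers, so objects that optimized code pretenured straight into old space never triggered an observer step and the sampling heap profiler missed them. The patch moves the same trick into PagedSpace: ComputeLimit() cuts the linear allocation area down to roughly one observer step, the next inline allocation then falls into the slow path, and AllocateRaw() reports everything allocated since the previous step before starting the next one. The standalone sketch below is illustrative only (toy names, not V8 code); it models a bump-pointer area whose limit is deliberately kept low so an observer sees the traffic:

// Toy model, not V8 code: a bump-pointer area whose limit is kept artificially
// low so that an observer is notified roughly every step_size bytes.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>

struct ToyObserver {
  size_t step_size;           // desired observation granularity in bytes
  size_t total_observed = 0;  // bytes reported so far
  void Step(size_t bytes_since_last) { total_observed += bytes_since_last; }
};

struct ToyLinearArea {
  uintptr_t top, limit, end;       // area [top, limit) inside node [top, end)
  uintptr_t top_on_previous_step;  // where the last observer step happened
  ToyObserver* observer;           // at most one, for simplicity

  // Analogue of PagedSpace::ComputeLimit: never past the node's end, never
  // smaller than the pending allocation, otherwise one observer step.
  uintptr_t ComputeLimit(size_t size_in_bytes) const {
    if (observer == nullptr) return end;
    return std::max<uintptr_t>(top + size_in_bytes,
                               std::min<uintptr_t>(top + observer->step_size, end));
  }

  // The fast path is a plain pointer bump; only when the lowered limit is
  // reached do we report everything allocated since the previous step.
  uintptr_t Allocate(size_t size) {
    if (top + size > limit) {  // "slow path"
      if (observer != nullptr) {
        observer->Step((top - top_on_previous_step) + size);
        top_on_previous_step = top + size;
      }
      limit = ComputeLimit(size);  // start the next step
    }
    uintptr_t result = top;
    top += size;
    return result;
  }
};

int main() {
  ToyObserver obs{/*step_size=*/512};
  ToyLinearArea area{/*top=*/0x10000, /*limit=*/0x10000, /*end=*/0x20000,
                     /*top_on_previous_step=*/0x10000, &obs};
  for (int i = 0; i < 1000; i++) area.Allocate(32);       // 32000 bytes total
  std::cout << obs.total_observed << " bytes observed\n"; // close to 32000
  return 0;
}

The point of the design is that the fast path stays a pure pointer bump; the cost of observation is only paid when the shortened limit is hit.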
@@ -369,6 +369,11 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,

 AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                          AllocationAlignment alignment) {
+  DCHECK(top() >= top_on_previous_step_);
+  size_t bytes_since_last =
+      top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
+
+  DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
 #ifdef V8_HOST_ARCH_32_BIT
   AllocationResult result =
       alignment == kDoubleAligned
@@ -378,11 +383,13 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
   AllocationResult result = AllocateRawUnaligned(size_in_bytes);
 #endif
   HeapObject* heap_obj = nullptr;
-  if (!result.IsRetry() && result.To(&heap_obj)) {
-    AllocationStep(heap_obj->address(), size_in_bytes);
+  if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
+    AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
+                   heap_obj->address(), size_in_bytes);
     DCHECK_IMPLIES(
         heap()->incremental_marking()->black_allocation(),
         heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
+    StartNextInlineAllocationStep();
   }
   return result;
 }
@@ -1332,6 +1332,7 @@ STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==

 void Space::AddAllocationObserver(AllocationObserver* observer) {
   allocation_observers_.push_back(observer);
+  StartNextInlineAllocationStep();
 }

 void Space::RemoveAllocationObserver(AllocationObserver* observer) {
@@ -1339,6 +1340,7 @@ void Space::RemoveAllocationObserver(AllocationObserver* observer) {
                       allocation_observers_.end(), observer);
   DCHECK(allocation_observers_.end() != it);
   allocation_observers_.erase(it);
+  StartNextInlineAllocationStep();
 }

 void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }
@@ -1347,11 +1349,12 @@ void Space::ResumeAllocationObservers() {
   allocation_observers_paused_ = false;
 }

-void Space::AllocationStep(Address soon_object, int size) {
+void Space::AllocationStep(int bytes_since_last, Address soon_object,
+                           int size) {
   if (!allocation_observers_paused_) {
     heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
     for (AllocationObserver* observer : allocation_observers_) {
-      observer->AllocationStep(size, soon_object, size);
+      observer->AllocationStep(bytes_since_last, soon_object, size);
     }
   }
 }
@@ -1371,7 +1374,8 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
     : Space(heap, space, executable),
       anchor_(this),
       free_list_(this),
-      locked_page_(nullptr) {
+      locked_page_(nullptr),
+      top_on_previous_step_(0) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();

@@ -1600,6 +1604,48 @@ void PagedSpace::SetAllocationInfo(Address top, Address limit) {
   }
 }

+void PagedSpace::DecreaseLimit(Address new_limit) {
+  Address old_limit = limit();
+  DCHECK_LE(top(), new_limit);
+  DCHECK_GE(old_limit, new_limit);
+  if (new_limit != old_limit) {
+    SetTopAndLimit(top(), new_limit);
+    Free(new_limit, old_limit - new_limit);
+    if (heap()->incremental_marking()->black_allocation()) {
+      Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
+                                                                   old_limit);
+    }
+  }
+}
+
+Address PagedSpace::ComputeLimit(Address start, Address end,
+                                 size_t size_in_bytes) {
+  DCHECK_GE(end - start, size_in_bytes);
+
+  if (heap()->inline_allocation_disabled()) {
+    // Keep the linear allocation area to fit exactly the requested size.
+    return start + size_in_bytes;
+  } else if (!allocation_observers_paused_ && !allocation_observers_.empty() &&
+             identity() == OLD_SPACE && !is_local()) {
+    // Generated code may allocate inline from the linear allocation area for
+    // Old Space. To make sure we can observe these allocations, we use a lower
+    // limit.
+    size_t step = RoundSizeDownToObjectAlignment(
+        static_cast<int>(GetNextInlineAllocationStepSize()));
+    return Max(start + size_in_bytes, Min(start + step, end));
+  } else {
+    // The entire node can be used as the linear allocation area.
+    return end;
+  }
+}
+
+void PagedSpace::StartNextInlineAllocationStep() {
+  if (!allocation_observers_paused_ && SupportsInlineAllocation()) {
+    top_on_previous_step_ = allocation_observers_.empty() ? 0 : top();
+    DecreaseLimit(ComputeLimit(top(), limit(), 0));
+  }
+}
+
 void PagedSpace::MarkAllocationInfoBlack() {
   DCHECK(heap()->incremental_marking()->black_allocation());
   Address current_top = top();
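
The clamp Max(start + size_in_bytes, Min(start + step, end)) above is easy to misread; here is a throwaway snippet (hypothetical addresses and sizes, not part of the commit) walking its three cases:

// Throwaway example with hypothetical numbers: the three cases of the clamp
// Max(start + size_in_bytes, Min(start + step, end)).
#include <algorithm>
#include <cassert>
#include <cstdint>

uintptr_t Clamp(uintptr_t start, uintptr_t end, uintptr_t size, uintptr_t step) {
  return std::max(start + size, std::min(start + step, end));
}

int main() {
  // Plenty of room, small allocation: the step wins and the area is cut short,
  // so the next inline allocation past it takes the observable slow path.
  assert(Clamp(0x1000, 0x9000, 32, 512) == 0x1000 + 512);
  // Less room left than one step: the area simply ends at the node's end.
  assert(Clamp(0x1000, 0x1100, 32, 512) == 0x1100);
  // Allocation larger than the step: the limit must still cover it, otherwise
  // the allocation that triggered the slow path could not fit.
  assert(Clamp(0x1000, 0x9000, 1024, 512) == 0x1000 + 1024);
  return 0;
}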
@@ -1645,6 +1691,12 @@ void PagedSpace::EmptyAllocationInfo() {
     }
   }

+  if (top_on_previous_step_) {
+    DCHECK(current_top >= top_on_previous_step_);
+    AllocationStep(static_cast<int>(current_top - top_on_previous_step_),
+                   nullptr, 0);
+    top_on_previous_step_ = 0;
+  }
   SetTopAndLimit(NULL, NULL);
   DCHECK_GE(current_limit, current_top);
   Free(current_top, current_limit - current_top);
@@ -2087,16 +2139,6 @@ void NewSpace::StartNextInlineAllocationStep() {
   }
 }

-void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
-  Space::AddAllocationObserver(observer);
-  StartNextInlineAllocationStep();
-}
-
-void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
-  Space::RemoveAllocationObserver(observer);
-  StartNextInlineAllocationStep();
-}
-
 void NewSpace::PauseAllocationObservers() {
   // Do a step to account for memory allocated so far.
   InlineAllocationStep(top(), top(), nullptr, 0);
@@ -2105,12 +2147,28 @@ void NewSpace::PauseAllocationObservers() {
   UpdateInlineAllocationLimit(0);
 }

+void PagedSpace::PauseAllocationObservers() {
+  // Do a step to account for memory allocated so far.
+  if (top_on_previous_step_) {
+    int bytes_allocated = static_cast<int>(top() - top_on_previous_step_);
+    AllocationStep(bytes_allocated, nullptr, 0);
+  }
+  Space::PauseAllocationObservers();
+  top_on_previous_step_ = 0;
+}
+
 void NewSpace::ResumeAllocationObservers() {
   DCHECK(top_on_previous_step_ == 0);
   Space::ResumeAllocationObservers();
   StartNextInlineAllocationStep();
 }

+// TODO(ofrobots): refactor into SpaceWithLinearArea
+void PagedSpace::ResumeAllocationObservers() {
+  DCHECK(top_on_previous_step_ == 0);
+  Space::ResumeAllocationObservers();
+  StartNextInlineAllocationStep();
+}
+
 void NewSpace::InlineAllocationStep(Address top, Address new_top,
                                     Address soon_object, size_t size) {
@@ -2885,7 +2943,6 @@ bool FreeList::Allocate(size_t size_in_bytes) {
   if (new_node == nullptr) return false;

   DCHECK_GE(new_node_size, size_in_bytes);
-  size_t bytes_left = new_node_size - size_in_bytes;

 #ifdef DEBUG
   for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
@@ -2899,38 +2956,21 @@ bool FreeList::Allocate(size_t size_in_bytes) {
   // candidate.
   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

-  const size_t kThreshold = IncrementalMarking::kAllocatedThreshold;
-
   // Memory in the linear allocation area is counted as allocated. We may free
   // a little of this again immediately - see below.
   owner_->IncreaseAllocatedBytes(new_node_size,
                                  Page::FromAddress(new_node->address()));

-  if (owner_->heap()->inline_allocation_disabled()) {
-    // Keep the linear allocation area to fit exactly the requested size.
-    // Return the rest to the free list.
-    owner_->Free(new_node->address() + size_in_bytes, bytes_left);
-    owner_->SetAllocationInfo(new_node->address(),
-                              new_node->address() + size_in_bytes);
-  } else if (bytes_left > kThreshold &&
-             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
-             FLAG_incremental_marking &&
-             !owner_->is_local()) {  // Not needed on CompactionSpaces.
-    size_t linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
-    // We don't want to give too large linear areas to the allocator while
-    // incremental marking is going on, because we won't check again whether
-    // we want to do another increment until the linear area is used up.
-    DCHECK_GE(new_node_size, size_in_bytes + linear_size);
-    owner_->Free(new_node->address() + size_in_bytes + linear_size,
-                 new_node_size - size_in_bytes - linear_size);
-    owner_->SetAllocationInfo(
-        new_node->address(), new_node->address() + size_in_bytes + linear_size);
-  } else {
-    // Normally we give the rest of the node to the allocator as its new
-    // linear allocation area.
-    owner_->SetAllocationInfo(new_node->address(),
-                              new_node->address() + new_node_size);
-  }
+  Address start = new_node->address();
+  Address end = new_node->address() + new_node_size;
+  Address limit = owner_->ComputeLimit(start, end, size_in_bytes);
+  DCHECK_LE(limit, end);
+  DCHECK_LE(size_in_bytes, limit - start);
+  if (limit != end) {
+    owner_->Free(limit, end - limit);
+  }
+  owner_->SetAllocationInfo(start, limit);

   return true;
 }
@@ -3318,7 +3358,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
   if (heap()->incremental_marking()->black_allocation()) {
     heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
   }
-  AllocationStep(object->address(), object_size);
+  AllocationStep(object_size, object->address(), object_size);
   DCHECK_IMPLIES(
       heap()->incremental_marking()->black_allocation(),
       heap()->incremental_marking()->marking_state()->IsBlack(object));
@@ -903,17 +903,17 @@ class Space : public Malloced {
   // Identity used in error reporting.
   AllocationSpace identity() { return id_; }

-  V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
-      AllocationObserver* observer);
+  void AddAllocationObserver(AllocationObserver* observer);

-  V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
-      AllocationObserver* observer);
+  void RemoveAllocationObserver(AllocationObserver* observer);

   V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();

   V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();

-  void AllocationStep(Address soon_object, int size);
+  V8_EXPORT_PRIVATE virtual void StartNextInlineAllocationStep() {}
+
+  void AllocationStep(int bytes_since_last, Address soon_object, int size);

   // Return the total amount committed memory for this space, i.e., allocatable
   // memory and page headers.
@@ -2071,15 +2071,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {

   void ResetFreeList() { free_list_.Reset(); }

-  // Set space allocation info.
-  void SetTopAndLimit(Address top, Address limit) {
-    DCHECK(top == limit ||
-           Page::FromAddress(top) == Page::FromAddress(limit - 1));
-    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-    allocation_info_.Reset(top, limit);
-  }
-
-  void SetAllocationInfo(Address top, Address limit);
+  void PauseAllocationObservers() override;
+  void ResumeAllocationObservers() override;

   // Empty space allocation info, returning unused area to free list.
   void EmptyAllocationInfo();
@@ -2184,6 +2177,21 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
   // multiple tasks hold locks on pages while trying to sweep each others pages.
   void AnnounceLockedPage(Page* page) { locked_page_ = page; }

+  Address ComputeLimit(Address start, Address end, size_t size_in_bytes);
+  void SetAllocationInfo(Address top, Address limit);
+
+ private:
+  // Set space allocation info.
+  void SetTopAndLimit(Address top, Address limit) {
+    DCHECK(top == limit ||
+           Page::FromAddress(top) == Page::FromAddress(limit - 1));
+    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+    allocation_info_.Reset(top, limit);
+  }
+  void DecreaseLimit(Address new_limit);
+  void StartNextInlineAllocationStep() override;
+  bool SupportsInlineAllocation() { return identity() == OLD_SPACE; }
+
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
@@ -2246,6 +2254,7 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
   base::Mutex space_mutex_;

   Page* locked_page_;
+  Address top_on_previous_step_;

   friend class IncrementalMarking;
   friend class MarkCompactCollector;
@@ -2647,14 +2656,6 @@ class NewSpace : public Space {
     UpdateInlineAllocationLimit(0);
   }

-  // Allows observation of inline allocation. The observer->Step() method gets
-  // called after every step_size bytes have been allocated (approximately).
-  // This works by adjusting the allocation limit to a lower value and adjusting
-  // it after each step.
-  void AddAllocationObserver(AllocationObserver* observer) override;
-
-  void RemoveAllocationObserver(AllocationObserver* observer) override;
-
   // Get the extent of the inactive semispace (for use as a marking stack,
   // or to zap it). Notice: space-addresses are not necessarily on the
   // same page, so FromSpaceStart() might be above FromSpaceEnd().
@@ -2762,7 +2763,7 @@ class NewSpace : public Space {
   // different when we cross a page boundary or reset the space.
   void InlineAllocationStep(Address top, Address new_top, Address soon_object,
                             size_t size);
-  void StartNextInlineAllocationStep();
+  void StartNextInlineAllocationStep() override;

   friend class SemiSpaceIterator;
 };
@@ -172,8 +172,11 @@ class SamplingAllocationObserver : public AllocationObserver {
   void Step(int bytes_allocated, Address soon_object, size_t size) override {
     USE(heap_);
     DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
-    DCHECK(soon_object);
-    profiler_->SampleObject(soon_object, size);
+    if (soon_object) {
+      // TODO(ofrobots): it would be better to sample the next object rather
+      // than skipping this sample epoch if soon_object happens to be null.
+      profiler_->SampleObject(soon_object, size);
+    }
   }

   intptr_t GetNextStepSize() override { return GetNextSampleInterval(rate_); }
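
With soon_object now allowed to be null (the paged-space steps from EmptyAllocationInfo() and PauseAllocationObservers() pass nullptr), the observer skips that sample instead of asserting. From the embedder's point of view nothing changes except coverage: pretenured old-space allocations now show up in sampling profiles. Roughly the usage, mirroring the test added at the end of this commit (sketch only; it assumes a live v8::Isolate* with a context entered and the usual V8 initialization already done):

// Sketch of embedder-side usage of the sampling heap profiler.
#include <memory>
#include <v8-profiler.h>

void ProfileAllocations(v8::Isolate* isolate) {
  v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
  profiler->StartSamplingHeapProfiler(/*sample_interval=*/64);
  // ... run JavaScript that allocates, including optimized code that
  //     pretenures objects directly into old space ...
  std::unique_ptr<v8::AllocationProfile> profile(
      profiler->GetAllocationProfile());
  // Walk profile->GetRootNode() to attribute sampled allocations to functions.
  profiler->StopSamplingHeapProfiler();
}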
@@ -203,7 +203,7 @@ void ForceEvacuationCandidate(Page* page) {
     int remaining = static_cast<int>(limit - top);
     space->heap()->CreateFillerObjectAt(top, remaining,
                                         ClearRecordedSlots::kNo);
-    space->SetTopAndLimit(nullptr, nullptr);
+    space->EmptyAllocationInfo();
   }
 }

@@ -3063,3 +3063,77 @@ TEST(SamplingHeapProfilerLeftTrimming) {

   heap_profiler->StopSamplingHeapProfiler();
 }
+
+TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
+  i::FLAG_allow_natives_syntax = true;
+  i::FLAG_expose_gc = true;
+
+  CcTest::InitializeVM();
+  if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
+  if (i::FLAG_gc_global || i::FLAG_stress_compaction ||
+      i::FLAG_stress_incremental_marking) {
+    return;
+  }
+
+  v8::HandleScope scope(v8::Isolate::GetCurrent());
+  LocalContext env;
+  v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+  // Suppress randomness to avoid flakiness in tests.
+  v8::internal::FLAG_sampling_heap_profiler_suppress_randomness = true;
+
+  // Grow new space until maximum capacity reached.
+  while (!CcTest::heap()->new_space()->IsAtMaximumCapacity()) {
+    CcTest::heap()->new_space()->Grow();
+  }
+
+  i::ScopedVector<char> source(1024);
+  i::SNPrintF(source,
+              "var number_elements = %d;"
+              "var elements = new Array(number_elements);"
+              "function f() {"
+              "  for (var i = 0; i < number_elements; i++) {"
+              "    elements[i] = [{}, {}, {}];"
+              "  }"
+              "  return elements[number_elements - 1];"
+              "};"
+              "f(); gc();"
+              "f(); f();"
+              "%%OptimizeFunctionOnNextCall(f);"
+              "f();"
+              "f;",
+              i::AllocationSite::kPretenureMinimumCreated + 1);
+
+  v8::Local<v8::Function> f =
+      v8::Local<v8::Function>::Cast(CompileRun(source.start()));
+
+  // Make sure the function is producing pre-tenured objects.
+  auto res = f->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
+  i::Handle<i::JSObject> o = i::Handle<i::JSObject>::cast(
+      v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
+  CHECK(CcTest::heap()->InOldSpace(o->elements()));
+  CHECK(CcTest::heap()->InOldSpace(*o));
+
+  // Call the function and profile it.
+  heap_profiler->StartSamplingHeapProfiler(64);
+  for (int i = 0; i < 100; ++i) {
+    f->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
+  }
+
+  std::unique_ptr<v8::AllocationProfile> profile(
+      heap_profiler->GetAllocationProfile());
+  CHECK(profile);
+  heap_profiler->StopSamplingHeapProfiler();
+
+  const char* names[] = {"f"};
+  auto node_f = FindAllocationProfileNode(env->GetIsolate(), *profile,
+                                          ArrayVector(names));
+  CHECK(node_f);
+
+  int count = 0;
+  for (auto allocation : node_f->allocations) {
+    count += allocation.count;
+  }
+
+  CHECK_GE(count, 9000);
+}
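
To run just this test from a cctest build, the invocation should look roughly like the following (the binary path depends on the local build configuration):

  out/x64.release/cctest test-heap-profiler/SamplingHeapProfilerPretenuredInlineAllocations

The threshold of 9000 is deliberately loose: with randomness suppressed and a 64-byte sampling interval, the 100 profiled calls allocate comfortably more sampled old-space objects than that.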