Reland "Reland "[profiler] proper observation of old space inline allocations""
This is a reland of ec952aaa68.

Included is a fix that ensures that top_on_previous_step_ is cleared
when we release a page.

Original change's description:
> Reland "[profiler] proper observation of old space inline allocations"
>
> This is a reland of 672a41c3ca
> Original change's description:
> > [profiler] proper observation of old space inline allocations
> >
> > Bug: chromium:633920
> > Change-Id: I9a2f4a89f6b9c0f63cb3b166b06a88a12f0a203c
> > Reviewed-on: https://chromium-review.googlesource.com/631696
> > Commit-Queue: Ali Ijaz Sheikh <ofrobots@google.com>
> > Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#48043}
>
> Bug: chromium:633920
> Change-Id: I6fe743d31b8ff26f3858488d4c014c62d3c85add
> Reviewed-on: https://chromium-review.googlesource.com/671127
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Ali Ijaz Sheikh <ofrobots@google.com>
> Cr-Commit-Position: refs/heads/master@{#48085}

Bug: chromium:633920
Change-Id: I8a0dcc4eaffc1f1d3ac5b3f8d344001cdae36606
Reviewed-on: https://chromium-review.googlesource.com/677407
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Ali Ijaz Sheikh <ofrobots@google.com>
Cr-Commit-Position: refs/heads/master@{#48141}
Commit: 52e8d0ab40
Parent: 855b88ae5a
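Background on the observer contract this change wires into PagedSpace: a space notifies each registered AllocationObserver of how many bytes were allocated since the previous notification, and once roughly GetNextStepSize() bytes have accumulated the observer's Step() callback fires with the object about to be allocated (the sampling heap profiler records a sample there). The standalone sketch below is illustrative only; the Step/GetNextStepSize/AllocationStep names mirror identifiers visible in the hunks, but the bookkeeping and the LoggingObserver class are assumptions for the example, not V8 source.

// Standalone sketch (not V8 source): approximates the observer contract the
// diff below relies on, under simplifying assumptions.
#include <cstddef>
#include <cstdint>
#include <cstdio>

using Address = uintptr_t;

class AllocationObserver {
 public:
  explicit AllocationObserver(intptr_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {}
  virtual ~AllocationObserver() = default;

  // Called by the owning space with the bytes allocated since the previous
  // notification and the object about to be allocated.
  void AllocationStep(int bytes_since_last, Address soon_object, size_t size) {
    bytes_to_next_step_ -= bytes_since_last;
    if (bytes_to_next_step_ <= 0) {
      Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
           size);
      step_size_ = GetNextStepSize();
      bytes_to_next_step_ = step_size_;
    }
  }

 protected:
  // Subclasses react here; the sampling heap profiler would record a sample.
  virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
  virtual intptr_t GetNextStepSize() { return step_size_; }

  intptr_t step_size_;
  intptr_t bytes_to_next_step_;
};

// Stand-in for SamplingAllocationObserver from the profiler hunk: it simply
// logs each step and, like the patched Step(), tolerates a null soon_object.
class LoggingObserver : public AllocationObserver {
 public:
  using AllocationObserver::AllocationObserver;

 protected:
  void Step(int bytes_allocated, Address soon_object, size_t size) override {
    if (soon_object) {
      std::printf("observed ~%d bytes, next object is %zu bytes\n",
                  bytes_allocated, size);
    }
  }
};

int main() {
  LoggingObserver observer(512);  // notify roughly every 512 bytes
  const Address kDummy = 0x1000;
  // A space calls this at each allocation it can observe, e.g. when the
  // linear allocation area reaches the lowered limit.
  for (int i = 0; i < 10; ++i) observer.AllocationStep(128, kDummy, 64);
  return 0;
}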
@@ -369,6 +369,16 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
 
 AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                          AllocationAlignment alignment) {
+  if (top() < top_on_previous_step_) {
+    // Generated code decreased the top() pointer to do folded allocations
+    DCHECK_EQ(Page::FromAddress(top()),
+              Page::FromAddress(top_on_previous_step_));
+    top_on_previous_step_ = top();
+  }
+  size_t bytes_since_last =
+      top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
+
+  DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
 #ifdef V8_HOST_ARCH_32_BIT
   AllocationResult result =
       alignment == kDoubleAligned
@@ -378,11 +388,13 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
   AllocationResult result = AllocateRawUnaligned(size_in_bytes);
 #endif
   HeapObject* heap_obj = nullptr;
-  if (!result.IsRetry() && result.To(&heap_obj)) {
-    AllocationStep(heap_obj->address(), size_in_bytes);
+  if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
+    AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
+                   heap_obj->address(), size_in_bytes);
     DCHECK_IMPLIES(
         heap()->incremental_marking()->black_allocation(),
         heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
+    StartNextInlineAllocationStep();
   }
   return result;
 }
@@ -1328,6 +1328,7 @@ STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
 
 void Space::AddAllocationObserver(AllocationObserver* observer) {
   allocation_observers_.push_back(observer);
+  StartNextInlineAllocationStep();
 }
 
 void Space::RemoveAllocationObserver(AllocationObserver* observer) {
@@ -1335,6 +1336,7 @@ void Space::RemoveAllocationObserver(AllocationObserver* observer) {
                       allocation_observers_.end(), observer);
   DCHECK(allocation_observers_.end() != it);
   allocation_observers_.erase(it);
+  StartNextInlineAllocationStep();
 }
 
 void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }
@@ -1343,11 +1345,12 @@ void Space::ResumeAllocationObservers() {
   allocation_observers_paused_ = false;
 }
 
-void Space::AllocationStep(Address soon_object, int size) {
+void Space::AllocationStep(int bytes_since_last, Address soon_object,
+                           int size) {
   if (!allocation_observers_paused_) {
     heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
     for (AllocationObserver* observer : allocation_observers_) {
-      observer->AllocationStep(size, soon_object, size);
+      observer->AllocationStep(bytes_since_last, soon_object, size);
     }
   }
 }
@@ -1367,7 +1370,8 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
     : Space(heap, space, executable),
       anchor_(this),
       free_list_(this),
-      locked_page_(nullptr) {
+      locked_page_(nullptr),
+      top_on_previous_step_(0) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
 
@@ -1596,6 +1600,48 @@ void PagedSpace::SetAllocationInfo(Address top, Address limit) {
   }
 }
 
+void PagedSpace::DecreaseLimit(Address new_limit) {
+  Address old_limit = limit();
+  DCHECK_LE(top(), new_limit);
+  DCHECK_GE(old_limit, new_limit);
+  if (new_limit != old_limit) {
+    SetTopAndLimit(top(), new_limit);
+    Free(new_limit, old_limit - new_limit);
+    if (heap()->incremental_marking()->black_allocation()) {
+      Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
+                                                                   old_limit);
+    }
+  }
+}
+
+Address PagedSpace::ComputeLimit(Address start, Address end,
+                                 size_t size_in_bytes) {
+  DCHECK_GE(end - start, size_in_bytes);
+
+  if (heap()->inline_allocation_disabled()) {
+    // Keep the linear allocation area to fit exactly the requested size.
+    return start + size_in_bytes;
+  } else if (!allocation_observers_paused_ && !allocation_observers_.empty() &&
+             identity() == OLD_SPACE && !is_local()) {
+    // Generated code may allocate inline from the linear allocation area for
+    // Old Space. To make sure we can observe these allocations, we use a lower
+    // limit.
+    size_t step = RoundSizeDownToObjectAlignment(
+        static_cast<int>(GetNextInlineAllocationStepSize()));
+    return Max(start + size_in_bytes, Min(start + step, end));
+  } else {
+    // The entire node can be used as the linear allocation area.
+    return end;
+  }
+}
+
+void PagedSpace::StartNextInlineAllocationStep() {
+  if (!allocation_observers_paused_ && SupportsInlineAllocation()) {
+    top_on_previous_step_ = allocation_observers_.empty() ? 0 : top();
+    DecreaseLimit(ComputeLimit(top(), limit(), 0));
+  }
+}
+
 void PagedSpace::MarkAllocationInfoBlack() {
   DCHECK(heap()->incremental_marking()->black_allocation());
   Address current_top = top();
@@ -1641,6 +1687,12 @@ void PagedSpace::EmptyAllocationInfo() {
     }
   }
 
+  if (top_on_previous_step_) {
+    DCHECK(current_top >= top_on_previous_step_);
+    AllocationStep(static_cast<int>(current_top - top_on_previous_step_),
+                   nullptr, 0);
+    top_on_previous_step_ = 0;
+  }
   SetTopAndLimit(NULL, NULL);
   DCHECK_GE(current_limit, current_top);
   Free(current_top, current_limit - current_top);
@@ -1656,6 +1708,7 @@ void PagedSpace::ReleasePage(Page* page) {
   DCHECK(!free_list_.ContainsPageFreeListItems(page));
 
   if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
+    DCHECK(!top_on_previous_step_);
     allocation_info_.Reset(nullptr, nullptr);
   }
 
@@ -2088,16 +2141,6 @@ void NewSpace::StartNextInlineAllocationStep() {
   }
 }
 
-void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
-  Space::AddAllocationObserver(observer);
-  StartNextInlineAllocationStep();
-}
-
-void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
-  Space::RemoveAllocationObserver(observer);
-  StartNextInlineAllocationStep();
-}
-
 void NewSpace::PauseAllocationObservers() {
   // Do a step to account for memory allocated so far.
   InlineAllocationStep(top(), top(), nullptr, 0);
@@ -2106,12 +2149,28 @@ void NewSpace::PauseAllocationObservers() {
   UpdateInlineAllocationLimit(0);
 }
 
+void PagedSpace::PauseAllocationObservers() {
+  // Do a step to account for memory allocated so far.
+  if (top_on_previous_step_) {
+    int bytes_allocated = static_cast<int>(top() - top_on_previous_step_);
+    AllocationStep(bytes_allocated, nullptr, 0);
+  }
+  Space::PauseAllocationObservers();
+  top_on_previous_step_ = 0;
+}
+
 void NewSpace::ResumeAllocationObservers() {
   DCHECK_NULL(top_on_previous_step_);
   Space::ResumeAllocationObservers();
   StartNextInlineAllocationStep();
 }
 
+// TODO(ofrobots): refactor into SpaceWithLinearArea
+void PagedSpace::ResumeAllocationObservers() {
+  DCHECK(top_on_previous_step_ == 0);
+  Space::ResumeAllocationObservers();
+  StartNextInlineAllocationStep();
+}
 
 void NewSpace::InlineAllocationStep(Address top, Address new_top,
                                     Address soon_object, size_t size) {
@@ -2886,7 +2945,6 @@ bool FreeList::Allocate(size_t size_in_bytes) {
   if (new_node == nullptr) return false;
 
   DCHECK_GE(new_node_size, size_in_bytes);
-  size_t bytes_left = new_node_size - size_in_bytes;
 
 #ifdef DEBUG
   for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
@@ -2900,38 +2958,21 @@ bool FreeList::Allocate(size_t size_in_bytes) {
   // candidate.
   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
 
-  const size_t kThreshold = IncrementalMarking::kAllocatedThreshold;
-
   // Memory in the linear allocation area is counted as allocated. We may free
   // a little of this again immediately - see below.
   owner_->IncreaseAllocatedBytes(new_node_size,
                                  Page::FromAddress(new_node->address()));
 
-  if (owner_->heap()->inline_allocation_disabled()) {
-    // Keep the linear allocation area to fit exactly the requested size.
-    // Return the rest to the free list.
-    owner_->Free(new_node->address() + size_in_bytes, bytes_left);
-    owner_->SetAllocationInfo(new_node->address(),
-                              new_node->address() + size_in_bytes);
-  } else if (bytes_left > kThreshold &&
-             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
-             FLAG_incremental_marking &&
-             !owner_->is_local()) {  // Not needed on CompactionSpaces.
-    size_t linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
-    // We don't want to give too large linear areas to the allocator while
-    // incremental marking is going on, because we won't check again whether
-    // we want to do another increment until the linear area is used up.
-    DCHECK_GE(new_node_size, size_in_bytes + linear_size);
-    owner_->Free(new_node->address() + size_in_bytes + linear_size,
-                 new_node_size - size_in_bytes - linear_size);
-    owner_->SetAllocationInfo(
-        new_node->address(), new_node->address() + size_in_bytes + linear_size);
-  } else {
-    // Normally we give the rest of the node to the allocator as its new
-    // linear allocation area.
-    owner_->SetAllocationInfo(new_node->address(),
-                              new_node->address() + new_node_size);
+  Address start = new_node->address();
+  Address end = new_node->address() + new_node_size;
+  Address limit = owner_->ComputeLimit(start, end, size_in_bytes);
+  DCHECK_LE(limit, end);
+  DCHECK_LE(size_in_bytes, limit - start);
+  if (limit != end) {
+    owner_->Free(limit, end - limit);
   }
+  owner_->SetAllocationInfo(start, limit);
+
   return true;
 }
 
@@ -3319,7 +3360,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
   if (heap()->incremental_marking()->black_allocation()) {
     heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
   }
-  AllocationStep(object->address(), object_size);
+  AllocationStep(object_size, object->address(), object_size);
   DCHECK_IMPLIES(
       heap()->incremental_marking()->black_allocation(),
       heap()->incremental_marking()->marking_state()->IsBlack(object));
@@ -903,17 +903,17 @@ class Space : public Malloced {
   // Identity used in error reporting.
   AllocationSpace identity() { return id_; }
 
-  V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
-      AllocationObserver* observer);
+  void AddAllocationObserver(AllocationObserver* observer);
 
-  V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
-      AllocationObserver* observer);
+  void RemoveAllocationObserver(AllocationObserver* observer);
 
   V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();
 
   V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();
 
-  void AllocationStep(Address soon_object, int size);
+  V8_EXPORT_PRIVATE virtual void StartNextInlineAllocationStep() {}
+
+  void AllocationStep(int bytes_since_last, Address soon_object, int size);
 
   // Return the total amount committed memory for this space, i.e., allocatable
   // memory and page headers.
@@ -2071,15 +2071,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
 
   void ResetFreeList() { free_list_.Reset(); }
 
-  // Set space allocation info.
-  void SetTopAndLimit(Address top, Address limit) {
-    DCHECK(top == limit ||
-           Page::FromAddress(top) == Page::FromAddress(limit - 1));
-    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-    allocation_info_.Reset(top, limit);
-  }
-
-  void SetAllocationInfo(Address top, Address limit);
+  void PauseAllocationObservers() override;
+  void ResumeAllocationObservers() override;
 
   // Empty space allocation info, returning unused area to free list.
   void EmptyAllocationInfo();
@@ -2184,6 +2177,21 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
   // multiple tasks hold locks on pages while trying to sweep each others pages.
   void AnnounceLockedPage(Page* page) { locked_page_ = page; }
 
+  Address ComputeLimit(Address start, Address end, size_t size_in_bytes);
+  void SetAllocationInfo(Address top, Address limit);
+
+ private:
+  // Set space allocation info.
+  void SetTopAndLimit(Address top, Address limit) {
+    DCHECK(top == limit ||
+           Page::FromAddress(top) == Page::FromAddress(limit - 1));
+    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+    allocation_info_.Reset(top, limit);
+  }
+  void DecreaseLimit(Address new_limit);
+  void StartNextInlineAllocationStep() override;
+  bool SupportsInlineAllocation() { return identity() == OLD_SPACE; }
+
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
@@ -2246,6 +2254,7 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
   base::Mutex space_mutex_;
 
   Page* locked_page_;
+  Address top_on_previous_step_;
 
   friend class IncrementalMarking;
   friend class MarkCompactCollector;
@@ -2647,14 +2656,6 @@ class NewSpace : public Space {
     UpdateInlineAllocationLimit(0);
   }
 
-  // Allows observation of inline allocation. The observer->Step() method gets
-  // called after every step_size bytes have been allocated (approximately).
-  // This works by adjusting the allocation limit to a lower value and adjusting
-  // it after each step.
-  void AddAllocationObserver(AllocationObserver* observer) override;
-
-  void RemoveAllocationObserver(AllocationObserver* observer) override;
-
   // Get the extent of the inactive semispace (for use as a marking stack,
   // or to zap it). Notice: space-addresses are not necessarily on the
   // same page, so FromSpaceStart() might be above FromSpaceEnd().
@@ -2761,7 +2762,7 @@ class NewSpace : public Space {
   // different when we cross a page boundary or reset the space.
   void InlineAllocationStep(Address top, Address new_top, Address soon_object,
                             size_t size);
-  void StartNextInlineAllocationStep();
+  void StartNextInlineAllocationStep() override;
 
   friend class SemiSpaceIterator;
 };
@@ -172,8 +172,11 @@ class SamplingAllocationObserver : public AllocationObserver {
   void Step(int bytes_allocated, Address soon_object, size_t size) override {
     USE(heap_);
     DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
-    DCHECK(soon_object);
-    profiler_->SampleObject(soon_object, size);
+    if (soon_object) {
+      // TODO(ofrobots): it would be better to sample the next object rather
+      // than skipping this sample epoch if soon_object happens to be null.
+      profiler_->SampleObject(soon_object, size);
+    }
   }
 
   intptr_t GetNextStepSize() override { return GetNextSampleInterval(rate_); }
@@ -32,6 +32,7 @@ int FixedArrayLenFromSize(int size) {
 
 std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
                                                                 int remainder) {
+  PauseAllocationObserversScope pause_observers(heap);
   std::vector<Handle<FixedArray>> handles;
   Isolate* isolate = heap->isolate();
   const int kArraySize = 128;
@@ -203,7 +204,7 @@ void ForceEvacuationCandidate(Page* page) {
     int remaining = static_cast<int>(limit - top);
     space->heap()->CreateFillerObjectAt(top, remaining,
                                         ClearRecordedSlots::kNo);
-    space->SetTopAndLimit(nullptr, nullptr);
+    space->EmptyAllocationInfo();
   }
 }
 
@@ -20,6 +20,7 @@ namespace heap {
 
 Page* HeapTester::AllocateByteArraysOnPage(
     Heap* heap, std::vector<ByteArray*>* byte_arrays) {
+  PauseAllocationObserversScope pause_observers(heap);
   const int kLength = 256 - ByteArray::kHeaderSize;
   const int kSize = ByteArray::SizeFor(kLength);
   CHECK_EQ(kSize, 256);
@@ -3063,3 +3063,77 @@ TEST(SamplingHeapProfilerLeftTrimming) {
 
   heap_profiler->StopSamplingHeapProfiler();
 }
+
+TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
+  i::FLAG_allow_natives_syntax = true;
+  i::FLAG_expose_gc = true;
+
+  CcTest::InitializeVM();
+  if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
+  if (i::FLAG_gc_global || i::FLAG_stress_compaction ||
+      i::FLAG_stress_incremental_marking) {
+    return;
+  }
+
+  v8::HandleScope scope(v8::Isolate::GetCurrent());
+  LocalContext env;
+  v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+  // Suppress randomness to avoid flakiness in tests.
+  v8::internal::FLAG_sampling_heap_profiler_suppress_randomness = true;
+
+  // Grow new space unitl maximum capacity reached.
+  while (!CcTest::heap()->new_space()->IsAtMaximumCapacity()) {
+    CcTest::heap()->new_space()->Grow();
+  }
+
+  i::ScopedVector<char> source(1024);
+  i::SNPrintF(source,
+              "var number_elements = %d;"
+              "var elements = new Array(number_elements);"
+              "function f() {"
+              "  for (var i = 0; i < number_elements; i++) {"
+              "    elements[i] = [{}, {}, {}];"
+              "  }"
+              "  return elements[number_elements - 1];"
+              "};"
+              "f(); gc();"
+              "f(); f();"
+              "%%OptimizeFunctionOnNextCall(f);"
+              "f();"
+              "f;",
+              i::AllocationSite::kPretenureMinimumCreated + 1);
+
+  v8::Local<v8::Function> f =
+      v8::Local<v8::Function>::Cast(CompileRun(source.start()));
+
+  // Make sure the function is producing pre-tenured objects.
+  auto res = f->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
+  i::Handle<i::JSObject> o = i::Handle<i::JSObject>::cast(
+      v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
+  CHECK(CcTest::heap()->InOldSpace(o->elements()));
+  CHECK(CcTest::heap()->InOldSpace(*o));
+
+  // Call the function and profile it.
+  heap_profiler->StartSamplingHeapProfiler(64);
+  for (int i = 0; i < 100; ++i) {
+    f->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
+  }
+
+  std::unique_ptr<v8::AllocationProfile> profile(
+      heap_profiler->GetAllocationProfile());
+  CHECK(profile);
+  heap_profiler->StopSamplingHeapProfiler();
+
+  const char* names[] = {"f"};
+  auto node_f = FindAllocationProfileNode(env->GetIsolate(), *profile,
+                                          ArrayVector(names));
+  CHECK(node_f);
+
+  int count = 0;
+  for (auto allocation : node_f->allocations) {
+    count += allocation.count;
+  }
+
+  CHECK_GE(count, 9000);
+}