[heap] Fix failing DCHECK with original_top < top

The problem here was that IncrementalMarking::Step was invoking
new_space()->ResetOriginalTop(), which sets original_top to the current
top. IncrementalMarking::Step could be invoked during
InvokeAllocationObservers(), which is called right after acquiring a
new LAB and allocating the first object in it. However, this first
allocation might come from generated code with allocation folding
enabled. Such code may not use all of the memory it reserved and may
move top backwards again afterwards. Since InvokeAllocationObservers()
had already set original_top to the then-current top, original_top
could end up bigger than top, which is exactly what the DCHECK rejected.
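For illustration, a minimal standalone sketch of that ordering with
plain integers (hypothetical variable names, not V8 API):

  // sketch.cc - reproduces the broken ordering outside of V8.
  #include <cassert>
  #include <cstdint>

  int main() {
    uintptr_t top = 0x1000;        // current LAB top
    uintptr_t original_top = top;  // original_top starts at the LAB start

    // 1. Generated code with allocation folding reserves memory up front.
    top += 64;

    // 2. An allocation observer step runs and (before this fix) resets
    //    original_top to the current top.
    original_top = top;

    // 3. The generated code does not use the whole reservation and moves
    //    top backwards again.
    top -= 32;

    // 4. Now original_top > top, the situation the DCHECK rejects.
    assert(original_top <= top);  // fails
    return 0;
  }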

Fix this by ensuring that original_top always equals the LAB start.
Each time the LAB start is moved or accounted for, original_top for the
new space is now updated as well. IncrementalMarking::Step() is no
longer allowed to move original_top.
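The resulting new-space LAB invariant, which the new
NewSpace::VerifyTop() below checks, can be summarized as:

  // Invariant after this change (checked by NewSpace::VerifyTop()):
  //   allocation_info_.start() == original_top_
  //       <= allocation_info_.top()
  //       <= allocation_info_.limit()
  //       <= original_limit_ == to_space_.page_high()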

Bug: chromium:1116278, v8:10315
Change-Id: Ib18a0b07e2665b8ba933555387b84329cbecdf5b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2398519
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69840}
Dominik Inführ 2020-09-10 13:39:07 +02:00 committed by Commit Bot
parent 00efb85b9c
commit ca4489972a
12 changed files with 71 additions and 15 deletions

View File

@@ -249,6 +249,10 @@ class ShouldBeZeroOnReturnScope final {
Object StackGuard::HandleInterrupts() {
TRACE_EVENT0("v8.execute", "V8.HandleInterrupts");
#if DEBUG
isolate_->heap()->VerifyNewSpaceTop();
#endif
if (FLAG_verify_predictable) {
// Advance synthetic time by making a time request.
isolate_->heap()->MonotonicallyIncreasingTimeInMs();

View File

@@ -3741,6 +3741,7 @@ double Heap::MonotonicallyIncreasingTimeInMs() {
static_cast<double>(base::Time::kMillisecondsPerSecond);
}
void Heap::VerifyNewSpaceTop() { new_space()->VerifyTop(); }
bool Heap::IdleNotification(int idle_time_in_ms) {
return IdleNotification(

View File

@@ -699,6 +699,8 @@ class Heap {
V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs();
void VerifyNewSpaceTop();
void RecordStats(HeapStats* stats, bool take_snapshot = false);
bool MeasureMemory(std::unique_ptr<v8::MeasureMemoryDelegate> delegate,

View File

@@ -122,6 +122,9 @@ void IncrementalMarkingJob::Task::RunInternal() {
}
if (!incremental_marking->IsStopped()) {
// All objects are initialized at that point.
heap->new_space()->MarkLabStartInitialized();
heap->new_lo_space()->ResetPendingObject();
StepResult step_result = Step(heap);
if (!incremental_marking->IsStopped()) {
const TaskType task_type =

View File

@@ -1041,8 +1041,6 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
double embedder_deadline = 0.0;
if (state_ == MARKING) {
if (FLAG_concurrent_marking) {
heap_->new_space()->ResetOriginalTop();
heap_->new_lo_space()->ResetPendingObject();
// It is safe to merge back all objects that were on hold to the shared
// work list at Step because we are at a safepoint where all objects
// are properly initialized.

View File

@@ -36,11 +36,7 @@ class EvacuationAllocator {
// Give back remaining LAB space if this EvacuationAllocator's new space LAB
// sits right next to new space allocation top.
const LinearAllocationArea info = new_space_lab_.CloseAndMakeIterable();
const Address top = new_space_->top();
if (info.limit() != kNullAddress && info.limit() == top) {
DCHECK_NE(info.top(), kNullAddress);
*new_space_->allocation_top_address() = info.top();
}
new_space_->MaybeFreeUnusedLab(info);
}
inline AllocationResult Allocate(AllocationSpace space, int object_size,

View File

@@ -511,7 +511,6 @@ bool MarkCompactCollector::StartCompaction() {
void MarkCompactCollector::StartMarking() {
if (FLAG_concurrent_marking || FLAG_parallel_marking) {
heap_->new_space()->ResetOriginalTop();
heap_->new_lo_space()->ResetPendingObject();
}
std::vector<Address> contexts =

View File

@@ -87,6 +87,10 @@ HeapObject SemiSpaceObjectIterator::Next() {
AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
#if DEBUG
VerifyTop();
#endif
AllocationResult result;
if (alignment != kWordAligned) {

View File

@@ -11,6 +11,7 @@
#include "src/heap/memory-allocator.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"
namespace v8 {
namespace internal {
@@ -496,6 +497,10 @@ void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
DCHECK_LE(new_limit, to_space_.page_high());
allocation_info_.set_limit(new_limit);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
#if DEBUG
VerifyTop();
#endif
}
bool NewSpace::AddFreshPage() {
@@ -550,6 +555,19 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
return true;
}
void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
if (info.limit() != kNullAddress && info.limit() == top()) {
DCHECK_NE(info.top(), kNullAddress);
allocation_info_.set_top(info.top());
allocation_info_.MoveStartToTop();
original_top_.store(info.top(), std::memory_order_release);
}
#if DEBUG
VerifyTop();
#endif
}
std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
}
@@ -610,6 +628,20 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
return result;
}
void NewSpace::VerifyTop() {
// Ensure validity of LAB: start <= top <= limit
DCHECK_LE(allocation_info_.start(), allocation_info_.top());
DCHECK_LE(allocation_info_.top(), allocation_info_.limit());
// Ensure that original_top_ always equals LAB start.
DCHECK_EQ(original_top_, allocation_info_.start());
// Ensure that limit() is <= original_limit_; original_limit_ always needs
// to be the end of the current to-space page.
DCHECK_LE(allocation_info_.limit(), original_limit_);
DCHECK_EQ(original_limit_, to_space_.page_high());
}
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).

View File

@@ -368,11 +368,7 @@ class V8_EXPORT_PRIVATE NewSpace
return to_space_.minimum_capacity();
}
void ResetOriginalTop() {
DCHECK_GE(top(), original_top_);
DCHECK_LE(top(), original_limit_);
original_top_.store(top(), std::memory_order_release);
}
void VerifyTop();
Address original_top_acquire() {
return original_top_.load(std::memory_order_acquire);
@@ -458,6 +454,14 @@
SemiSpace& from_space() { return from_space_; }
SemiSpace& to_space() { return to_space_; }
void MoveOriginalTopForward() {
DCHECK_GE(top(), original_top_);
DCHECK_LE(top(), original_limit_);
original_top_.store(top(), std::memory_order_release);
}
void MaybeFreeUnusedLab(LinearAllocationArea info);
private:
// Update linear allocation area to match the current to-space page.
void UpdateLinearAllocationArea();

View File

@@ -365,7 +365,7 @@ void SpaceWithLinearArea::PauseAllocationObservers() {
void SpaceWithLinearArea::ResumeAllocationObservers() {
Space::ResumeAllocationObservers();
allocation_info_.MoveStartToTop();
MarkLabStartInitialized();
UpdateInlineAllocationLimit(0);
}
@@ -374,7 +374,18 @@ void SpaceWithLinearArea::AdvanceAllocationObservers() {
allocation_info_.start() != allocation_info_.top()) {
allocation_counter_.AdvanceAllocationObservers(allocation_info_.top() -
allocation_info_.start());
allocation_info_.MoveStartToTop();
MarkLabStartInitialized();
}
}
void SpaceWithLinearArea::MarkLabStartInitialized() {
allocation_info_.MoveStartToTop();
if (identity() == NEW_SPACE) {
heap()->new_space()->MoveOriginalTopForward();
#if DEBUG
heap()->VerifyNewSpaceTop();
#endif
}
}

View File

@@ -511,6 +511,8 @@ class SpaceWithLinearArea : public Space {
size_t aligned_size_in_bytes,
size_t allocation_size);
void MarkLabStartInitialized();
// When allocation observers are active we may use a lower limit to allow the
// observers to 'interrupt' earlier than the natural limit. Given a linear
// area bounded by [start, end), this function computes the limit to use to