Reland "[heap] Fix racy OOM in new space concurrent sweeping"
This is a reland of commit 95eece3068
Original change's description:
> [heap] Fix racy OOM in new space concurrent sweeping
>
> Some tests are flakily failing due to a timing issue between new space
> concurrent sweeping and allocations.
> When new space is swept concurrently along with the other spaces, each concurrent thread
> will take one new space page. If a young allocation happens right after
> the atomic pause finished, it's possible that all new space pages are
> held by concurrent threads. The main thread will try to contribute to
> sweeping but get no pages, and fail to allocate.
>
> Fix by restoring the round robin order of sweeping, such that not all
> threads start with new space.
>
> Bug: v8:12612, v8:13413
> Change-Id: I3b448199b4678c339f9e59f7ca31d9e1e0e76011
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3976043
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Commit-Queue: Omer Katz <omerkatz@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#83918}
Bug: v8:12612, v8:13413
Change-Id: Idbd5cbb53c9f43290e02d10d85ee4199ea9a4136
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3980756
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Auto-Submit: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83927}
Commit: 20b395a8ef
Parent: 199304b26b
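To see why the old scheduling could OOM, consider a toy model of the race described in the commit message. This is a standalone sketch, not V8 code; the page and task counts are illustrative assumptions. If every concurrent sweeper task grabs a new-space page first, a small new space can have all of its pages checked out at the same time, and the main thread then finds nothing to contribute to when a young allocation needs memory right after the pause.

// Toy model of the racy OOM (illustrative numbers, not taken from V8).
#include <algorithm>
#include <cstdio>

int main() {
  const int new_space_pages = 3;   // assumed: tiny new space after the pause
  const int concurrent_tasks = 3;  // assumed: concurrent sweeper task count

  // Old behavior: every concurrent task starts with a new-space page, so up
  // to one page per task is held at the same time.
  const int pages_held = std::min(new_space_pages, concurrent_tasks);
  const int pages_left_for_main_thread = new_space_pages - pages_held;

  std::printf("pages held by concurrent sweepers: %d\n", pages_held);
  std::printf("pages the main thread can sweep:   %d\n",
              pages_left_for_main_thread);
  if (pages_left_for_main_thread == 0) {
    // The main thread cannot contribute to sweeping and the young allocation
    // fails, which is the flaky OOM this commit fixes.
    std::printf("young allocation fails -> flaky OOM\n");
  }
  return 0;
}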
@@ -982,24 +982,24 @@ using WeakSlotCallbackWithHeap = bool (*)(Heap* heap, FullObjectSlot pointer);
 // consecutive.
 enum AllocationSpace {
   RO_SPACE,       // Immortal, immovable and immutable objects,
+  NEW_SPACE,      // Young generation space for regular objects collected
+                  // with Scavenger/MinorMC.
   OLD_SPACE,      // Old generation regular object space.
   CODE_SPACE,     // Old generation code object space, marked executable.
   SHARED_SPACE,   // Space shared between multiple isolates. Optional.
-  NEW_SPACE,      // Young generation space for regular objects collected
-                  // with Scavenger/MinorMC.
+  NEW_LO_SPACE,   // Young generation large object space.
   LO_SPACE,       // Old generation large object space.
   CODE_LO_SPACE,  // Old generation large code object space.
-  NEW_LO_SPACE,   // Young generation large object space.
   SHARED_LO_SPACE,  // Space shared between multiple isolates. Optional.
 
   FIRST_SPACE = RO_SPACE,
   LAST_SPACE = SHARED_LO_SPACE,
-  FIRST_MUTABLE_SPACE = OLD_SPACE,
+  FIRST_MUTABLE_SPACE = NEW_SPACE,
   LAST_MUTABLE_SPACE = SHARED_LO_SPACE,
   FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE,
   LAST_GROWABLE_PAGED_SPACE = SHARED_SPACE,
-  FIRST_SWEEPABLE_SPACE = OLD_SPACE,
-  LAST_SWEEPABLE_SPACE = NEW_SPACE
+  FIRST_SWEEPABLE_SPACE = NEW_SPACE,
+  LAST_SWEEPABLE_SPACE = SHARED_SPACE
 };
 constexpr int kSpaceTagSize = 4;
 static_assert(FIRST_SPACE == 0);
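The reordering above is what makes the sweeper change further below possible: with NEW_SPACE placed next to the other regular spaces, FIRST_SWEEPABLE_SPACE..LAST_SWEEPABLE_SPACE becomes one contiguous range that starts at the young generation. Here is a minimal standalone sketch of the properties the new code relies on (a stripped-down re-declaration for illustration, not the V8 header):

// Stripped-down re-declaration of the new enum order (illustration only).
enum AllocationSpace {
  RO_SPACE,
  NEW_SPACE,
  OLD_SPACE,
  CODE_SPACE,
  SHARED_SPACE,
  NEW_LO_SPACE,
  LO_SPACE,
  CODE_LO_SPACE,
  SHARED_LO_SPACE,

  FIRST_SWEEPABLE_SPACE = NEW_SPACE,
  LAST_SWEEPABLE_SPACE = SHARED_SPACE
};

// Mirrors the Sweeper constant in the last hunk: the sweepable range now
// covers NEW_SPACE, OLD_SPACE, CODE_SPACE and SHARED_SPACE.
constexpr int kNumberOfSweepingSpaces =
    LAST_SWEEPABLE_SPACE - FIRST_SWEEPABLE_SPACE + 1;
static_assert(kNumberOfSweepingSpaces == 4, "four sweepable spaces");
static_assert(NEW_SPACE == FIRST_SWEEPABLE_SPACE,
              "index 0 of the sweepable range is the young generation");

int main() { return 0; }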
@@ -6413,8 +6413,7 @@ HeapObjectIterator::HeapObjectIterator(
     default:
       break;
   }
-  // By not calling |space_iterator_->HasNext()|, we assume that the old
-  // space is first returned and that it has been set up.
+  CHECK(space_iterator_->HasNext());
   object_iterator_ = space_iterator_->Next()->GetObjectIterator(heap_);
   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) heap_->tp_heap_->ResetIterator();
 }
@@ -85,30 +85,41 @@ class Sweeper::SweeperJob final : public JobTask {
 
  private:
   void RunImpl(JobDelegate* delegate, bool is_joining_thread) {
-    static constexpr int non_new_space_sweeping_spaces =
-        kNumberOfSweepingSpaces - 1;
-    static_assert(NEW_SPACE ==
-                  FIRST_SWEEPABLE_SPACE + non_new_space_sweeping_spaces);
+    static_assert(NEW_SPACE == FIRST_SWEEPABLE_SPACE);
     const int offset = delegate->GetTaskId();
     DCHECK_LT(offset, concurrent_sweepers_->size());
-    ConcurrentSweeper& sweeper = (*concurrent_sweepers_)[offset];
+    ConcurrentSweeper& concurrent_sweeper = (*concurrent_sweepers_)[offset];
+    if (offset > 0) {
+      if (!SweepNonNewSpaces(concurrent_sweeper, delegate, is_joining_thread,
+                             offset, kNumberOfSweepingSpaces))
+        return;
+    }
     {
       TRACE_GC_EPOCH(
           tracer_, sweeper_->GetTracingScope(NEW_SPACE, is_joining_thread),
           is_joining_thread ? ThreadKind::kMain : ThreadKind::kBackground);
-      if (!sweeper.ConcurrentSweepSpace(NEW_SPACE, delegate)) return;
+      if (!concurrent_sweeper.ConcurrentSweepSpace(NEW_SPACE, delegate)) return;
     }
-    if (!sweeper_->should_sweep_non_new_spaces_) return;
+    if (!SweepNonNewSpaces(concurrent_sweeper, delegate, is_joining_thread, 1,
+                           offset == 0 ? kNumberOfSweepingSpaces : offset))
+      return;
+  }
+
+  bool SweepNonNewSpaces(ConcurrentSweeper& concurrent_sweeper,
+                         JobDelegate* delegate, bool is_joining_thread,
+                         int first_space_index, int last_space_index) {
+    if (!sweeper_->should_sweep_non_new_spaces_) return true;
     TRACE_GC_EPOCH(
         tracer_, sweeper_->GetTracingScope(OLD_SPACE, is_joining_thread),
         is_joining_thread ? ThreadKind::kMain : ThreadKind::kBackground);
-    for (int i = 0; i < non_new_space_sweeping_spaces; i++) {
-      const AllocationSpace space_id = static_cast<AllocationSpace>(
-          FIRST_SWEEPABLE_SPACE +
-          ((i + offset) % non_new_space_sweeping_spaces));
+    for (int i = first_space_index; i < last_space_index; i++) {
+      const AllocationSpace space_id =
+          static_cast<AllocationSpace>(FIRST_SWEEPABLE_SPACE + i);
       DCHECK_NE(NEW_SPACE, space_id);
-      if (!sweeper.ConcurrentSweepSpace(space_id, delegate)) return;
+      if (!concurrent_sweeper.ConcurrentSweepSpace(space_id, delegate))
+        return false;
     }
+    return true;
   }
 
   Sweeper* const sweeper_;
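The effect of the restored round-robin order can be checked with a small standalone sketch that reproduces the visiting order of the new RunImpl (not the V8 class; indices are offsets from FIRST_SWEEPABLE_SPACE, so 0 is NEW_SPACE, and the task count used in main is an illustrative assumption):

// Reproduces the per-task space visiting order of the new RunImpl
// (standalone sketch; not the V8 class).
#include <cstdio>
#include <vector>

std::vector<int> VisitingOrder(int offset, int num_sweeping_spaces) {
  std::vector<int> order;
  // Tasks with offset > 0 first sweep the non-new spaces [offset, end).
  if (offset > 0) {
    for (int i = offset; i < num_sweeping_spaces; ++i) order.push_back(i);
  }
  // Every task then sweeps NEW_SPACE (index 0).
  order.push_back(0);
  // Finally the remaining non-new spaces: [1, offset) for offset > 0, or all
  // of [1, end) for task 0.
  const int last = (offset == 0) ? num_sweeping_spaces : offset;
  for (int i = 1; i < last; ++i) order.push_back(i);
  return order;
}

int main() {
  const int kNumberOfSweepingSpaces = 4;        // NEW, OLD, CODE, SHARED
  for (int offset = 0; offset < 3; ++offset) {  // assumed: three sweeper tasks
    std::printf("task %d:", offset);
    for (int index : VisitingOrder(offset, kNumberOfSweepingSpaces)) {
      std::printf(" %d", index);
    }
    std::printf("\n");
  }
  return 0;
}

Only task 0 starts at index 0, so the concurrent sweepers no longer grab every new-space page at once, and the main thread can still contribute to new-space sweeping when a young allocation races with the start of concurrent sweeping.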
@@ -130,7 +130,7 @@ class Sweeper {
 
   static const int kNumberOfSweepingSpaces =
       LAST_SWEEPABLE_SPACE - FIRST_SWEEPABLE_SPACE + 1;
-  static constexpr int kMaxSweeperTasks = 3;
+  static constexpr int kMaxSweeperTasks = kNumberOfSweepingSpaces;
 
   template <typename Callback>
   void ForAllSweepingSpaces(Callback callback) const {