Unit tests for the live range conflict detection mechanism (CoalescedLiveRanges) in the Greedy Allocator.

Consolidated conflict detection and traversal logic in CoalescedLiveRanges to avoid duplication in both code and testing. In addition, this change achieves better separation between CoalescedLiveRanges and other register allocator components, improving testability and maintainability.

BUG=

Review URL: https://codereview.chromium.org/1219063017

Cr-Commit-Position: refs/heads/master@{#29783}
This commit is contained in:
mtrofin 2015-07-21 21:50:16 -07:00 committed by Commit bot
parent 9ec20f9c36
commit 3e3608cdd5
7 changed files with 589 additions and 170 deletions

View File

@ -10,136 +10,131 @@ namespace v8 {
namespace internal {
namespace compiler {
#define TRACE(...) \
do { \
if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
} while (false)
const float CoalescedLiveRanges::kAllocatedRangeMultiplier = 10.0;
void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
UpdateWeightAtAllocation(range);
for (auto interval = range->first_interval(); interval != nullptr;
interval = interval->next()) {
storage().insert({interval->start(), interval->end(), range});
}
// Constructs an iterator over the conflicts between |range| and the intervals
// recorded in |storage|, and immediately positions it on the first conflict
// (or invalidates it if there is none).
LiveRangeConflictIterator::LiveRangeConflictIterator(const LiveRange* range,
IntervalStore* storage)
: query_(range->first_interval()),
pos_(storage->end()),
intervals_(storage) {
MovePosAndQueryToFirstConflict();
}
void CoalescedLiveRanges::Remove(LiveRange* range) {
for (auto interval = range->first_interval(); interval != nullptr;
interval = interval->next()) {
storage().erase({interval->start(), interval->end(), nullptr});
}
range->UnsetAssignedRegister();
// Returns the conflicting range the iterator is currently positioned on, or
// nullptr once the iterator has been invalidated.
LiveRange* LiveRangeConflictIterator::Current() const {
  return IsFinished() ? nullptr : pos_->range_;
}
float CoalescedLiveRanges::GetMaximumConflictingWeight(
const LiveRange* range) const {
float ret = LiveRange::kInvalidWeight;
auto end = storage().end();
for (auto query = range->first_interval(); query != nullptr;
query = query->next()) {
auto conflict = GetFirstConflict(query);
void LiveRangeConflictIterator::MovePosToFirstConflictForQuery() {
DCHECK(query_ != nullptr);
auto end = intervals_->end();
LifetimePosition q_start = query_->start();
LifetimePosition q_end = query_->end();
if (conflict == end) continue;
for (; QueryIntersectsAllocatedInterval(query, conflict); ++conflict) {
// It is possible we'll visit the same range multiple times, because
// successive (not necessarily consecutive) intervals belong to the same
// range, or because different intervals of the query range have the same
// range as conflict.
DCHECK_NE(conflict->range->weight(), LiveRange::kInvalidWeight);
ret = Max(ret, conflict->range->weight());
if (ret == LiveRange::kMaxWeight) break;
}
}
return ret;
}
void CoalescedLiveRanges::EvictAndRescheduleConflicts(
LiveRange* range, AllocationScheduler* scheduler) {
auto end = storage().end();
for (auto query = range->first_interval(); query != nullptr;
query = query->next()) {
auto conflict = GetFirstConflict(query);
if (conflict == end) continue;
while (QueryIntersectsAllocatedInterval(query, conflict)) {
LiveRange* range_to_evict = conflict->range;
// Bypass successive intervals belonging to the same range, because we're
// about to remove this range, and we don't want the storage iterator to
// become invalid.
while (conflict != end && conflict->range == range_to_evict) {
++conflict;
}
DCHECK(range_to_evict->HasRegisterAssigned());
CHECK(!range_to_evict->IsFixed());
Remove(range_to_evict);
UpdateWeightAtEviction(range_to_evict);
TRACE("Evicted range %d.\n", range_to_evict->id());
scheduler->Schedule(range_to_evict);
}
}
}
bool CoalescedLiveRanges::VerifyAllocationsAreValid() const {
LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
for (auto i : storage_) {
if (i.start < last_end) {
return false;
}
last_end = i.end;
}
return true;
}
void CoalescedLiveRanges::UpdateWeightAtAllocation(LiveRange* range) {
DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
range->set_weight(range->weight() * kAllocatedRangeMultiplier);
}
void CoalescedLiveRanges::UpdateWeightAtEviction(LiveRange* range) {
DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
range->set_weight(range->weight() / kAllocatedRangeMultiplier);
}
CoalescedLiveRanges::interval_iterator CoalescedLiveRanges::GetFirstConflict(
const UseInterval* query) const {
DCHECK(query != nullptr);
auto end = storage().end();
LifetimePosition q_start = query->start();
LifetimePosition q_end = query->end();
if (storage().empty() || storage().rbegin()->end <= q_start ||
storage().begin()->start >= q_end) {
return end;
if (intervals_->empty() || intervals_->rbegin()->end_ <= q_start ||
intervals_->begin()->start_ >= q_end) {
pos_ = end;
return;
}
auto ret = storage().upper_bound(AsAllocatedInterval(q_start));
// ret is either at the end (no start strictly greater than q_start) or
pos_ = intervals_->upper_bound(AsAllocatedInterval(q_start));
// pos is either at the end (no start strictly greater than q_start) or
// at some position with the aforementioned property. In either case, the
// allocated interval before this one may intersect our query:
// either because, although it starts before this query's start, it ends
// after; or because it starts exactly at the query start. So unless we're
// right at the beginning of the storage - meaning the first allocated
// interval is also starting after this query's start - see what's behind.
if (ret != storage().begin()) {
--ret;
if (!QueryIntersectsAllocatedInterval(query, ret)) {
if (pos_ != intervals_->begin()) {
--pos_;
if (!QueryIntersectsAllocatedInterval()) {
// The interval behind wasn't intersecting, so move back.
++ret;
++pos_;
}
}
if (ret != end && QueryIntersectsAllocatedInterval(query, ret)) return ret;
return end;
if (pos_ == end || !QueryIntersectsAllocatedInterval()) {
pos_ = end;
}
}
// Walks the query intervals until one of them intersects an allocated
// interval; if none does, the iterator is invalidated.
void LiveRangeConflictIterator::MovePosAndQueryToFirstConflict() {
  const auto no_conflict = intervals_->end();
  while (query_ != nullptr) {
    MovePosToFirstConflictForQuery();
    if (pos_ != no_conflict) {
      DCHECK(QueryIntersectsAllocatedInterval());
      return;
    }
    query_ = query_->next();
  }
  Invalidate();
}
// Advances pos_ past every interval belonging to the current conflict's range,
// so that the iterator never reports the same range twice in a row. The range
// may still reappear later, after intervals of other ranges.
void LiveRangeConflictIterator::IncrementPosAndSkipOverRepetitions() {
auto end = intervals_->end();
DCHECK(pos_ != end);
LiveRange* current_conflict = Current();
while (pos_ != end && pos_->range_ == current_conflict) {
++pos_;
}
}
// Advances to the next conflicting range. When |clean_behind| is set, the
// current conflict's intervals are erased from the store before advancing,
// which implements RemoveCurrentAndGetNext.
LiveRange* LiveRangeConflictIterator::InternalGetNext(bool clean_behind) {
if (IsFinished()) return nullptr;
LiveRange* to_clear = Current();
IncrementPosAndSkipOverRepetitions();
// At this point, pos_ is either at the end, or on an interval that doesn't
// correspond to the same range as to_clear. This interval may not even be
// a conflict.
if (clean_behind) {
// Since we parked pos_ on an iterator that won't be affected by removal,
// we can safely delete to_clear's intervals.
for (auto interval = to_clear->first_interval(); interval != nullptr;
interval = interval->next()) {
// AllocatedInterval orders by start_ only, so the range_ field of the
// erase key is irrelevant and may be nullptr.
AllocatedInterval erase_key(interval->start(), interval->end(), nullptr);
intervals_->erase(erase_key);
}
}
// We may have parked pos_ at the end, or on a non-conflict. In that case,
// move to the next query and reinitialize pos and query. This may invalidate
// the iterator, if no more conflicts are available.
if (!QueryIntersectsAllocatedInterval()) {
query_ = query_->next();
MovePosAndQueryToFirstConflict();
}
return Current();
}
// Creates an iterator over all ranges in this collection that conflict with
// |range|.
LiveRangeConflictIterator CoalescedLiveRanges::GetConflicts(
const LiveRange* range) {
return LiveRangeConflictIterator(range, &intervals());
}
// Records every use interval of |range| in the interval store, marking the
// range as allocated to this register.
void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
  for (auto interval = range->first_interval(); interval != nullptr;
       interval = interval->next()) {
    intervals().insert(
        AllocatedInterval(interval->start(), interval->end(), range));
  }
}
bool CoalescedLiveRanges::VerifyAllocationsAreValidForTesting() const {
LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
for (auto i : intervals_) {
if (i.start_ < last_end) {
return false;
}
last_end = i.end_;
}
return true;
}

View File

@ -13,8 +13,96 @@ namespace internal {
namespace compiler {
class AllocationScheduler;
// Implementation detail for CoalescedLiveRanges.
// A single use interval of a live range that has been allocated to a register,
// keyed by its start position.
struct AllocatedInterval {
AllocatedInterval(LifetimePosition start, LifetimePosition end,
LiveRange* range)
: start_(start), end_(end), range_(range) {}
LifetimePosition start_;
LifetimePosition end_;
// Owning live range; may be nullptr when the interval is used only as a
// lookup/erase key.
LiveRange* range_;
// Ordering considers start_ only, so keys need not populate end_ or range_.
bool operator<(const AllocatedInterval& other) const {
return start_ < other.start_;
}
bool operator>(const AllocatedInterval& other) const {
return start_ > other.start_;
}
};
typedef ZoneSet<AllocatedInterval> IntervalStore;
// An iterator over conflicts of a live range, obtained from CoalescedLiveRanges
// The design supports two main scenarios (see GreedyAllocator):
// (1) observing each conflicting range, without mutating the allocations, and
// (2) observing each conflicting range, and then moving to the next, after
// removing the current conflict.
class LiveRangeConflictIterator {
public:
// Current conflict. nullptr if no conflicts, or if we reached the end of
// conflicts.
LiveRange* Current() const;
// Get the next conflict. Caller should handle non-consecutive repetitions of
// the same range.
LiveRange* GetNext() { return InternalGetNext(false); }
// Get the next conflict, after evicting the current one. Caller may expect
// to never observe the same live range more than once.
LiveRange* RemoveCurrentAndGetNext() { return InternalGetNext(true); }
private:
friend class CoalescedLiveRanges;
typedef IntervalStore::const_iterator interval_iterator;
LiveRangeConflictIterator(const LiveRange* range, IntervalStore* store);
// Move the store iterator to first interval intersecting query. Since the
// intervals are sorted, subsequent intervals intersecting query follow. May
// leave the store iterator at "end", meaning that the current query does not
// have an intersection.
void MovePosToFirstConflictForQuery();
// Move both query and store iterator to the first intersection, if any. If
// none, then it invalidates the iterator (IsFinished() == true)
void MovePosAndQueryToFirstConflict();
// Increment pos and skip over intervals belonging to the same range we
// started with (i.e. Current() before the call). It is possible that range
// will be seen again, but not consecutively.
void IncrementPosAndSkipOverRepetitions();
// Common implementation used by both GetNext as well as
// RemoveCurrentAndGetNext.
LiveRange* InternalGetNext(bool clean_behind);
bool IsFinished() const { return query_ == nullptr; }
// Builds a lookup key for the interval store; comparison uses only the start
// position, so end and range are left invalid/null.
static AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
return AllocatedInterval(pos, LifetimePosition::Invalid(), nullptr);
}
// Intersection utilities.
static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
LifetimePosition b_start, LifetimePosition b_end) {
return a_start < b_end && b_start < a_end;
}
bool QueryIntersectsAllocatedInterval() const {
DCHECK(query_ != nullptr);
return pos_ != intervals_->end() &&
Intersects(query_->start(), query_->end(), pos_->start_, pos_->end_);
}
void Invalidate() {
query_ = nullptr;
pos_ = intervals_->end();
}
// Current use interval of the queried (candidate) range.
const UseInterval* query_;
// Position in |intervals_| of the current conflicting interval.
interval_iterator pos_;
// The allocated intervals being queried; not owned by the iterator.
IntervalStore* intervals_;
};
// Collection of live ranges allocated to the same register.
// It supports efficiently finding all conflicts for a given, non-allocated
@ -30,45 +118,27 @@ class AllocationScheduler;
// traversal of conflicts.
class CoalescedLiveRanges : public ZoneObject {
public:
explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {}
void clear() { storage_.clear(); }
explicit CoalescedLiveRanges(Zone* zone) : intervals_(zone) {}
void clear() { intervals_.clear(); }
bool empty() const { return storage_.empty(); }
bool empty() const { return intervals_.empty(); }
// Returns kInvalidWeight if there are no conflicts, or the largest weight of
// a range conflicting with the given range.
float GetMaximumConflictingWeight(const LiveRange* range) const;
// Iterate over each live range conflicting with the provided one.
// The same live range may be observed multiple, but non-consecutive times.
LiveRangeConflictIterator GetConflicts(const LiveRange* range);
// Evicts all conflicts of the given range, and reschedules them with the
// provided scheduler.
void EvictAndRescheduleConflicts(LiveRange* range,
AllocationScheduler* scheduler);
// Allocates a range with a pre-calculated candidate weight.
void AllocateRange(LiveRange* range);
// TODO(mtrofin): remove this in favor of comprehensive unit tests.
bool VerifyAllocationsAreValid() const;
// Unit testing API, verifying that allocated intervals do not overlap.
bool VerifyAllocationsAreValidForTesting() const;
private:
static const float kAllocatedRangeMultiplier;
// Storage detail for CoalescedLiveRanges.
struct AllocatedInterval {
LifetimePosition start;
LifetimePosition end;
LiveRange* range;
bool operator<(const AllocatedInterval& other) const {
return start < other.start;
}
bool operator>(const AllocatedInterval& other) const {
return start > other.start;
}
};
typedef ZoneSet<AllocatedInterval> IntervalStore;
typedef IntervalStore::const_iterator interval_iterator;
IntervalStore& storage() { return storage_; }
const IntervalStore& storage() const { return storage_; }
IntervalStore& intervals() { return intervals_; }
const IntervalStore& intervals() const { return intervals_; }
// Augment the weight of a range that is about to be allocated.
static void UpdateWeightAtAllocation(LiveRange* range);
@ -76,29 +146,8 @@ class CoalescedLiveRanges : public ZoneObject {
// Reduce the weight of a range that has lost allocation.
static void UpdateWeightAtEviction(LiveRange* range);
// Intersection utilities.
static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
LifetimePosition b_start, LifetimePosition b_end) {
return a_start < b_end && b_start < a_end;
}
static AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
return {pos, LifetimePosition::Invalid(), nullptr};
}
bool QueryIntersectsAllocatedInterval(const UseInterval* query,
interval_iterator& pos) const {
DCHECK(query != nullptr);
return pos != storage().end() &&
Intersects(query->start(), query->end(), pos->start, pos->end);
}
void Remove(LiveRange* range);
// Get the first interval intersecting query. Since the intervals are sorted,
// subsequent intervals intersecting query follow.
interval_iterator GetFirstConflict(const UseInterval* query) const;
IntervalStore storage_;
IntervalStore intervals_;
DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
};

View File

@ -9,12 +9,16 @@ namespace v8 {
namespace internal {
namespace compiler {
#define TRACE(...) \
do { \
if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
} while (false)
const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0;
namespace {
@ -131,12 +135,10 @@ void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
DCHECK(!range->HasRegisterAssigned());
current_allocations(reg_id)->AllocateRange(range);
AllocateRegisterToRange(reg_id, range);
TRACE("Assigning %s to range %d\n", RegisterName(reg_id), range->id());
range->set_assigned_register(reg_id);
DCHECK(current_allocations(reg_id)->VerifyAllocationsAreValid());
}
@ -153,7 +155,7 @@ void GreedyAllocator::PreallocateFixedRanges() {
int reg_nr = fixed_range->assigned_register();
EnsureValidRangeWeight(fixed_range);
current_allocations(reg_nr)->AllocateRange(fixed_range);
AllocateRegisterToRange(reg_nr, fixed_range);
}
}
}
@ -190,8 +192,7 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
// where the maximum conflict is lower than the candidate's weight, the one
// with the smallest such weight.
for (int i = 0; i < num_registers(); i++) {
float max_conflict_weight =
current_allocations(i)->GetMaximumConflictingWeight(range);
float max_conflict_weight = GetMaximumConflictingWeight(i, range);
if (max_conflict_weight == LiveRange::kInvalidWeight) {
free_reg = i;
break;
@ -216,8 +217,7 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
if (evictable_reg >= 0) {
TRACE("Found evictable register %s for live range %d\n",
RegisterName(free_reg), range->id());
current_allocations(evictable_reg)
->EvictAndRescheduleConflicts(range, &scheduler());
EvictAndRescheduleConflicts(evictable_reg, range);
AssignRangeToRegister(evictable_reg, range);
return;
}
@ -227,6 +227,21 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
}
// Evicts every range allocated at |reg_id| that conflicts with |range|, and
// hands each evicted range back to the scheduler for reallocation.
void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
const LiveRange* range) {
auto conflicts = current_allocations(reg_id)->GetConflicts(range);
for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
conflict = conflicts.RemoveCurrentAndGetNext()) {
DCHECK(conflict->HasRegisterAssigned());
// Fixed ranges must never be chosen for eviction.
CHECK(!conflict->IsFixed());
conflict->UnsetAssignedRegister();
UpdateWeightAtEviction(conflict);
scheduler().Schedule(conflict);
TRACE("Evicted range %d.\n", conflict->id());
}
}
void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
size_t initial_range_count = data()->live_ranges().size();
for (size_t i = 0; i < initial_range_count; ++i) {
@ -298,6 +313,22 @@ void GreedyAllocator::AllocateRegisters() {
}
// Returns the largest weight among the ranges allocated at |reg_id| that
// conflict with |range|, or LiveRange::kInvalidWeight if there is no conflict.
// Bails out early once kMaxWeight is seen, since nothing can exceed it.
float GreedyAllocator::GetMaximumConflictingWeight(
    unsigned reg_id, const LiveRange* range) const {
  float result = LiveRange::kInvalidWeight;
  auto conflicts = current_allocations(reg_id)->GetConflicts(range);
  LiveRange* conflict = conflicts.Current();
  while (conflict != nullptr) {
    DCHECK_NE(conflict->weight(), LiveRange::kInvalidWeight);
    result = Max(result, conflict->weight());
    if (result == LiveRange::kMaxWeight) return result;
    conflict = conflicts.GetNext();
  }
  return result;
}
void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
// The live range weight will be invalidated when ranges are created or split.
// Otherwise, it is consistently updated when the range is allocated or

View File

@ -62,10 +62,28 @@ class GreedyAllocator final : public RegisterAllocator {
void AllocateRegisters();
private:
static const float kAllocatedRangeMultiplier;
static void UpdateWeightAtAllocation(LiveRange* range) {
DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
range->set_weight(range->weight() * kAllocatedRangeMultiplier);
}
static void UpdateWeightAtEviction(LiveRange* range) {
DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
range->set_weight(range->weight() / kAllocatedRangeMultiplier);
}
AllocationScheduler& scheduler() { return scheduler_; }
CoalescedLiveRanges* current_allocations(unsigned i) {
return allocations_[i];
}
CoalescedLiveRanges* current_allocations(unsigned i) const {
return allocations_[i];
}
Zone* local_zone() const { return local_zone_; }
// Insert fixed ranges.
@ -75,6 +93,13 @@ class GreedyAllocator final : public RegisterAllocator {
// TODO(mtrofin): groups.
void ScheduleAllocationCandidates();
void AllocateRegisterToRange(unsigned reg_id, LiveRange* range) {
UpdateWeightAtAllocation(range);
current_allocations(reg_id)->AllocateRange(range);
}
// Evict and reschedule conflicts of a given range, at a given register.
void EvictAndRescheduleConflicts(unsigned reg_id, const LiveRange* range);
// Find the optimal split for ranges defined by a memory operand, e.g.
// constants or function parameters passed on the stack.
void SplitAndSpillRangesDefinedByMemoryOperand();
@ -92,6 +117,11 @@ class GreedyAllocator final : public RegisterAllocator {
// Calculate the new weight of a range that is about to be allocated.
float GetAllocatedRangeWeight(float candidate_weight);
// Returns kInvalidWeight if there are no conflicts, or the largest weight of
// a range conflicting with the given range, at the given register.
float GetMaximumConflictingWeight(unsigned reg_id,
const LiveRange* range) const;
// This is the extension point for splitting heuristics.
void SplitOrSpillBlockedRange(LiveRange* range);

View File

@ -140,6 +140,10 @@ class LifetimePosition final {
return LifetimePosition(kMaxInt);
}
// Creates a position directly from its raw integer representation
// (used by unit tests to build arbitrary positions).
static inline LifetimePosition FromInt(int value) {
return LifetimePosition(value);
}
private:
static const int kHalfStep = 2;
static const int kStep = 2 * kHalfStep;

View File

@ -0,0 +1,309 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/coalesced-live-ranges.h"
#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
namespace compiler {
// Utility offering shorthand syntax for building up a range by providing its ID
// and pairs (start, end) specifying intervals. Circumvents current incomplete
// support for C++ features such as instantiation lists, on OS X and Android.
// Fluent helper for constructing a LiveRange from an id and a list of
// (start, end) interval pairs. Works around incomplete support for C++
// initializer lists on some platforms (OS X, Android).
class TestRangeBuilder {
 public:
  explicit TestRangeBuilder(Zone* zone) : id_(-1), pairs_(), zone_(zone) {}

  // Sets the id of the range to be built.
  TestRangeBuilder& Id(int id) {
    id_ = id;
    return *this;
  }
  // Appends an interval [start, end) to the range to be built.
  TestRangeBuilder& Add(int start, int end) {
    pairs_.push_back({start, end});
    return *this;
  }

  // Shorthand for a single-interval range.
  LiveRange* Build(int start, int end) { return Add(start, end).Build(); }
  // Materializes the range and resets the interval list for reuse.
  LiveRange* Build() {
    LiveRange* range = new (zone_) LiveRange(id_, MachineType::kRepTagged);
    // LiveRange expects its intervals supplied back-to-front, so feed the
    // recorded pairs in reverse order.
    for (auto it = pairs_.rbegin(); it != pairs_.rend(); ++it) {
      LifetimePosition start = LifetimePosition::FromInt(it->first);
      LifetimePosition end = LifetimePosition::FromInt(it->second);
      CHECK(start < end);
      range->AddUseInterval(start, end, zone_);
    }
    pairs_.clear();
    return range;
  }

 private:
  typedef std::pair<int, int> Interval;
  typedef std::vector<Interval> IntervalList;
  int id_;
  IntervalList pairs_;
  Zone* zone_;
};
// Test fixture owning a CoalescedLiveRanges instance plus helpers that phrase
// conflict queries in terms of expected sets of range ids.
class CoalescedLiveRangesTest : public TestWithZone {
public:
CoalescedLiveRangesTest() : TestWithZone(), ranges_(zone()) {}
// True iff |range| has no conflict among the current allocations.
bool HasNoConflicts(const LiveRange* range);
// True iff |range| conflicts with exactly the range(s) with the given id(s).
bool ConflictsPreciselyWith(const LiveRange* range, int id);
bool ConflictsPreciselyWith(const LiveRange* range, int id1, int id2);
CoalescedLiveRanges& ranges() { return ranges_; }
const CoalescedLiveRanges& ranges() const { return ranges_; }
// Forwards to CoalescedLiveRanges::VerifyAllocationsAreValidForTesting.
bool AllocationsAreValid() const;
// Evicts all conflicts of |range|, expecting each conflicting range to be
// reported exactly once.
void RemoveConflicts(LiveRange* range);
private:
typedef ZoneSet<int> LiveRangeIDs;
// True iff the set of ids conflicting with |range| equals |ids|.
bool IsRangeConflictingWith(const LiveRange* range, const LiveRangeIDs& ids);
CoalescedLiveRanges ranges_;
};
bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
int id) {
LiveRangeIDs set(zone());
set.insert(id);
return IsRangeConflictingWith(range, set);
}
bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
int id1, int id2) {
LiveRangeIDs set(zone());
set.insert(id1);
set.insert(id2);
return IsRangeConflictingWith(range, set);
}
bool CoalescedLiveRangesTest::HasNoConflicts(const LiveRange* range) {
LiveRangeIDs set(zone());
return IsRangeConflictingWith(range, set);
}
// Evicts every conflict of |range| via RemoveCurrentAndGetNext, verifying the
// iterator's contract that each conflicting range is reported exactly once.
void CoalescedLiveRangesTest::RemoveConflicts(LiveRange* range) {
  auto conflicts = ranges().GetConflicts(range);
  LiveRangeIDs seen(zone());
  for (LiveRange* current = conflicts.Current(); current != nullptr;
       current = conflicts.RemoveCurrentAndGetNext()) {
    // insert().second is false when the id was already present.
    EXPECT_TRUE(seen.insert(current->id()).second);
  }
}
// Forwards to the testing-only check that allocated intervals do not overlap.
bool CoalescedLiveRangesTest::AllocationsAreValid() const {
return ranges().VerifyAllocationsAreValidForTesting();
}
bool CoalescedLiveRangesTest::IsRangeConflictingWith(const LiveRange* range,
const LiveRangeIDs& ids) {
LiveRangeIDs found_ids(zone());
auto conflicts = ranges().GetConflicts(range);
for (auto conflict = conflicts.Current(); conflict != nullptr;
conflict = conflicts.GetNext()) {
found_ids.insert(conflict->id());
}
return found_ids == ids;
}
// With no allocations present, any query reports no conflicts.
TEST_F(CoalescedLiveRangesTest, VisitEmptyAllocations) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
ASSERT_TRUE(ranges().empty());
ASSERT_TRUE(AllocationsAreValid());
ASSERT_TRUE(HasNoConflicts(range));
}
// Queries lying entirely before an allocated interval do not conflict,
// including one ending exactly at the allocation's start (ends are exclusive).
TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterAllocations) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(5, 6);
ranges().AllocateRange(range);
ASSERT_FALSE(ranges().empty());
ASSERT_TRUE(AllocationsAreValid());
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 2);
ASSERT_TRUE(HasNoConflicts(query));
// Touching at position 5 is not an intersection.
query = TestRangeBuilder(zone()).Id(3).Build(1, 5);
ASSERT_TRUE(HasNoConflicts(query));
}
// Same as above, with a multi-interval allocation: queries falling before,
// between, or after the allocated intervals do not conflict.
TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterManyAllocations) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(5, 7).Add(10, 12).Build();
ranges().AllocateRange(range);
ASSERT_FALSE(ranges().empty());
ASSERT_TRUE(AllocationsAreValid());
LiveRange* query =
TestRangeBuilder(zone()).Id(2).Add(1, 2).Add(13, 15).Build();
ASSERT_TRUE(HasNoConflicts(query));
query = TestRangeBuilder(zone()).Id(3).Add(1, 5).Add(12, 15).Build();
ASSERT_TRUE(HasNoConflicts(query));
}
// An allocated range, queried against the store, conflicts with exactly
// itself.
TEST_F(CoalescedLiveRangesTest, SelfConflictsPreciselyWithSelf) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
ranges().AllocateRange(range);
ASSERT_FALSE(ranges().empty());
ASSERT_TRUE(AllocationsAreValid());
ASSERT_TRUE(ConflictsPreciselyWith(range, 1));
range = TestRangeBuilder(zone()).Id(2).Build(8, 10);
ranges().AllocateRange(range);
ASSERT_TRUE(ConflictsPreciselyWith(range, 2));
}
// A query starting before an allocated interval and overlapping its beginning
// conflicts precisely with that interval's range.
TEST_F(CoalescedLiveRangesTest, QueryStartsBeforeConflict) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 3);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
ranges().AllocateRange(range);
query = TestRangeBuilder(zone()).Id(4).Build(6, 9);
ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
}
// A query starting inside an allocated interval conflicts precisely with that
// interval's range.
TEST_F(CoalescedLiveRangesTest, QueryStartsInConflict) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(3, 6);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
ranges().AllocateRange(range);
query = TestRangeBuilder(zone()).Id(4).Build(9, 11);
ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
}
// A query fully contained within an allocated interval conflicts with it.
TEST_F(CoalescedLiveRangesTest, QueryContainedInConflict) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 3);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
}
// A query fully covering an allocated interval conflicts with it.
TEST_F(CoalescedLiveRangesTest, QueryContainsConflict) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 3);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 5);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
}
// A query overlapping several intervals that all belong to one range reports
// that single range as its only conflict.
TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsSameRange) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(7, 9).Add(20, 25).Build();
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 8);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
}
// A query spanning intervals of two distinct ranges conflicts with both.
TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsDifferentRanges) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(20, 25).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(7, 10);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(2, 22);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1, 2));
}
// A query whose intervals all fall in the gaps between (or after) the
// allocated intervals has no conflicts.
TEST_F(CoalescedLiveRangesTest, QueryFitsInGaps) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 15).Add(20, 25).Build();
ranges().AllocateRange(range);
LiveRange* query =
TestRangeBuilder(zone()).Id(3).Add(5, 10).Add(16, 19).Add(27, 30).Build();
ASSERT_TRUE(HasNoConflicts(query));
}
// Removing conflicts evicts the overlapped range (id 1) entirely while
// leaving the later, non-conflicting range (id 2) allocated.
TEST_F(CoalescedLiveRangesTest, DeleteConflictBefore) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Add(1, 4).Add(5, 6).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(3, 7);
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
}
// Removing conflicts evicts the later range (id 2, overlapped by the query)
// entirely, leaving the earlier range (id 1) allocated.
TEST_F(CoalescedLiveRangesTest, DeleteConflictAfter) {
LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Add(40, 50).Add(60, 70).Build();
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(45, 60);
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
}
// A query straddling both intervals of range 1 removes all of range 1; only
// the untouched range 2 remains.
TEST_F(CoalescedLiveRangesTest, DeleteConflictStraddle) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 20).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
}
// A query overlapping several abutting intervals of range 1 still removes all
// of range 1; only range 2 remains.
TEST_F(CoalescedLiveRangesTest, DeleteConflictManyOverlapsBefore) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(10, 20).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
ranges().AllocateRange(range);
LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
}
// Range 1's intervals reappear after the non-conflicting range 2 in interval
// order; removal must still evict all of range 1 and keep range 2.
TEST_F(CoalescedLiveRangesTest, DeleteWhenConflictRepeatsAfterNonConflict) {
LiveRange* range =
TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(20, 30).Build();
ranges().AllocateRange(range);
range = TestRangeBuilder(zone()).Id(2).Build(12, 15);
ranges().AllocateRange(range);
LiveRange* query =
TestRangeBuilder(zone()).Id(3).Add(1, 8).Add(22, 25).Build();
RemoveConflicts(query);
query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
}
} // namespace compiler
} // namespace internal
} // namespace v8

View File

@ -43,6 +43,7 @@
'base/utils/random-number-generator-unittest.cc',
'char-predicates-unittest.cc',
'compiler/change-lowering-unittest.cc',
'compiler/coalesced-live-ranges-unittest.cc',
'compiler/common-operator-reducer-unittest.cc',
'compiler/common-operator-unittest.cc',
'compiler/compiler-test-utils.h',