// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/bit-vector.h"

#include "src/base/bits.h"

namespace v8 {
namespace internal {

#ifdef DEBUG
// Dumps the indices of all set bits to stdout as a comma-separated list
// enclosed in braces (e.g. "{0,3,17}"), terminated by a newline.
void BitVector::Print() {
  PrintF("{");
  bool needs_comma = false;
  for (int bit = 0; bit < length(); bit++) {
    if (!Contains(bit)) continue;
    if (needs_comma) PrintF(",");
    needs_comma = true;
    PrintF("%d", bit);
  }
  PrintF("}\n");
}
#endif
// Moves the iterator to the next set bit, or past the end of the vector if
// no set bit remains (in which case Done() becomes true).
void BitVector::Iterator::Advance() {
  current_++;
  // current_value_ caches the not-yet-visited bits of the current data word;
  // zero means the word is exhausted, so scan forward for the next non-zero
  // word in the backing store.
  uintptr_t val = current_value_;
  while (val == 0) {
    current_index_++;
    if (Done()) return;  // Ran off the last word; iteration is finished.
    val = target_->data_[current_index_];
    // Rebase current_ to the index of the first bit of the new word.
    current_ = current_index_ << kDataBitShift;
  }
  // Skip low-order zero bytes, then low-order zero bits, so the lowest set
  // bit of val ends up at bit position 0.
  // NOTE(review): SkipZeroBytes/SkipZeroBits are declared in the header and
  // presumably also advance current_ by the number of positions skipped —
  // confirm against src/bit-vector.h.
  val = SkipZeroBytes(val);
  val = SkipZeroBits(val);
  // Discard the bit just found; the remaining bits are saved for the next
  // call to Advance().
  current_value_ = val >> 1;
}
int BitVector::Count() const {
|
|
|
|
int count = 0;
|
|
|
|
for (int i = 0; i < data_length_; i++) {
|
2014-10-31 10:44:04 +00:00
|
|
|
uintptr_t data = data_[i];
|
|
|
|
if (sizeof(data) == 8) {
|
|
|
|
count += base::bits::CountPopulation64(data);
|
|
|
|
} else {
|
|
|
|
count += base::bits::CountPopulation32(static_cast<uint32_t>(data));
|
|
|
|
}
|
2014-08-20 12:10:41 +00:00
|
|
|
}
|
|
|
|
return count;
|
|
|
|
}
}  // namespace internal
}  // namespace v8