[heap] Relax accessing markbits in ranges.
When calling the `bitmap(chunk)` method of the various *MarkingState accessors, we would receive a raw `Bitmap` pointer, which does not tell you whether accesses to markbits should be made atomically or not. As a result, we would default to atomic operations even when they are not necessary.

Here we introduce a templated `ConcurrentBitmap` class that wraps operations done on the markbits and allows them to be made non-atomic. Additionally, some of the `Bitmap` methods were only used to verify the heap and in tests, so they do not need atomic implementations. Using them in a concurrent context should now fail to link, making sure they are not misused in the future.

Change-Id: Ifb55f8522c8bf0c87d65da9227864ee428d21bbd
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel
Reviewed-on: https://chromium-review.googlesource.com/c/1482916
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Pierre Langlois <pierre.langlois@arm.com>
Cr-Commit-Position: refs/heads/master@{#59836}
commit b152bb75f8 (parent bd9ef0f32a)
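To illustrate the idea behind the change, here is a minimal, self-contained sketch (not the actual V8 code; the class and member names below are simplified, and C++20 std::atomic_ref stands in for V8's base::AsAtomic32 helpers). The access mode is encoded in the bitmap's type, so holders of a NON_ATOMIC bitmap get plain stores while ATOMIC users get atomic read-modify-write operations, and verification-only helpers are declared for both modes but defined only for the NON_ATOMIC specialization, so calling them through the ATOMIC variant fails at link time.

// Illustrative sketch only; simplified relative to the real ConcurrentBitmap.
#include <atomic>
#include <cstdint>

enum class AccessMode { ATOMIC, NON_ATOMIC };

template <AccessMode mode>
class ConcurrentBitmapSketch {
 public:
  static constexpr uint32_t kCellsCount = 8;

  // Implemented for both access modes via the specializations below.
  void SetBitsInCell(uint32_t cell_index, uint32_t mask);

  // Verification-only helper: declared for both modes, but only the
  // NON_ATOMIC specialization is defined, so an ATOMIC instantiation that
  // calls it fails at link time.
  bool AllBitsSetInRange(uint32_t start_bit, uint32_t end_bit);

 private:
  uint32_t cells_[kCellsCount] = {};
};

template <>
inline void ConcurrentBitmapSketch<AccessMode::NON_ATOMIC>::SetBitsInCell(
    uint32_t cell_index, uint32_t mask) {
  cells_[cell_index] |= mask;  // Plain, non-atomic read-modify-write.
}

template <>
inline void ConcurrentBitmapSketch<AccessMode::ATOMIC>::SetBitsInCell(
    uint32_t cell_index, uint32_t mask) {
  // Relaxed atomic OR so concurrent markers cannot lose each other's bits.
  std::atomic_ref<uint32_t>(cells_[cell_index])
      .fetch_or(mask, std::memory_order_relaxed);
}

template <>
inline bool ConcurrentBitmapSketch<AccessMode::NON_ATOMIC>::AllBitsSetInRange(
    uint32_t start_bit, uint32_t end_bit) {
  for (uint32_t i = start_bit; i < end_bit; i++) {
    if (!(cells_[i / 32] & (1u << (i % 32)))) return false;
  }
  return true;
}
// Note: there is deliberately no ATOMIC definition of AllBitsSetInRange,
// which is the "fail to link" guard against concurrent misuse.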
@@ -37,11 +37,11 @@ class ConcurrentMarkingState final
   explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
       : memory_chunk_data_(memory_chunk_data) {}
 
-  Bitmap* bitmap(const MemoryChunk* chunk) {
+  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) {
     DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                   reinterpret_cast<intptr_t>(chunk),
               MemoryChunk::kMarkBitmapOffset);
-    return chunk->marking_bitmap_;
+    return chunk->marking_bitmap<AccessMode::ATOMIC>();
   }
 
   void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -64,7 +64,8 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
  protected:
   explicit MarkingVerifier(Heap* heap) : heap_(heap) {}
 
-  virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;
+  virtual ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
+      const MemoryChunk* chunk) = 0;
 
   virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
   virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
@@ -181,7 +182,8 @@ class FullMarkingVerifier : public MarkingVerifier {
   }
 
  protected:
-  Bitmap* bitmap(const MemoryChunk* chunk) override {
+  ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
+      const MemoryChunk* chunk) override {
     return marking_state_->bitmap(chunk);
   }
 
@@ -3774,7 +3776,8 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
         marking_state_(
             heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
 
-  Bitmap* bitmap(const MemoryChunk* chunk) override {
+  ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
+      const MemoryChunk* chunk) override {
     return marking_state_->bitmap(chunk);
   }
 
@@ -300,8 +300,8 @@ class MarkCompactCollectorBase {
 class MinorMarkingState final
     : public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
  public:
-  Bitmap* bitmap(const MemoryChunk* chunk) const {
-    return chunk->young_generation_bitmap_;
+  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
+    return chunk->young_generation_bitmap<AccessMode::ATOMIC>();
   }
 
   void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -321,8 +321,9 @@ class MinorNonAtomicMarkingState final
     : public MarkingStateBase<MinorNonAtomicMarkingState,
                               AccessMode::NON_ATOMIC> {
  public:
-  Bitmap* bitmap(const MemoryChunk* chunk) const {
-    return chunk->young_generation_bitmap_;
+  ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
+      const MemoryChunk* chunk) const {
+    return chunk->young_generation_bitmap<AccessMode::NON_ATOMIC>();
   }
 
   void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -345,11 +346,11 @@ class MinorNonAtomicMarkingState final
 class IncrementalMarkingState final
     : public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
  public:
-  Bitmap* bitmap(const MemoryChunk* chunk) const {
+  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
     DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                   reinterpret_cast<intptr_t>(chunk),
               MemoryChunk::kMarkBitmapOffset);
-    return chunk->marking_bitmap_;
+    return chunk->marking_bitmap<AccessMode::ATOMIC>();
   }
 
   // Concurrent marking uses local live bytes so we may do these accesses
@@ -370,11 +371,11 @@ class IncrementalMarkingState final
 class MajorAtomicMarkingState final
     : public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
  public:
-  Bitmap* bitmap(const MemoryChunk* chunk) const {
+  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
     DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                   reinterpret_cast<intptr_t>(chunk),
               MemoryChunk::kMarkBitmapOffset);
-    return chunk->marking_bitmap_;
+    return chunk->marking_bitmap<AccessMode::ATOMIC>();
   }
 
   void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -387,11 +388,12 @@ class MajorNonAtomicMarkingState final
     : public MarkingStateBase<MajorNonAtomicMarkingState,
                               AccessMode::NON_ATOMIC> {
  public:
-  Bitmap* bitmap(const MemoryChunk* chunk) const {
+  ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
+      const MemoryChunk* chunk) const {
     DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                   reinterpret_cast<intptr_t>(chunk),
               MemoryChunk::kMarkBitmapOffset);
-    return chunk->marking_bitmap_;
+    return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
   }
 
   void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -7,89 +7,9 @@
 namespace v8 {
 namespace internal {
 
-void Bitmap::Clear() {
-  base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
-  for (int i = 0; i < CellsCount(); i++) {
-    base::Relaxed_Store(cell_base + i, 0);
-  }
-  // This fence prevents re-ordering of publishing stores with the mark-bit
-  // clearing stores.
-  base::SeqCst_MemoryFence();
-}
-
-void Bitmap::MarkAllBits() {
-  base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
-  for (int i = 0; i < CellsCount(); i++) {
-    base::Relaxed_Store(cell_base + i, 0xffffffff);
-  }
-  // This fence prevents re-ordering of publishing stores with the mark-bit
-  // clearing stores.
-  base::SeqCst_MemoryFence();
-}
-
-void Bitmap::SetRange(uint32_t start_index, uint32_t end_index) {
-  if (start_index >= end_index) return;
-  end_index--;
-
-  unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
-  MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
-  unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
-  MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
-  if (start_cell_index != end_cell_index) {
-    // Firstly, fill all bits from the start address to the end of the first
-    // cell with 1s.
-    SetBitsInCell<AccessMode::ATOMIC>(start_cell_index,
-                                      ~(start_index_mask - 1));
-    // Then fill all in between cells with 1s.
-    base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
-    for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
-      base::Relaxed_Store(cell_base + i, ~0u);
-    }
-    // Finally, fill all bits until the end address in the last cell with 1s.
-    SetBitsInCell<AccessMode::ATOMIC>(end_cell_index,
-                                      end_index_mask | (end_index_mask - 1));
-  } else {
-    SetBitsInCell<AccessMode::ATOMIC>(
-        start_cell_index, end_index_mask | (end_index_mask - start_index_mask));
-  }
-  // This fence prevents re-ordering of publishing stores with the mark-
-  // bit setting stores.
-  base::SeqCst_MemoryFence();
-}
-
-void Bitmap::ClearRange(uint32_t start_index, uint32_t end_index) {
-  if (start_index >= end_index) return;
-  end_index--;
-
-  unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
-  MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
-
-  unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
-  MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
-
-  if (start_cell_index != end_cell_index) {
-    // Firstly, fill all bits from the start address to the end of the first
-    // cell with 0s.
-    ClearBitsInCell<AccessMode::ATOMIC>(start_cell_index,
-                                        ~(start_index_mask - 1));
-    // Then fill all in between cells with 0s.
-    base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
-    for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
-      base::Relaxed_Store(cell_base + i, 0);
-    }
-    // Finally, set all bits until the end address in the last cell with 0s.
-    ClearBitsInCell<AccessMode::ATOMIC>(end_cell_index,
-                                        end_index_mask | (end_index_mask - 1));
-  } else {
-    ClearBitsInCell<AccessMode::ATOMIC>(
-        start_cell_index, end_index_mask | (end_index_mask - start_index_mask));
-  }
-  // This fence prevents re-ordering of publishing stores with the mark-
-  // bit clearing stores.
-  base::SeqCst_MemoryFence();
-}
-
-bool Bitmap::AllBitsSetInRange(uint32_t start_index, uint32_t end_index) {
+template <>
+bool ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsSetInRange(
+    uint32_t start_index, uint32_t end_index) {
   if (start_index >= end_index) return false;
   end_index--;
 
@@ -116,7 +36,9 @@ bool Bitmap::AllBitsSetInRange(uint32_t start_index, uint32_t end_index) {
   }
 }
 
-bool Bitmap::AllBitsClearInRange(uint32_t start_index, uint32_t end_index) {
+template <>
+bool ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsClearInRange(
+    uint32_t start_index, uint32_t end_index) {
   if (start_index >= end_index) return true;
   end_index--;
 
@@ -193,7 +115,8 @@ class CellPrinter {
 
 }  // anonymous namespace
 
-void Bitmap::Print() {
+template <>
+void ConcurrentBitmap<AccessMode::NON_ATOMIC>::Print() {
   CellPrinter printer;
   for (int i = 0; i < CellsCount(); i++) {
     printer.Print(i, cells()[i]);
@@ -202,7 +125,8 @@ void Bitmap::Print() {
   PrintF("\n");
 }
 
-bool Bitmap::IsClean() {
+template <>
+bool ConcurrentBitmap<AccessMode::NON_ATOMIC>::IsClean() {
   for (int i = 0; i < CellsCount(); i++) {
     if (cells()[i] != 0) {
       return false;
@@ -135,31 +135,36 @@ class V8_EXPORT_PRIVATE Bitmap {
     MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
     return MarkBit(cell, mask);
   }
+};
+
+template <AccessMode mode>
+class ConcurrentBitmap : public Bitmap {
+ public:
   void Clear();
 
   void MarkAllBits();
 
   // Clears bits in the given cell. The mask specifies bits to clear: if a
   // bit is set in the mask then the corresponding bit is cleared in the cell.
-  template <AccessMode mode = AccessMode::NON_ATOMIC>
   void ClearBitsInCell(uint32_t cell_index, uint32_t mask);
 
   // Sets bits in the given cell. The mask specifies bits to set: if a
   // bit is set in the mask then the corresponding bit is set in the cell.
-  template <AccessMode mode = AccessMode::NON_ATOMIC>
   void SetBitsInCell(uint32_t cell_index, uint32_t mask);
 
-  // Sets all bits in the range [start_index, end_index). The cells at the
-  // boundary of the range are updated with atomic compare and swap operation.
-  // The inner cells are updated with relaxed write.
+  // Sets all bits in the range [start_index, end_index). If the access is
+  // atomic, the cells at the boundary of the range are updated with atomic
+  // compare and swap operation. The inner cells are updated with relaxed write.
   void SetRange(uint32_t start_index, uint32_t end_index);
 
-  // Clears all bits in the range [start_index, end_index). The cells at the
-  // boundary of the range are updated with atomic compare and swap operation.
-  // The inner cells are updated with relaxed write.
+  // Clears all bits in the range [start_index, end_index). If the access is
+  // atomic, the cells at the boundary of the range are updated with atomic
+  // compare and swap operation. The inner cells are updated with relaxed write.
   void ClearRange(uint32_t start_index, uint32_t end_index);
 
+  // The following methods are *not* safe to use in a concurrent context so they
+  // are not implemented for `AccessMode::ATOMIC`.
+
   // Returns true if all bits in the range [start_index, end_index) are set.
   bool AllBitsSetInRange(uint32_t start_index, uint32_t end_index);
 
@@ -169,32 +174,174 @@ class V8_EXPORT_PRIVATE Bitmap {
   void Print();
 
   bool IsClean();
 
+ private:
+  // Clear all bits in the cell range [start_cell_index, end_cell_index). If the
+  // access is atomic then *still* use a relaxed memory ordering.
+  void ClearCellRangeRelaxed(uint32_t start_cell_index,
+                             uint32_t end_cell_index);
+
+  // Set all bits in the cell range [start_cell_index, end_cell_index). If the
+  // access is atomic then *still* use a relaxed memory ordering.
+  void SetCellRangeRelaxed(uint32_t start_cell_index, uint32_t end_cell_index);
 };
 
 template <>
-inline void Bitmap::SetBitsInCell<AccessMode::NON_ATOMIC>(uint32_t cell_index,
-                                                          uint32_t mask) {
+inline void ConcurrentBitmap<AccessMode::ATOMIC>::ClearCellRangeRelaxed(
+    uint32_t start_cell_index, uint32_t end_cell_index) {
+  base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
+  for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+    base::Relaxed_Store(cell_base + i, 0);
+  }
+}
+
+template <>
+inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::ClearCellRangeRelaxed(
+    uint32_t start_cell_index, uint32_t end_cell_index) {
+  for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+    cells()[i] = 0;
+  }
+}
+
+template <>
+inline void ConcurrentBitmap<AccessMode::ATOMIC>::SetCellRangeRelaxed(
+    uint32_t start_cell_index, uint32_t end_cell_index) {
+  base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
+  for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+    base::Relaxed_Store(cell_base + i, 0xffffffff);
+  }
+}
+
+template <>
+inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::SetCellRangeRelaxed(
+    uint32_t start_cell_index, uint32_t end_cell_index) {
+  for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+    cells()[i] = 0xffffffff;
+  }
+}
+
+template <AccessMode mode>
+inline void ConcurrentBitmap<mode>::Clear() {
+  ClearCellRangeRelaxed(0, CellsCount());
+  if (mode == AccessMode::ATOMIC) {
+    // This fence prevents re-ordering of publishing stores with the mark-bit
+    // setting stores.
+    base::SeqCst_MemoryFence();
+  }
+}
+
+template <AccessMode mode>
+inline void ConcurrentBitmap<mode>::MarkAllBits() {
+  SetCellRangeRelaxed(0, CellsCount());
+  if (mode == AccessMode::ATOMIC) {
+    // This fence prevents re-ordering of publishing stores with the mark-bit
+    // setting stores.
+    base::SeqCst_MemoryFence();
+  }
+}
+
+template <>
+inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::SetBitsInCell(
+    uint32_t cell_index, uint32_t mask) {
   cells()[cell_index] |= mask;
 }
 
 template <>
-inline void Bitmap::SetBitsInCell<AccessMode::ATOMIC>(uint32_t cell_index,
-                                                      uint32_t mask) {
+inline void ConcurrentBitmap<AccessMode::ATOMIC>::SetBitsInCell(
+    uint32_t cell_index, uint32_t mask) {
   base::AsAtomic32::SetBits(cells() + cell_index, mask, mask);
 }
 
 template <>
-inline void Bitmap::ClearBitsInCell<AccessMode::NON_ATOMIC>(uint32_t cell_index,
-                                                            uint32_t mask) {
+inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::ClearBitsInCell(
+    uint32_t cell_index, uint32_t mask) {
   cells()[cell_index] &= ~mask;
 }
 
 template <>
-inline void Bitmap::ClearBitsInCell<AccessMode::ATOMIC>(uint32_t cell_index,
-                                                        uint32_t mask) {
+inline void ConcurrentBitmap<AccessMode::ATOMIC>::ClearBitsInCell(
+    uint32_t cell_index, uint32_t mask) {
   base::AsAtomic32::SetBits(cells() + cell_index, 0u, mask);
 }
 
+template <AccessMode mode>
+void ConcurrentBitmap<mode>::SetRange(uint32_t start_index,
+                                      uint32_t end_index) {
+  if (start_index >= end_index) return;
+  end_index--;
+
+  unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+  MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+  unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+  MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+  if (start_cell_index != end_cell_index) {
+    // Firstly, fill all bits from the start address to the end of the first
+    // cell with 1s.
+    SetBitsInCell(start_cell_index, ~(start_index_mask - 1));
+    // Then fill all in between cells with 1s.
+    SetCellRangeRelaxed(start_cell_index + 1, end_cell_index);
+    // Finally, fill all bits until the end address in the last cell with 1s.
+    SetBitsInCell(end_cell_index, end_index_mask | (end_index_mask - 1));
+  } else {
+    SetBitsInCell(start_cell_index,
+                  end_index_mask | (end_index_mask - start_index_mask));
+  }
+  if (mode == AccessMode::ATOMIC) {
+    // This fence prevents re-ordering of publishing stores with the mark-bit
+    // setting stores.
+    base::SeqCst_MemoryFence();
+  }
+}
+
+template <AccessMode mode>
+void ConcurrentBitmap<mode>::ClearRange(uint32_t start_index,
+                                        uint32_t end_index) {
+  if (start_index >= end_index) return;
+  end_index--;
+
+  unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+  MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+  unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+  MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+  if (start_cell_index != end_cell_index) {
+    // Firstly, fill all bits from the start address to the end of the first
+    // cell with 0s.
+    ClearBitsInCell(start_cell_index, ~(start_index_mask - 1));
+    // Then fill all in between cells with 0s.
+    ClearCellRangeRelaxed(start_cell_index + 1, end_cell_index);
+    // Finally, set all bits until the end address in the last cell with 0s.
+    ClearBitsInCell(end_cell_index, end_index_mask | (end_index_mask - 1));
+  } else {
+    ClearBitsInCell(start_cell_index,
+                    end_index_mask | (end_index_mask - start_index_mask));
+  }
+  if (mode == AccessMode::ATOMIC) {
+    // This fence prevents re-ordering of publishing stores with the mark-bit
+    // clearing stores.
+    base::SeqCst_MemoryFence();
+  }
+}
+
+template <>
+V8_EXPORT_PRIVATE bool
+ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsSetInRange(
+    uint32_t start_index, uint32_t end_index);
+
+template <>
+V8_EXPORT_PRIVATE bool
+ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsClearInRange(
+    uint32_t start_index, uint32_t end_index);
+
+template <>
+void ConcurrentBitmap<AccessMode::NON_ATOMIC>::Print();
+
+template <>
+V8_EXPORT_PRIVATE bool ConcurrentBitmap<AccessMode::NON_ATOMIC>::IsClean();
+
 class Marking : public AllStatic {
  public:
   // TODO(hpayer): The current mark bit operations use as default NON_ATOMIC
@@ -690,6 +690,16 @@ class MemoryChunk {
 
   VirtualMemory* reserved_memory() { return &reservation_; }
 
+  template <AccessMode mode>
+  ConcurrentBitmap<mode>* marking_bitmap() const {
+    return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
+  }
+
+  template <AccessMode mode>
+  ConcurrentBitmap<mode>* young_generation_bitmap() const {
+    return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
+  }
+
   size_t size_;
   uintptr_t flags_;
 
@@ -5818,7 +5818,8 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
   Address start_address = array->address();
   Address end_address = start_address + array->Size();
   Page* page = Page::FromAddress(start_address);
-  IncrementalMarking::MarkingState* marking_state = marking->marking_state();
+  IncrementalMarking::NonAtomicMarkingState* marking_state =
+      marking->non_atomic_marking_state();
   CHECK(marking_state->IsBlack(*array));
   CHECK(marking_state->bitmap(page)->AllBitsSetInRange(
       page->AddressToMarkbitIndex(start_address),
@@ -5885,7 +5886,8 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
   Address start_address = array->address();
   Address end_address = start_address + array->Size();
   Page* page = Page::FromAddress(start_address);
-  IncrementalMarking::MarkingState* marking_state = marking->marking_state();
+  IncrementalMarking::NonAtomicMarkingState* marking_state =
+      marking->non_atomic_marking_state();
   CHECK(marking_state->IsBlack(*array));
 
   CHECK(marking_state->bitmap(page)->AllBitsSetInRange(
@@ -150,6 +150,7 @@ v8_source_set("unittests_sources") {
     "eh-frame-iterator-unittest.cc",
    "eh-frame-writer-unittest.cc",
    "heap/barrier-unittest.cc",
+    "heap/bitmap-test-utils.h",
    "heap/bitmap-unittest.cc",
    "heap/embedder-tracing-unittest.cc",
    "heap/gc-idle-time-handler-unittest.cc",
test/unittests/heap/bitmap-test-utils.h (new file, 35 lines)
@@ -0,0 +1,35 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_HEAP_BITMAP_TEST_UTILS_H_
+#define V8_UNITTESTS_HEAP_BITMAP_TEST_UTILS_H_
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+class TestWithBitmap : public ::testing::Test {
+ public:
+  TestWithBitmap() : memory_(new uint8_t[Bitmap::kSize]) {
+    memset(memory_, 0, Bitmap::kSize);
+  }
+
+  ~TestWithBitmap() override { delete[] memory_; }
+
+  T* bitmap() { return reinterpret_cast<T*>(memory_); }
+  uint8_t* raw_bitmap() { return memory_; }
+
+ private:
+  uint8_t* memory_;
+};
+
+using BitmapTypes = ::testing::Types<ConcurrentBitmap<AccessMode::NON_ATOMIC>,
+                                     ConcurrentBitmap<AccessMode::ATOMIC>>;
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_UNITTESTS_HEAP_BITMAP_TEST_UTILS_H_
@@ -3,40 +3,26 @@
 // found in the LICENSE file.
 
 #include "src/heap/spaces.h"
+#include "test/unittests/heap/bitmap-test-utils.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
-namespace {
+namespace v8 {
+namespace internal {
 
-using v8::internal::Bitmap;
+const uint32_t kBlackCell = 0xAAAAAAAA;
+const uint32_t kWhiteCell = 0x00000000;
+const uint32_t kBlackByte = 0xAA;
+const uint32_t kWhiteByte = 0x00;
 
-class BitmapTest : public ::testing::Test {
- public:
-  static const uint32_t kBlackCell;
-  static const uint32_t kWhiteCell;
-  static const uint32_t kBlackByte;
-  static const uint32_t kWhiteByte;
+template <typename T>
+using BitmapTest = TestWithBitmap<T>;
 
-  BitmapTest() : memory_(new uint8_t[Bitmap::kSize]) {
-    memset(memory_, 0, Bitmap::kSize);
-  }
+TYPED_TEST_SUITE(BitmapTest, BitmapTypes);
 
-  ~BitmapTest() override { delete[] memory_; }
+using NonAtomicBitmapTest =
+    TestWithBitmap<ConcurrentBitmap<AccessMode::NON_ATOMIC>>;
 
-  Bitmap* bitmap() { return reinterpret_cast<Bitmap*>(memory_); }
-  uint8_t* raw_bitmap() { return memory_; }
-
- private:
-  uint8_t* memory_;
-};
-
-
-const uint32_t BitmapTest::kBlackCell = 0xAAAAAAAA;
-const uint32_t BitmapTest::kWhiteCell = 0x00000000;
-const uint32_t BitmapTest::kBlackByte = 0xAA;
-const uint32_t BitmapTest::kWhiteByte = 0x00;
-
-
-TEST_F(BitmapTest, IsZeroInitialized) {
+TEST_F(NonAtomicBitmapTest, IsZeroInitialized) {
   // We require all tests to start from a zero-initialized bitmap. Manually
   // verify this invariant here.
   for (size_t i = 0; i < Bitmap::kSize; i++) {
@@ -44,9 +30,8 @@ TEST_F(BitmapTest, IsZeroInitialized) {
   }
 }
 
-
-TEST_F(BitmapTest, Cells) {
-  Bitmap* bm = bitmap();
+TEST_F(NonAtomicBitmapTest, Cells) {
+  auto bm = bitmap();
   bm->cells()[1] = kBlackCell;
   uint8_t* raw = raw_bitmap();
   int second_cell_base = Bitmap::kBytesPerCell;
@@ -55,8 +40,7 @@ TEST_F(BitmapTest, Cells) {
   }
 }
 
-
-TEST_F(BitmapTest, CellsCount) {
+TEST_F(NonAtomicBitmapTest, CellsCount) {
   int last_cell_index = bitmap()->CellsCount() - 1;
   bitmap()->cells()[last_cell_index] = kBlackCell;
   // Manually verify on raw memory.
@@ -71,17 +55,34 @@ TEST_F(BitmapTest, CellsCount) {
   }
 }
 
-
-TEST_F(BitmapTest, IsClean) {
-  Bitmap* bm = bitmap();
+TEST_F(NonAtomicBitmapTest, IsClean) {
+  auto bm = bitmap();
   EXPECT_TRUE(bm->IsClean());
   bm->cells()[0] = kBlackCell;
   EXPECT_FALSE(bm->IsClean());
 }
 
+TYPED_TEST(BitmapTest, Clear) {
+  auto bm = this->bitmap();
+  for (size_t i = 0; i < Bitmap::kSize; i++) {
+    this->raw_bitmap()[i] = 0xFFu;
+  }
+  bm->Clear();
+  for (size_t i = 0; i < Bitmap::kSize; i++) {
+    EXPECT_EQ(this->raw_bitmap()[i], 0);
+  }
+}
+
-TEST_F(BitmapTest, ClearRange1) {
-  Bitmap* bm = bitmap();
+TYPED_TEST(BitmapTest, MarkAllBits) {
+  auto bm = this->bitmap();
+  bm->MarkAllBits();
+  for (size_t i = 0; i < Bitmap::kSize; i++) {
+    EXPECT_EQ(this->raw_bitmap()[i], 0xFF);
+  }
+}
+
+TYPED_TEST(BitmapTest, ClearRange1) {
+  auto bm = this->bitmap();
   bm->cells()[0] = kBlackCell;
   bm->cells()[1] = kBlackCell;
   bm->cells()[2] = kBlackCell;
@@ -91,9 +92,8 @@ TEST_F(BitmapTest, ClearRange1) {
   EXPECT_EQ(bm->cells()[2], kBlackCell);
 }
 
-
-TEST_F(BitmapTest, ClearRange2) {
-  Bitmap* bm = bitmap();
+TYPED_TEST(BitmapTest, ClearRange2) {
+  auto bm = this->bitmap();
   bm->cells()[0] = kBlackCell;
   bm->cells()[1] = kBlackCell;
   bm->cells()[2] = kBlackCell;
@@ -104,4 +104,59 @@ TEST_F(BitmapTest, ClearRange2) {
   EXPECT_EQ(bm->cells()[2], kBlackCell);
 }
 
-}  // namespace
+TYPED_TEST(BitmapTest, SetAndClearRange) {
+  auto bm = this->bitmap();
+  for (int i = 0; i < 3; i++) {
+    bm->SetRange(i, Bitmap::kBitsPerCell + i);
+    CHECK_EQ(bm->cells()[0], 0xFFFFFFFFu << i);
+    CHECK_EQ(bm->cells()[1], (1u << i) - 1);
+    bm->ClearRange(i, Bitmap::kBitsPerCell + i);
+    CHECK_EQ(bm->cells()[0], 0x0u);
+    CHECK_EQ(bm->cells()[1], 0x0u);
+  }
+}
+
+// AllBitsSetInRange() and AllBitsClearInRange() are only used when verifying
+// the heap on the main thread so they don't have atomic implementations.
+TEST_F(NonAtomicBitmapTest, ClearMultipleRanges) {
+  auto bm = this->bitmap();
+
+  bm->SetRange(0, Bitmap::kBitsPerCell * 3);
+  CHECK(bm->AllBitsSetInRange(0, Bitmap::kBitsPerCell));
+
+  bm->ClearRange(Bitmap::kBitsPerCell / 2, Bitmap::kBitsPerCell);
+  bm->ClearRange(Bitmap::kBitsPerCell,
+                 Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2);
+  bm->ClearRange(Bitmap::kBitsPerCell * 2 + 8, Bitmap::kBitsPerCell * 2 + 16);
+  bm->ClearRange(Bitmap::kBitsPerCell * 2 + 24, Bitmap::kBitsPerCell * 3);
+
+  CHECK_EQ(bm->cells()[0], 0xFFFFu);
+  CHECK(bm->AllBitsSetInRange(0, Bitmap::kBitsPerCell / 2));
+  CHECK(
+      bm->AllBitsClearInRange(Bitmap::kBitsPerCell / 2, Bitmap::kBitsPerCell));
+
+  CHECK_EQ(bm->cells()[1], 0xFFFF0000u);
+  CHECK(bm->AllBitsClearInRange(
+      Bitmap::kBitsPerCell, Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2));
+  CHECK(bm->AllBitsSetInRange(Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2,
+                              Bitmap::kBitsPerCell * 2));
+
+  CHECK_EQ(bm->cells()[2], 0xFF00FFu);
+  CHECK(bm->AllBitsSetInRange(
+      Bitmap::kBitsPerCell * 2,
+      Bitmap::kBitsPerCell * 2 + Bitmap::kBitsPerCell / 4));
+  CHECK(bm->AllBitsClearInRange(
+      Bitmap::kBitsPerCell * 2 + Bitmap::kBitsPerCell / 4,
+      Bitmap::kBitsPerCell * 2 + Bitmap::kBitsPerCell / 2));
+  CHECK(bm->AllBitsSetInRange(
+      Bitmap::kBitsPerCell * 2 + Bitmap::kBitsPerCell / 2,
+      Bitmap::kBitsPerCell * 2 + Bitmap::kBitsPerCell / 2 +
+          Bitmap::kBitsPerCell / 4));
+  CHECK(bm->AllBitsClearInRange(Bitmap::kBitsPerCell * 2 +
+                                    Bitmap::kBitsPerCell / 2 +
+                                    Bitmap::kBitsPerCell / 4,
+                                Bitmap::kBitsPerCell * 3));
+}
+
+}  // namespace internal
+}  // namespace v8
@@ -6,15 +6,19 @@
 
 #include "src/globals.h"
 #include "src/heap/marking.h"
+#include "test/unittests/heap/bitmap-test-utils.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace v8 {
 namespace internal {
 
+template <typename T>
+using MarkingTest = TestWithBitmap<T>;
+
-TEST(Marking, TransitionWhiteBlackWhite) {
-  Bitmap* bitmap = reinterpret_cast<Bitmap*>(
-      calloc(Bitmap::kSize / kTaggedSize, kTaggedSize));
+TYPED_TEST_SUITE(MarkingTest, BitmapTypes);
+
+TYPED_TEST(MarkingTest, TransitionWhiteBlackWhite) {
+  auto bitmap = this->bitmap();
   const int kLocationsSize = 3;
   int position[kLocationsSize] = {
       Bitmap::kBitsPerCell - 2, Bitmap::kBitsPerCell - 1, Bitmap::kBitsPerCell};
@@ -29,12 +33,10 @@ TEST(Marking, TransitionWhiteBlackWhite) {
     CHECK(Marking::IsWhite(mark_bit));
     CHECK(!Marking::IsImpossible(mark_bit));
   }
-  free(bitmap);
 }
 
-TEST(Marking, TransitionWhiteGreyBlack) {
-  Bitmap* bitmap = reinterpret_cast<Bitmap*>(
-      calloc(Bitmap::kSize / kTaggedSize, kTaggedSize));
+TYPED_TEST(MarkingTest, TransitionWhiteGreyBlack) {
+  auto bitmap = this->bitmap();
   const int kLocationsSize = 3;
   int position[kLocationsSize] = {
       Bitmap::kBitsPerCell - 2, Bitmap::kBitsPerCell - 1, Bitmap::kBitsPerCell};
@@ -55,54 +57,7 @@ TEST(Marking, TransitionWhiteGreyBlack) {
     CHECK(Marking::IsWhite(mark_bit));
     CHECK(!Marking::IsImpossible(mark_bit));
   }
-  free(bitmap);
 }
 
-TEST(Marking, SetAndClearRange) {
-  Bitmap* bitmap = reinterpret_cast<Bitmap*>(
-      calloc(Bitmap::kSize / kTaggedSize, kTaggedSize));
-  for (int i = 0; i < 3; i++) {
-    bitmap->SetRange(i, Bitmap::kBitsPerCell + i);
-    CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xFFFFFFFFu << i);
-    CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], (1u << i) - 1);
-    bitmap->ClearRange(i, Bitmap::kBitsPerCell + i);
-    CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0x0u);
-    CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0x0u);
-  }
-  free(bitmap);
-}
-
-TEST(Marking, ClearMultipleRanges) {
-  Bitmap* bitmap = reinterpret_cast<Bitmap*>(
-      calloc(Bitmap::kSize / kTaggedSize, kTaggedSize));
-  CHECK(bitmap->AllBitsClearInRange(0, Bitmap::kBitsPerCell * 3));
-  bitmap->SetRange(0, Bitmap::kBitsPerCell * 3);
-  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xFFFFFFFFu);
-  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xFFFFFFFFu);
-  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xFFFFFFFFu);
-  CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell * 3));
-  bitmap->ClearRange(Bitmap::kBitsPerCell / 2, Bitmap::kBitsPerCell);
-  bitmap->ClearRange(Bitmap::kBitsPerCell,
-                     Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2);
-  bitmap->ClearRange(Bitmap::kBitsPerCell * 2 + 8,
-                     Bitmap::kBitsPerCell * 2 + 16);
-  bitmap->ClearRange(Bitmap::kBitsPerCell * 2 + 24, Bitmap::kBitsPerCell * 3);
-  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xFFFFu);
-  CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell / 2));
-  CHECK(bitmap->AllBitsClearInRange(Bitmap::kBitsPerCell / 2,
-                                    Bitmap::kBitsPerCell));
-  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xFFFF0000u);
-  CHECK(
-      bitmap->AllBitsSetInRange(Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2,
-                                2 * Bitmap::kBitsPerCell));
-  CHECK(bitmap->AllBitsClearInRange(
-      Bitmap::kBitsPerCell, Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2));
-  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xFF00FFu);
-  CHECK(bitmap->AllBitsSetInRange(2 * Bitmap::kBitsPerCell,
-                                  2 * Bitmap::kBitsPerCell + 8));
-  CHECK(bitmap->AllBitsClearInRange(2 * Bitmap::kBitsPerCell + 24,
-                                    Bitmap::kBitsPerCell * 3));
-  free(bitmap);
-}
 }  // namespace internal
 }  // namespace v8