[heap] Make SetRange and ClearRange operations of Bitmap thread-safe.

The boundary cells of the mark-bitmap can be accessed concurrently,
so they need to be updated with an atomic compare-and-swap (CAS).
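
As a standalone illustration (using std::atomic rather than V8's
base::Atomic32): when two threads set adjacent ranges that share a
boundary cell, a plain read-modify-write can lose one thread's bits,
and a CAS retry loop makes the OR atomic. A minimal sketch:

  #include <atomic>
  #include <cstdint>

  // Racy: if two threads interleave the load and the store on the same
  // cell, one thread's bits are silently dropped (a lost update).
  void SetBitsRacy(uint32_t* cell, uint32_t mask) { *cell |= mask; }

  // Atomic: if another thread changes the cell between the load and the
  // CAS, compare_exchange_weak fails, refreshes old_value, and we retry.
  void SetBitsAtomic(std::atomic<uint32_t>* cell, uint32_t mask) {
    uint32_t old_value = cell->load(std::memory_order_relaxed);
    while (!cell->compare_exchange_weak(old_value, old_value | mask,
                                        std::memory_order_release,
                                        std::memory_order_relaxed)) {
      // old_value now holds the latest cell contents; retry the OR.
    }
  }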

BUG=chromium:694255

Change-Id: Ibe85f00c8b4ccc61edc43b400c5b08a6d0ba620e
Reviewed-on: https://chromium-review.googlesource.com/521103
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#45731}
Author: Ulan Degenbaev <ulan@chromium.org> (committed by Commit Bot)
Date:   2017-06-06 14:44:12 +02:00
Parent: e3a8eb512f
Commit: bdf0ea99df

3 changed files with 95 additions and 11 deletions


@@ -169,7 +169,22 @@ class Bitmap {
     for (int i = 0; i < CellsCount(); i++) cells()[i] = 0;
   }
 
+  // Clears bits in the given cell. The mask specifies bits to clear: if a
+  // bit is set in the mask then the corresponding bit is cleared in the cell.
+  template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+  void ClearBitsInCell(uint32_t cell_index, uint32_t mask);
+
+  // Sets bits in the given cell. The mask specifies bits to set: if a
+  // bit is set in the mask then the corresponding bit is set in the cell.
+  template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+  void SetBitsInCell(uint32_t cell_index, uint32_t mask);
+
   // Sets all bits in the range [start_index, end_index).
+  // If the access mode is ATOMIC then the cells at the boundary of the range
+  // are updated with an atomic compare-and-swap operation. The inner cells
+  // are updated with an ordinary write, so the caller must ensure exclusive
+  // access to the inner cells.
+  template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
   void SetRange(uint32_t start_index, uint32_t end_index) {
     unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
     MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
@@ -180,19 +195,26 @@ class Bitmap {
     if (start_cell_index != end_cell_index) {
       // Firstly, fill all bits from the start address to the end of the first
       // cell with 1s.
-      cells()[start_cell_index] |= ~(start_index_mask - 1);
+      SetBitsInCell<mode>(start_cell_index, ~(start_index_mask - 1));
       // Then fill all in between cells with 1s.
       for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
+        // The callers must ensure that the inner cells in the range are not
+        // accessed concurrently.
         cells()[i] = ~0u;
       }
       // Finally, fill all bits until the end address in the last cell with 1s.
-      cells()[end_cell_index] |= (end_index_mask - 1);
+      SetBitsInCell<mode>(end_cell_index, (end_index_mask - 1));
     } else {
-      cells()[start_cell_index] |= end_index_mask - start_index_mask;
+      SetBitsInCell<mode>(start_cell_index, end_index_mask - start_index_mask);
     }
   }
 
   // Clears all bits in the range [start_index, end_index).
+  // If the access mode is ATOMIC then the cells at the boundary of the range
+  // are updated with an atomic compare-and-swap operation. The inner cells
+  // are updated with an ordinary write, so the caller must ensure exclusive
+  // access to the inner cells.
+  template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
   void ClearRange(uint32_t start_index, uint32_t end_index) {
     unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
     MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
@@ -203,15 +225,18 @@ class Bitmap {
     if (start_cell_index != end_cell_index) {
       // Firstly, fill all bits from the start address to the end of the first
       // cell with 0s.
-      cells()[start_cell_index] &= (start_index_mask - 1);
+      ClearBitsInCell<mode>(start_cell_index, ~(start_index_mask - 1));
       // Then fill all in between cells with 0s.
       for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
+        // The callers must ensure that the inner cells in the range are not
+        // accessed concurrently.
         cells()[i] = 0;
       }
       // Finally, fill all bits until the end address in the last cell with 0s.
-      cells()[end_cell_index] &= ~(end_index_mask - 1);
+      ClearBitsInCell<mode>(end_cell_index, (end_index_mask - 1));
     } else {
-      cells()[start_cell_index] &= ~(end_index_mask - start_index_mask);
+      ClearBitsInCell<mode>(start_cell_index,
+                            (end_index_mask - start_index_mask));
     }
   }
 
@@ -340,6 +365,46 @@ class Bitmap {
   }
 };
 
+template <>
+inline void Bitmap::SetBitsInCell<MarkBit::NON_ATOMIC>(uint32_t cell_index,
+                                                       uint32_t mask) {
+  cells()[cell_index] |= mask;
+}
+
+template <>
+inline void Bitmap::SetBitsInCell<MarkBit::ATOMIC>(uint32_t cell_index,
+                                                   uint32_t mask) {
+  base::Atomic32* cell =
+      reinterpret_cast<base::Atomic32*>(cells() + cell_index);
+  base::Atomic32 old_value;
+  base::Atomic32 new_value;
+  do {
+    old_value = base::Relaxed_Load(cell);
+    new_value = old_value | mask;
+  } while (base::Release_CompareAndSwap(cell, old_value, new_value) !=
+           old_value);
+}
+
+template <>
+inline void Bitmap::ClearBitsInCell<MarkBit::NON_ATOMIC>(uint32_t cell_index,
+                                                         uint32_t mask) {
+  cells()[cell_index] &= ~mask;
+}
+
+template <>
+inline void Bitmap::ClearBitsInCell<MarkBit::ATOMIC>(uint32_t cell_index,
+                                                     uint32_t mask) {
+  base::Atomic32* cell =
+      reinterpret_cast<base::Atomic32*>(cells() + cell_index);
+  base::Atomic32 old_value;
+  base::Atomic32 new_value;
+  do {
+    old_value = base::Relaxed_Load(cell);
+    new_value = old_value & ~mask;
+  } while (base::Release_CompareAndSwap(cell, old_value, new_value) !=
+           old_value);
+}
+
 class Marking : public AllStatic {
  public:
   // TODO(hpayer): The current mark bit operations use as default NON_ATOMIC
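
The range operations above split [start_index, end_index) into a first
cell, whole inner cells, and a last cell, using mask arithmetic on the
32-bit cells. A minimal standalone sketch of that arithmetic, with plain
uint32_t values standing in for Bitmap's cells (illustrative only):

  #include <cassert>
  #include <cstdint>

  // For a bit index i within a cell and mask = 1u << i:
  //   ~(mask - 1)           selects bit i and all higher bits (first cell)
  //   (mask - 1)            selects all bits below bit i      (last cell)
  //   end_mask - start_mask selects [start, end) inside a single cell
  int main() {
    const uint32_t start_mask = 1u << 3;             // range starts at bit 3
    const uint32_t end_mask = 1u << 7;               // range ends before bit 7
    assert(~(start_mask - 1) == 0xfffffff8u);        // bits 3..31
    assert((end_mask - 1) == 0x0000007fu);           // bits 0..6
    assert(end_mask - start_mask == 0x00000078u);    // bits 3..6
    return 0;
  }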


@@ -839,8 +839,10 @@ void Page::CreateBlackArea(Address start, Address end) {
   DCHECK_EQ(Page::FromAddress(start), this);
   DCHECK_NE(start, end);
   DCHECK_EQ(Page::FromAddress(end - 1), this);
-  MarkingState::Internal(this).bitmap()->SetRange(AddressToMarkbitIndex(start),
-                                                  AddressToMarkbitIndex(end));
+  MarkingState::Internal(this)
+      .bitmap()
+      ->SetRange<IncrementalMarking::kAtomicity>(AddressToMarkbitIndex(start),
+                                                 AddressToMarkbitIndex(end));
   MarkingState::Internal(this)
       .IncrementLiveBytes<IncrementalMarking::kAtomicity>(
           static_cast<int>(end - start));
@@ -851,8 +853,10 @@ void Page::DestroyBlackArea(Address start, Address end) {
   DCHECK_EQ(Page::FromAddress(start), this);
   DCHECK_NE(start, end);
   DCHECK_EQ(Page::FromAddress(end - 1), this);
-  MarkingState::Internal(this).bitmap()->ClearRange(
-      AddressToMarkbitIndex(start), AddressToMarkbitIndex(end));
+  MarkingState::Internal(this)
+      .bitmap()
+      ->ClearRange<IncrementalMarking::kAtomicity>(AddressToMarkbitIndex(start),
+                                                   AddressToMarkbitIndex(end));
   MarkingState::Internal(this)
       .IncrementLiveBytes<IncrementalMarking::kAtomicity>(
           -static_cast<int>(end - start));
@@ -3147,7 +3151,8 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                ClearRecordedSlots::kNo);
   if (heap()->incremental_marking()->black_allocation()) {
-    ObjectMarking::WhiteToBlack(object, MarkingState::Internal(object));
+    ObjectMarking::WhiteToBlack<IncrementalMarking::kAtomicity>(
+        object, MarkingState::Internal(object));
   }
   return object;
 }
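
Note how the call sites above pass the access mode as a template
argument (IncrementalMarking::kAtomicity), so the atomic and non-atomic
paths are selected at compile time rather than via a runtime flag. A
minimal sketch of that dispatch pattern, with illustrative names that
are not V8's:

  #include <atomic>
  #include <cstdint>

  enum AccessMode { NON_ATOMIC, ATOMIC };

  template <AccessMode mode = NON_ATOMIC>
  void SetBits(uint32_t* cell, uint32_t mask);

  template <>
  void SetBits<NON_ATOMIC>(uint32_t* cell, uint32_t mask) {
    *cell |= mask;  // caller guarantees exclusive access
  }

  template <>
  void SetBits<ATOMIC>(uint32_t* cell, uint32_t mask) {
    // fetch_or is the std::atomic shorthand for the CAS loop above; the
    // cast mirrors the patch's reinterpret_cast of cells() to Atomic32*.
    reinterpret_cast<std::atomic<uint32_t>*>(cell)->fetch_or(
        mask, std::memory_order_release);
  }

  // A call site fixes the mode once, analogous to kAtomicity:
  constexpr AccessMode kMode = ATOMIC;
  void MarkCell(uint32_t* cell, uint32_t mask) { SetBits<kMode>(cell, mask); }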


@@ -76,6 +76,20 @@ TEST(Marking, SetAndClearRange) {
   free(bitmap);
 }
 
+TEST(Marking, SetAndClearRangeAtomic) {
+  Bitmap* bitmap = reinterpret_cast<Bitmap*>(
+      calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+  for (int i = 0; i < 3; i++) {
+    bitmap->SetRange<MarkBit::ATOMIC>(i, Bitmap::kBitsPerCell + i);
+    CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffffu << i);
+    CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], (1u << i) - 1);
+    bitmap->ClearRange<MarkBit::ATOMIC>(i, Bitmap::kBitsPerCell + i);
+    CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0x0u);
+    CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0x0u);
+  }
+  free(bitmap);
+}
+
 TEST(Marking, ClearMultipleRanges) {
   Bitmap* bitmap = reinterpret_cast<Bitmap*>(
       calloc(Bitmap::kSize / kPointerSize, kPointerSize));