strengthen memory order of bit operations; insert memory fences

daan 2020-07-26 14:19:30 -07:00
parent a9f46dc86f
commit a468430772
4 changed files with 10 additions and 8 deletions

View File

@@ -232,15 +232,15 @@ static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t
#endif
static inline uintptr_t mi_atomic_add(_Atomic(uintptr_t)* p, uintptr_t add) {
MI_USING_STD
- return atomic_fetch_add_explicit(p, add, memory_order_relaxed);
+ return atomic_fetch_add_explicit(p, add, memory_order_acq_rel);
}
static inline uintptr_t mi_atomic_and(_Atomic(uintptr_t)* p, uintptr_t x) {
MI_USING_STD
- return atomic_fetch_and_explicit(p, x, memory_order_release);
+ return atomic_fetch_and_explicit(p, x, memory_order_acq_rel);
}
static inline uintptr_t mi_atomic_or(_Atomic(uintptr_t)* p, uintptr_t x) {
MI_USING_STD
- return atomic_fetch_or_explicit(p, x, memory_order_release);
+ return atomic_fetch_or_explicit(p, x, memory_order_acq_rel);
}
static inline bool mi_atomic_cas_weak(_Atomic(uintptr_t)* p, uintptr_t* expected, uintptr_t desired) {
MI_USING_STD

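The add/and/or primitives above are used for bit-mask updates that both publish state and observe state published by another thread; with memory_order_relaxed or memory_order_release the load half of the read-modify-write does not acquire the other thread's prior writes, which is what the move to memory_order_acq_rel addresses. A minimal standalone sketch of that hand-off (plain C11, not mimalloc code; the flag layout and names are invented for illustration):

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static _Atomic(unsigned) flags;     // bit 0: A published, bit 1: B published
static int data_a, data_b;          // plain (non-atomic) payloads

static int worker_a(void* arg) {
  (void)arg;
  data_a = 42;                      // plain write, published by the fetch_or below
  // acq_rel: the release half orders the write to data_a before the bit set;
  // the acquire half makes data_b visible when B's bit is observed set.
  unsigned prev = atomic_fetch_or_explicit(&flags, 1u, memory_order_acq_rel);
  if (prev & 2u) printf("A sees data_b = %d\n", data_b);
  return 0;
}

static int worker_b(void* arg) {
  (void)arg;
  data_b = 7;
  unsigned prev = atomic_fetch_or_explicit(&flags, 2u, memory_order_acq_rel);
  if (prev & 1u) printf("B sees data_a = %d\n", data_a);
  return 0;
}

int main(void) {
  thrd_t a, b;
  thrd_create(&a, worker_a, NULL);
  thrd_create(&b, worker_b, NULL);
  thrd_join(a, NULL);
  thrd_join(b, NULL);
  return 0;
}

With a release-only fetch_or, the thread that observes the other's bit would still race on the plain payload read; acq_rel makes the observing side synchronize with the publishing side.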
View File

@@ -323,7 +323,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
if (mi_unlikely(use_delayed)) {
// racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
- mi_heap_t* const heap = mi_page_heap(page);
+ mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_read(&page->xheap)); //mi_page_heap(page);
mi_assert_internal(heap != NULL);
if (heap != NULL) {
// add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)

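The change above swaps the mi_page_heap accessor for a direct mi_atomic_read of page->xheap, giving the intentionally racy read of the owning heap a stronger-ordered atomic load. A standalone sketch of the pattern with invented types (not mimalloc's definitions), assuming an acquire load is the intended strength:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct my_heap_s { int id; } my_heap_t;

typedef struct my_page_s {
  _Atomic(uintptr_t) xheap;   // owning heap stored as an integer so it can be swapped/cleared atomically
} my_page_t;

// Read the owning heap through an explicit atomic load. Another thread may
// update xheap concurrently; going through an atomic load with a chosen
// memory order keeps that race well-defined (and analyzable by a race
// detector) instead of a plain load of a non-atomic field.
static my_heap_t* my_page_heap(my_page_t* page) {
  return (my_heap_t*)atomic_load_explicit(&page->xheap, memory_order_acquire);
}

int main(void) {
  my_heap_t heap = { 1 };
  my_page_t page;
  atomic_store_explicit(&page.xheap, (uintptr_t)&heap, memory_order_release);
  printf("heap id = %d\n", my_page_heap(&page)->id);
  return 0;
}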
View File

@@ -472,7 +472,6 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
if (any_reset && mi_option_is_enabled(mi_option_reset_decommits)) {
fully_committed = false;
}
_mi_mem_free(segment, segment_size, segment->memid, fully_committed, any_reset, tld->os);
}
@@ -629,6 +628,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
return NULL;
}
}
+ atomic_thread_fence(memory_order_acq_rel);
segment->memid = memid;
segment->mem_is_fixed = mem_large;
segment->mem_is_committed = commit;
@@ -638,6 +638,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
mi_assert_internal(segment->mem_is_fixed ? segment->mem_is_committed : true);
if (!pages_still_good) {
// zero the segment info (but not the `mem` fields)
+ atomic_thread_fence(memory_order_release); // with read of `abandoned_next` in `mi_abandoned_pop`
ptrdiff_t ofs = offsetof(mi_segment_t, next);
memset((uint8_t*)segment + ofs, 0, info_size - ofs);
@@ -791,6 +792,7 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, bool a
uint16_t reserved = page->reserved;
ptrdiff_t ofs = offsetof(mi_page_t,capacity);
memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs);
+ atomic_thread_fence(memory_order_release);
page->capacity = capacity;
page->reserved = reserved;
page->xblock_size = block_size;
@@ -801,7 +803,7 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, bool a
mi_pages_reset_add(segment, page, tld);
}
- page->capacity = 0; // after reset there can be zero'd now
+ page->capacity = 0; // after reset these can be zero'd now
page->reserved = 0;
}
@@ -979,7 +981,7 @@ static mi_segment_t* mi_abandoned_pop(void) {
do {
segment = mi_tagged_segment_ptr(ts);
if (segment != NULL) {
- mi_segment_t* anext = mi_atomic_read_ptr_relaxed(mi_segment_t, &segment->abandoned_next);
+ mi_segment_t* anext = mi_atomic_read_ptr(mi_segment_t, &segment->abandoned_next);
next = mi_tagged_segment(anext, ts); // note: reads the segment's `abandoned_next` field so should not be decommitted
}
} while (segment != NULL && !mi_atomic_cas_weak_acq_rel(&abandoned, &ts, next));

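The inserted fences rely on the C11 fence-pairing rule: a release fence sequenced before an atomic store synchronizes with an acquire fence (or acquire operation) sequenced after an atomic load that reads that store, so the plain writes that re-initialize a segment or page become visible to the thread that later picks it up, for example through `abandoned_next`. A minimal standalone sketch of that pairing (names invented, not mimalloc code):

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>
#include <threads.h>

typedef struct seg_s {
  int info[4];                       // payload re-initialized by the producer
} seg_t;

static seg_t the_seg;
static _Atomic(seg_t*) published;    // consumer picks the segment up from here

static int producer(void* arg) {
  (void)arg;
  memset(the_seg.info, 0, sizeof(the_seg.info));     // plain writes
  the_seg.info[0] = 123;
  atomic_thread_fence(memory_order_release);         // order the writes above...
  atomic_store_explicit(&published, &the_seg, memory_order_relaxed);  // ...before this publication
  return 0;
}

static int consumer(void* arg) {
  (void)arg;
  seg_t* s;
  while ((s = atomic_load_explicit(&published, memory_order_relaxed)) == NULL) { /* spin */ }
  atomic_thread_fence(memory_order_acquire);         // pairs with the release fence
  printf("info[0] = %d\n", s->info[0]);              // guaranteed to read 123
  return 0;
}

int main(void) {
  thrd_t p, c;
  thrd_create(&c, consumer, NULL);
  thrd_create(&p, producer, NULL);
  thrd_join(p, NULL);
  thrd_join(c, NULL);
  return 0;
}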
View File

@@ -189,7 +189,7 @@ static void test_stress(void) {
}
}
// mi_collect(false);
- #ifndef NDEBUG
+ #if !defined(NDEBUG) || defined(MI_TSAN)
if ((n + 1) % 10 == 0) { printf("- iterations left: %3d\n", ITER - (n + 1)); }
#endif
}
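The added MI_TSAN case keeps the per-iteration progress output in release (NDEBUG) builds when the stress test is built for ThreadSanitizer, whose instrumentation makes each iteration considerably slower. For context, a standalone example (unrelated to mimalloc's code) of the kind of unsynchronized update ThreadSanitizer reports and its atomic fix, typically compiled with the -fsanitize=thread flag of clang or gcc:

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static long plain_counter;                 // ThreadSanitizer reports a data race on this
static _Atomic(long) atomic_counter;       // the race-free alternative

static int worker(void* arg) {
  (void)arg;
  for (int i = 0; i < 100000; i++) {
    plain_counter++;                                                      // racy read-modify-write
    atomic_fetch_add_explicit(&atomic_counter, 1, memory_order_relaxed);  // well-defined
  }
  return 0;
}

int main(void) {
  thrd_t t1, t2;
  thrd_create(&t1, worker, NULL);
  thrd_create(&t2, worker, NULL);
  thrd_join(t1, NULL);
  thrd_join(t2, NULL);
  printf("plain=%ld atomic=%ld\n", plain_counter, atomic_load(&atomic_counter));
  return 0;
}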