better padding implementation, more precise statistics

daan 2020-01-31 20:34:24 -08:00
parent 8422ab125d
commit 68112a2751
7 changed files with 89 additions and 65 deletions

View File

@@ -310,8 +310,10 @@ static inline uintptr_t _mi_ptr_cookie(const void* p) {
 ----------------------------------------------------------- */
 static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
-  mi_assert_internal(size <= MI_SMALL_SIZE_MAX);
-  return heap->pages_free_direct[_mi_wsize_from_size(size)];
+  mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE));
+  const size_t idx = _mi_wsize_from_size(size);
+  mi_assert_internal(idx < MI_PAGES_DIRECT);
+  return heap->pages_free_direct[idx];
 }

 // Get the page belonging to a certain size class
@@ -375,6 +377,12 @@ static inline size_t mi_page_block_size(const mi_page_t* page) {
   }
 }

+// Get the client usable block size of a page (without padding etc)
+static inline size_t mi_page_usable_block_size(const mi_page_t* page) {
+  return mi_page_block_size(page) - MI_PADDING_SIZE;
+}
+
 // Thread free access
 static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) {
   return (mi_block_t*)(mi_atomic_read_relaxed(&page->xthread_free) & ~3);
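Note: the small-object fast path now indexes `pages_free_direct` by the word count of the padded request (`size + MI_PADDING_SIZE`), so the table needs one extra padding word of headroom, which is what the new `MI_PAGES_DIRECT` bound expresses. A standalone sketch of the indexing arithmetic, using illustrative stand-in constants rather than the real mimalloc definitions:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WORD_SIZE        sizeof(intptr_t)
    #define SMALL_WSIZE_MAX  128                      // largest "small" object, in machine words
    #define PADDING_WSIZE    1                        // one word of padding per block when enabled
    #define PAGES_DIRECT     (SMALL_WSIZE_MAX + PADDING_WSIZE + 1)

    // round a byte count up to whole words, mirroring _mi_wsize_from_size
    static size_t wsize_from_size(size_t size) {
      return (size + WORD_SIZE - 1) / WORD_SIZE;
    }

    int main(void) {
      size_t size   = SMALL_WSIZE_MAX * WORD_SIZE;             // worst-case small request in bytes
      size_t padded = size + PADDING_WSIZE * WORD_SIZE;        // what the fast path actually looks up
      size_t idx    = wsize_from_size(padded);
      printf("index %zu, table size %d\n", idx, PAGES_DIRECT); // 129 stays below 130
      return 0;
    }

The worst-case small request of 128 words plus one padding word rounds to index 129, which is still inside a table of 130 entries.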

View File

@@ -54,16 +54,17 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_ENCODE_FREELIST 1
 #endif

-// Reserve extra padding at the end of each block; must be a multiple of `2*sizeof(intptr_t)`!
+// Reserve extra padding at the end of each block to be more resilient against heap block overflows.
 // If free lists are encoded, the padding is checked if it was modified on free.
 #if (!defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1))
 #define MI_PADDING
 #endif

+// The padding size must be at least `sizeof(intptr_t)`!
 #if defined(MI_PADDING)
-#define MI_PADDING_SIZE (2*sizeof(intptr_t))
+#define MI_PADDING_WSIZE 1
 #else
-#define MI_PADDING_SIZE 0
+#define MI_PADDING_WSIZE 0
 #endif
@@ -94,11 +95,13 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_INTPTR_SIZE (1<<MI_INTPTR_SHIFT)
 #define MI_INTPTR_BITS (MI_INTPTR_SIZE*8)

+#define MI_PADDING_SIZE (MI_PADDING_WSIZE * MI_INTPTR_SIZE)
+
 #define KiB ((size_t)1024)
 #define MiB (KiB*KiB)
 #define GiB (MiB*KiB)

 // ------------------------------------------------------
 // Main internal data-structures
 // ------------------------------------------------------
@@ -306,19 +309,20 @@ typedef struct mi_random_cxt_s {
   int output_available;
 } mi_random_ctx_t;

+#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)
+
 // A heap owns a set of pages.
 struct mi_heap_s {
   mi_tld_t* tld;
-  mi_page_t* pages_free_direct[MI_SMALL_WSIZE_MAX + 2]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
+  mi_page_t* pages_free_direct[MI_PAGES_DIRECT];        // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
   mi_page_queue_t pages[MI_BIN_FULL + 1];               // queue of pages for each size class (or "bin")
   volatile _Atomic(mi_block_t*) thread_delayed_free;
   uintptr_t thread_id;                                  // thread this heap belongs too
   uintptr_t cookie;                                     // random cookie to verify pointers (see `_mi_ptr_cookie`)
-  uintptr_t key[2];                                     // twb random keys used to encode the `thread_delayed_free` list
+  uintptr_t key[2];                                     // two random keys used to encode the `thread_delayed_free` list
   mi_random_ctx_t random;                               // random number context used for secure allocation
   size_t page_count;                                    // total number of pages in the `pages` queues.
   bool no_reclaim;                                      // `true` if this heap should not reclaim abandoned pages
 };

View File

@@ -25,7 +25,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
   const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)`

   // try if there is a small block available with just the right alignment
-  if (mi_likely(size <= (MI_SMALL_SIZE_MAX - MI_PADDING_SIZE))) {
+  if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
     mi_page_t* page = _mi_heap_get_free_small_page(heap,size + MI_PADDING_SIZE);
     const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
     if (mi_likely(page->free != NULL && is_aligned))

View File

@@ -38,14 +38,15 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   block->next = 0; // don't leak internal data
 #endif
 #if (MI_STAT>1)
-  if(size <= MI_LARGE_OBJ_SIZE_MAX) {
-    size_t bin = _mi_bin(size);
+  const size_t bsize = mi_page_usable_block_size(page);
+  if(bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+    const size_t bin = _mi_bin(bsize);
     mi_heap_stat_increase(heap,normal[bin], 1);
   }
 #endif
 #if defined(MI_PADDING) && defined(MI_ENCODE_FREELIST)
   mi_assert_internal((MI_PADDING_SIZE % sizeof(mi_block_t*)) == 0);
-  mi_block_t* const padding = (mi_block_t*)((uint8_t*)block + page->xblock_size - MI_PADDING_SIZE);
+  mi_block_t* const padding = (mi_block_t*)((uint8_t*)block + mi_page_usable_block_size(page));
   mi_block_set_nextx(page, padding, block, page->key[0], page->key[1]);
 #endif
   return block;
@@ -53,10 +54,18 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
 // allocate a small block
 extern inline mi_decl_allocator void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
-  mi_assert(size <= (MI_SMALL_SIZE_MAX - MI_PADDING_SIZE));
+  mi_assert(heap!=NULL);
+  mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
+  mi_assert(size <= MI_SMALL_SIZE_MAX);
   mi_page_t* page = _mi_heap_get_free_small_page(heap,size + MI_PADDING_SIZE);
   void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE);
-  mi_assert_internal(p==NULL || mi_page_block_size(_mi_ptr_page(p)) >= (size + MI_PADDING_SIZE));
+  mi_assert_internal(p==NULL || mi_usable_size(p) >= size);
+  #if MI_STAT>1
+  if (p != NULL) {
+    if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
+    mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
+  }
+  #endif
   return p;
 }
@@ -66,23 +75,22 @@ extern inline mi_decl_allocator void* mi_malloc_small(size_t size) mi_attr_noexc
 // The main allocation function
 extern inline mi_decl_allocator void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
-  mi_assert(heap!=NULL);
-  mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
-  void* p;
-  if (mi_likely(size <= (MI_SMALL_SIZE_MAX - MI_PADDING_SIZE))) {
-    p = mi_heap_malloc_small(heap, size);
+  if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
+    return mi_heap_malloc_small(heap, size);
   }
   else {
-    p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE);
+    mi_assert(heap!=NULL);
+    mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
+    void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE);
+    mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
+    #if MI_STAT>1
+    if (p != NULL) {
+      if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
+      mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
+    }
+    #endif
+    return p;
   }
-  #if MI_STAT>1
-  if (p != NULL) {
-    if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
-    mi_heap_stat_increase( heap, malloc, mi_good_size(size) ); // overestimate for aligned sizes
-  }
-  #endif
-  mi_assert_internal(p == NULL || mi_page_block_size(_mi_ptr_page(p)) >= (size + MI_PADDING_SIZE));
-  return p;
 }

 extern inline mi_decl_allocator void* mi_malloc(size_t size) mi_attr_noexcept {
@@ -91,20 +99,20 @@ extern inline mi_decl_allocator void* mi_malloc(size_t size) mi_attr_noexcept {
 void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
-  // note: we need to initialize the whole block to zero, not just size
+  // note: we need to initialize the whole usable block size to zero, not just the requested size,
   // or the recalloc/rezalloc functions cannot safely expand in place (see issue #63)
   UNUSED_RELEASE(size);
   mi_assert_internal(p != NULL);
-  mi_assert_internal(mi_page_block_size(page) >= (size + MI_PADDING_SIZE)); // size can be zero
+  mi_assert_internal(mi_usable_size(p) >= size); // size can be zero
   mi_assert_internal(_mi_ptr_page(p)==page);
   if (page->is_zero) {
     // already zero initialized memory?
     ((mi_block_t*)p)->next = 0; // clear the free list pointer
-    mi_assert_expensive(mi_mem_is_zero(p, mi_page_block_size(page) - MI_PADDING_SIZE));
+    mi_assert_expensive(mi_mem_is_zero(p, mi_page_usable_block_size(page)));
   }
   else {
     // otherwise memset
-    memset(p, 0, mi_page_block_size(page) - MI_PADDING_SIZE);
+    memset(p, 0, mi_page_usable_block_size(page));
   }
 }
@@ -183,10 +191,11 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
 #if defined(MI_PADDING) && defined(MI_ENCODE_FREELIST)
 static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
-  mi_block_t* const padding = (mi_block_t*)((uint8_t*)block + page->xblock_size - MI_PADDING_SIZE);
+  mi_block_t* const padding = (mi_block_t*)((uint8_t*)block + mi_page_usable_block_size(page));
   mi_block_t* const decoded = mi_block_nextx(page, padding, page->key[0], page->key[1]);
   if (decoded != block) {
-    _mi_error_message(EFAULT, "buffer overflow in heap block %p: write after %zu bytes\n", block, page->xblock_size);
+    const ptrdiff_t size = (uint8_t*)padding - (uint8_t*)block;
+    _mi_error_message(EFAULT, "buffer overflow in heap block %p: write after %zd bytes\n", block, size );
   }
 }
 #else
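Note: the padding word that `_mi_page_malloc` writes just past the usable block size acts as a canary. It holds the block pointer encoded with the page's two keys, and `mi_check_padding` decodes it on free, so any write past the usable size shows up as a mismatch; the error now reports the distance from the block start to the padding (the usable size) rather than the raw `xblock_size`. A simplified, self-contained sketch of the same idea, using a plain XOR-and-add encoding and `malloc` in place of mimalloc's `mi_block_set_nextx`/`mi_block_nextx` and page-backed blocks:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PADDING_SIZE sizeof(uintptr_t)

    // fixed stand-ins for the page's two random keys
    static const uintptr_t key0 = 0x12345678u;
    static const uintptr_t key1 = 0x09abcdefu;

    static uintptr_t encode(const void* block) {             // stand-in for mi_block_set_nextx
      return ((uintptr_t)block ^ key0) + key1;
    }

    static void* alloc_with_canary(size_t usable_size) {
      uint8_t* block = malloc(usable_size + PADDING_SIZE);   // padding lives after the usable bytes
      uintptr_t canary = encode(block);
      memcpy(block + usable_size, &canary, sizeof canary);
      return block;
    }

    static void free_with_check(void* block, size_t usable_size) {
      uintptr_t canary;
      memcpy(&canary, (const uint8_t*)block + usable_size, sizeof canary);
      uintptr_t decoded = (canary - key1) ^ key0;            // stand-in for mi_block_nextx
      if ((void*)decoded != block) {
        fprintf(stderr, "buffer overflow in heap block %p: write after %zu bytes\n",
                block, usable_size);
      }
      free(block);
    }

    int main(void) {
      char* p = alloc_with_canary(16);
      memset(p, 'x', 16 + sizeof(uintptr_t));  // overflow clobbers the canary word
      free_with_check(p, 16);                  // detects and reports the overflow
      return 0;
    }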
@@ -208,7 +217,7 @@ static mi_decl_noinline void mi_free_huge_block_mt(mi_segment_t* segment, mi_pag
   mi_assert_internal(mi_atomic_read_relaxed(&segment->thread_id)==0);

   // claim it and free
-  mi_heap_t* heap = mi_get_default_heap();
+  mi_heap_t* const heap = mi_get_default_heap();
   // paranoia: if this it the last reference, the cas should always succeed
   if (mi_atomic_cas_strong(&segment->thread_id, heap->thread_id, 0)) {
     mi_block_set_next(page, block, page->free);
@@ -216,8 +225,8 @@ static mi_decl_noinline void mi_free_huge_block_mt(mi_segment_t* segment, mi_pag
     page->used--;
     page->is_zero = false;
     mi_assert(page->used == 0);
-    mi_tld_t* tld = heap->tld;
-    const size_t bsize = mi_page_block_size(page);
+    mi_tld_t* const tld = heap->tld;
+    const size_t bsize = mi_page_usable_block_size(page);
     if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
       _mi_stat_decrease(&tld->stats.giant, bsize);
     }
@@ -232,14 +241,17 @@ static mi_decl_noinline void mi_free_huge_block_mt(mi_segment_t* segment, mi_pag
 static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
 {
   // huge page segments are always abandoned and can be freed immediately
-  mi_segment_t* segment = _mi_page_segment(page);
+  mi_segment_t* const segment = _mi_page_segment(page);
   if (segment->page_kind==MI_PAGE_HUGE) {
     mi_free_huge_block_mt(segment, page, block);
     return;
   }

+  // The padding check accesses the non-thread-owned page for the key values.
+  // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
   mi_check_padding(page, block);

+  // Try to put the block on either the page-local thread free list, or the heap delayed free list.
   mi_thread_free_t tfree;
   mi_thread_free_t tfreex;
   bool use_delayed;
@@ -259,7 +271,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   if (mi_unlikely(use_delayed)) {
     // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
-    mi_heap_t* heap = mi_page_heap(page);
+    mi_heap_t* const heap = mi_page_heap(page);
     mi_assert_internal(heap != NULL);
     if (heap != NULL) {
       // add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)
@@ -311,15 +323,15 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
 // Adjust a block that was allocated aligned, to the actual start of the block in the page.
 mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p) {
   mi_assert_internal(page!=NULL && p!=NULL);
-  size_t diff = (uint8_t*)p - _mi_page_start(segment, page, NULL);
-  size_t adjust = (diff % mi_page_block_size(page));
+  const size_t diff = (uint8_t*)p - _mi_page_start(segment, page, NULL);
+  const size_t adjust = (diff % mi_page_block_size(page));
   return (mi_block_t*)((uintptr_t)p - adjust);
 }

 static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, bool local, void* p) {
-  mi_page_t* page = _mi_segment_page_of(segment, p);
-  mi_block_t* block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
+  mi_page_t* const page = _mi_segment_page_of(segment, p);
+  mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
   _mi_free_block(page, local, block);
 }
@@ -356,12 +368,12 @@ void mi_free(void* p) mi_attr_noexcept
   mi_page_t* const page = _mi_segment_page_of(segment, p);

 #if (MI_STAT>1)
-  mi_heap_t* heap = mi_heap_get_default();
-  mi_heap_stat_decrease(heap, malloc, mi_usable_size(p));
-  if (page->xblock_size <= MI_LARGE_OBJ_SIZE_MAX) {
-    mi_heap_stat_decrease(heap, normal[_mi_bin(page->xblock_size)], 1);
-  }
-  // huge page stat is accounted for in `_mi_page_retire`
+  mi_heap_t* const heap = mi_heap_get_default();
+  const size_t bsize = mi_page_usable_block_size(page);
+  mi_heap_stat_decrease(heap, malloc, bsize);
+  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { // huge page stats are accounted for in `_mi_page_retire`
+    mi_heap_stat_decrease(heap, normal[_mi_bin(bsize)], 1);
+  }
 #endif

   if (mi_likely(tid == segment->thread_id && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
@@ -385,10 +397,10 @@ void mi_free(void* p) mi_attr_noexcept
 bool _mi_free_delayed_block(mi_block_t* block) {
   // get segment and page
-  const mi_segment_t* segment = _mi_ptr_segment(block);
+  const mi_segment_t* const segment = _mi_ptr_segment(block);
   mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
   mi_assert_internal(_mi_thread_id() == segment->thread_id);
-  mi_page_t* page = _mi_segment_page_of(segment, block);
+  mi_page_t* const page = _mi_segment_page_of(segment, block);

   // Clear the no-delayed flag so delayed freeing is used again for this page.
   // This must be done before collecting the free lists on this page -- otherwise
@@ -408,9 +420,9 @@ bool _mi_free_delayed_block(mi_block_t* block) {
 // Bytes available in a block
 size_t mi_usable_size(const void* p) mi_attr_noexcept {
   if (p==NULL) return 0;
-  const mi_segment_t* segment = _mi_ptr_segment(p);
-  const mi_page_t* page = _mi_segment_page_of(segment, p);
-  size_t size = mi_page_block_size(page) - MI_PADDING_SIZE;
+  const mi_segment_t* const segment = _mi_ptr_segment(p);
+  const mi_page_t* const page = _mi_segment_page_of(segment, p);
+  const size_t size = mi_page_usable_block_size(page);
   if (mi_unlikely(mi_page_has_aligned(page))) {
     ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)_mi_page_ptr_unalign(segment,page,p);
     mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
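Note: for aligned allocations the returned pointer can sit in the middle of its block, so `mi_usable_size` subtracts that offset from the page's usable block size. A small model of the adjustment, with made-up page and block sizes (not mimalloc's real layout):

    #include <stdint.h>
    #include <stdio.h>

    // Model of the adjustment: find how far p sits into its block (a modulo on the block
    // size, as _mi_page_ptr_unalign does), then subtract that from the usable block size.
    static size_t usable_at(uint8_t* page_start, size_t block_size, size_t padding, uint8_t* p) {
      size_t diff   = (size_t)(p - page_start);
      size_t adjust = diff % block_size;         // offset of p inside its block
      return (block_size - padding) - adjust;    // usable bytes left at p
    }

    int main(void) {
      static uint8_t page[4096];                 // pretend page holding 512-byte blocks
      size_t block_size = 512, padding = 8;
      uint8_t* p = page + 2*block_size + 64;     // an aligned pointer 64 bytes into the third block
      printf("usable bytes at p: %zu\n", usable_at(page, block_size, padding, p)); // prints 440
      return 0;
    }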

View File

@@ -752,7 +752,7 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
   mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE);
   mi_page_t* page = mi_page_fresh_alloc(heap,NULL,block_size);
   if (page != NULL) {
-    const size_t bsize = mi_page_block_size(page);
+    const size_t bsize = mi_page_usable_block_size(page);
     mi_assert_internal(mi_page_immediate_available(page));
     mi_assert_internal(bsize >= size);
     mi_assert_internal(_mi_page_segment(page)->page_kind==MI_PAGE_HUGE);
@@ -761,11 +761,11 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
     mi_page_set_heap(page, NULL);

     if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
-      _mi_stat_increase(&heap->tld->stats.giant, block_size);
+      _mi_stat_increase(&heap->tld->stats.giant, bsize);
       _mi_stat_counter_increase(&heap->tld->stats.giant_count, 1);
     }
     else {
-      _mi_stat_increase(&heap->tld->stats.huge, block_size);
+      _mi_stat_increase(&heap->tld->stats.huge, bsize);
       _mi_stat_counter_increase(&heap->tld->stats.huge_count, 1);
     }
   }

View File

@@ -19,7 +19,7 @@ int main() {
   // double_free1();
   // double_free2();
   // corrupt_free();
-  // block_overflow1();
+  //block_overflow1();

   void* p1 = malloc(78);
   void* p2 = malloc(24);

View File

@@ -27,7 +27,7 @@ terms of the MIT license.
 // argument defaults
 static int THREADS = 32;  // more repeatable if THREADS <= #processors
 static int SCALE   = 10;  // scaling factor
-static int ITER    = 50;  // N full iterations destructing and re-creating all threads
+static int ITER    = 10;  // N full iterations destructing and re-creating all threads

 // static int THREADS = 8;   // more repeatable if THREADS <= #processors
 // static int SCALE = 100;   // scaling factor