update page_flags to have a more portable definition

daan 2019-10-17 16:48:16 -07:00
parent 93b4281b82
commit 5de851a84d
7 changed files with 23 additions and 26 deletions

View File

@@ -111,7 +111,7 @@
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
     <ClCompile>
-      <WarningLevel>Level3</WarningLevel>
+      <WarningLevel>Level4</WarningLevel>
       <Optimization>Disabled</Optimization>
       <SDLCheck>true</SDLCheck>
       <ConformanceMode>true</ConformanceMode>

View File

@@ -345,19 +345,19 @@ static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size)
 // Page flags
 //-----------------------------------------------------------
 static inline bool mi_page_is_in_full(const mi_page_t* page) {
-  return page->flags.in_full;
+  return page->flags.x.in_full;
 }
 static inline void mi_page_set_in_full(mi_page_t* page, bool in_full) {
-  page->flags.in_full = in_full;
+  page->flags.x.in_full = in_full;
 }
 static inline bool mi_page_has_aligned(const mi_page_t* page) {
-  return page->flags.has_aligned;
+  return page->flags.x.has_aligned;
 }
 static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
-  page->flags.has_aligned = has_aligned;
+  page->flags.x.has_aligned = has_aligned;
 }

View File

@@ -131,17 +131,13 @@ typedef enum mi_delayed_e {
 // The `in_full` and `has_aligned` page flags are put in a union to efficiently
-// test if both are false (`value == 0`) in the `mi_free` routine.
-typedef struct mi_page_flags_s {
-  #pragma warning(suppress:4201)
-  union {
-    uint8_t full_aligned;
-    struct {
-      uint8_t in_full : 1;
-      uint8_t has_aligned : 1;
-    };
-  };
-  bool is_zero;   // `true` if the blocks in the free list are zero initialized
+// test if both are false (`full_aligned == 0`) in the `mi_free` routine.
+typedef union mi_page_flags_s {
+  uint8_t full_aligned;
+  struct {
+    uint8_t in_full : 1;
+    uint8_t has_aligned : 1;
+  } x;
 } mi_page_flags_t;
 // Thread free list.
@@ -177,7 +173,8 @@ typedef struct mi_page_s {
   // layout like this to optimize access in `mi_malloc` and `mi_free`
   uint16_t capacity;        // number of blocks committed, must be the first field, see `segment.c:page_clear`
   uint16_t reserved;        // number of blocks reserved in memory
-  mi_page_flags_t flags;    // `in_full` and `has_aligned` flags (16 bits)
+  mi_page_flags_t flags;    // `in_full` and `has_aligned` flags (8 bits)
+  bool is_zero;             // `true` if the blocks in the free list are zero initialized
   mi_block_t* free;         // list of available free blocks (`malloc` allocates from this list)
 #if MI_SECURE
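
An editorial note on the design (not part of the commit): keeping both flags inside one byte-sized union lets `mi_free` rule out both slow paths, a page on the full queue and a page holding aligned blocks, with a single byte compare on `full_aligned` instead of two separate bit tests. Below is a minimal standalone sketch of that idea; the names `page_flags_t` and `page_no_flags_set` are made up for illustration and are not mimalloc's own.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

// Same shape as the new mi_page_flags_t above: one byte overlaying two flag bits.
typedef union page_flags_u {
  uint8_t full_aligned;        // read/compare both flags with a single load
  struct {
    uint8_t in_full     : 1;   // page sits in the heap's "full" queue
    uint8_t has_aligned : 1;   // page contains blocks handed out at an aligned offset
  } x;
} page_flags_t;

// Fast-path test: both flags are false exactly when the whole byte is zero.
static inline bool page_no_flags_set(page_flags_t flags) {
  return flags.full_aligned == 0;
}

int main(void) {
  page_flags_t flags = { 0 };
  printf("fast path: %d\n", page_no_flags_set(flags));  // 1: fast free path possible
  flags.x.has_aligned = 1;                              // either bit makes the byte non-zero
  printf("fast path: %d\n", page_no_flags_set(flags));  // 0: slow path required
  return 0;
}

Naming the inner struct `x` instead of leaving it anonymous is what makes the definition portable: an anonymous struct member is exactly what MSVC's warning C4201 ("nonstandard extension used: nameless struct/union") flags, which the removed `#pragma warning(suppress:4201)` used to silence, and is presumably what allows the project's warning level to be raised to Level4 above.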

View File

@@ -126,7 +126,7 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
   if (newp != NULL) {
     if (zero && newsize > size) {
       const mi_page_t* page = _mi_ptr_page(newp);
-      if (page->flags.is_zero) {
+      if (page->is_zero) {
         // already zero initialized
         mi_assert_expensive(mi_mem_is_zero(newp,newsize));
       }

View File

@@ -33,7 +33,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   page->used++;
   mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
 #if (MI_DEBUG)
-  if (!page->flags.is_zero) { memset(block, MI_DEBUG_UNINIT, size); }
+  if (!page->is_zero) { memset(block, MI_DEBUG_UNINIT, size); }
 #elif (MI_SECURE)
   block->next = 0;
 #endif
@@ -96,7 +96,7 @@ void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
   mi_assert_internal(p != NULL);
   mi_assert_internal(size > 0 && page->block_size >= size);
   mi_assert_internal(_mi_ptr_page(p)==page);
-  if (page->flags.is_zero) {
+  if (page->is_zero) {
     // already zero initialized memory?
     ((mi_block_t*)p)->next = 0; // clear the free list pointer
     mi_assert_expensive(mi_mem_is_zero(p,page->block_size));
@@ -147,7 +147,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
     mi_block_set_next(page, block, page->free);
     page->free = block;
     page->used--;
-    page->flags.is_zero = false;
+    page->is_zero = false;
     _mi_segment_page_free(page,true,&heap->tld->segments);
   }
   return;
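
A side note on the `is_zero` test in `_mi_block_zero_init` above: while a page's memory is still known to be zero, a zero-initialized allocation only has to clear the one word that held the free-list link; otherwise it pays for a full memset. A reduced sketch under assumed names (`block_t`, `block_zero_init`, with `page_is_zero` standing in for `page->is_zero`), not mimalloc's actual routine:

#include <stddef.h>
#include <string.h>
#include <stdbool.h>

// Illustrative block header: the first word of a free block links to the next free block.
typedef struct block_s { struct block_s* next; } block_t;

// Zero-initialize a block that is about to be handed to the caller.
static void block_zero_init(bool page_is_zero, void* p, size_t block_size) {
  if (page_is_zero) {
    // Memory is already zero: only the free-list pointer stored in the
    // block needs to be cleared before the caller sees it.
    ((block_t*)p)->next = NULL;
  }
  else {
    // Otherwise zero the whole block.
    memset(p, 0, block_size);
  }
}

int main(void) {
  char buf[32] = { 1 };                        // pretend this block still holds old data
  block_zero_init(false, buf, sizeof(buf));    // not a known-zero page: full memset
  return buf[0];                               // 0
}

Skipping the memset matters most for larger blocks, where clearing already-zero memory would be pure overhead.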

View File

@@ -13,7 +13,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // Empty page used to initialize the small free pages array
 const mi_page_t _mi_page_empty = {
   0, false, false, false, false, 0, 0,
-  { { 0 }, false },
+  { 0 }, false,
   NULL, // free
 #if MI_SECURE
   0,

View File

@@ -192,7 +192,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
       // usual case
       page->free = page->local_free;
       page->local_free = NULL;
-      page->flags.is_zero = false;
+      page->is_zero = false;
     }
     else if (force) {
       // append -- only on shutdown (force) as this is a linear operation
@@ -204,7 +204,7 @@
       mi_block_set_next(page, tail, page->free);
       page->free = page->local_free;
       page->local_free = NULL;
-      page->flags.is_zero = false;
+      page->is_zero = false;
     }
   }
@@ -559,7 +559,7 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_stats_t* st
   // extension into zero initialized memory preserves the zero'd free list
   if (!page->is_zero_init) {
-    page->flags.is_zero = false;
+    page->is_zero = false;
   }
   mi_assert_expensive(mi_page_is_valid_init(page));
 }
@@ -579,7 +579,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
 #if MI_SECURE
   page->cookie = _mi_heap_random(heap) | 1;
 #endif
-  page->flags.is_zero = page->is_zero_init;
+  page->is_zero = page->is_zero_init;
   mi_assert_internal(page->capacity == 0);
   mi_assert_internal(page->free == NULL);
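
Finally, the `_mi_page_free_collect` hunks above show the invariant that keeps `is_zero` honest: the flag may only stay set while every block on the free list is untouched zero memory, so it is cleared as soon as locally freed (and therefore dirty) blocks are recycled onto the free list, while `mi_page_init` and `mi_page_extend_free` seed it from `is_zero_init`. A loose sketch of the recycle step, with invented names rather than mimalloc's internals:

#include <stdbool.h>
#include <stddef.h>

typedef struct block_s { struct block_s* next; } block_t;

// Minimal model of a page, for illustration only.
typedef struct page_s {
  block_t* free;        // blocks ready to be handed out by malloc
  block_t* local_free;  // blocks freed by the owning thread since the last collect
  bool     is_zero;     // true only while every block on `free` is fully zeroed
} page_t;

// Loosely mirrors the "usual case" of _mi_page_free_collect: recycle locally
// freed blocks into the allocation free list (the guard condition here is a
// simplification of the real collect logic).
static void page_free_collect(page_t* page) {
  if (page->free == NULL && page->local_free != NULL) {
    page->free       = page->local_free;
    page->local_free = NULL;
    page->is_zero    = false;   // recycled blocks held user data, so the zero guarantee is gone
  }
}

int main(void) {
  block_t b = { NULL };
  page_t page = { NULL, &b, true };
  page_free_collect(&page);
  return page.is_zero ? 1 : 0;   // 0: the flag was cleared by recycling
}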