malloc: Use memsize instead of CHUNK_AVAILABLE_SIZE

This is a pure refactoring change that does not affect behaviour.

The CHUNK_AVAILABLE_SIZE name was unclear; the memsize name follows
the existing convention of mem denoting the allocation that is handed
out to the user, while chunk is its internally used container.

The user-owned memory for a given chunk starts at chunk2mem(p) and
its size is memsize(p).  memsize is not valid for dumped heap chunks.
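
As an illustrative sketch (editorial addition, not part of the commit):
inside malloc.c, where these macros are visible, the two views of a live,
non-dumped allocation returned to the user as ptr relate as follows.

  mchunkptr p = mem2chunk (ptr);  /* internal container of the allocation */
  void *mem = chunk2mem (p);      /* first byte handed out to the user */
  size_t usable = memsize (p);    /* size of the user-owned region */
  /* The user-owned memory is the half-open range [mem, mem + usable).  */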

Moved the definition next to other chunk and mem related macros.

Reviewed-by: DJ Delorie <dj@redhat.com>
Author: Szabolcs Nagy
Date:   2021-03-08 12:59:05 +00:00
commit faf003ed8d, parent 1dc17ea8f8

2 changed files with 24 additions and 26 deletions

diff --git a/malloc/hooks.c b/malloc/hooks.c
--- a/malloc/hooks.c
+++ b/malloc/hooks.c

@@ -102,7 +102,7 @@ malloc_check_get_size (mchunkptr p)
 
   assert (using_malloc_checking == 1);
 
-  for (size = CHUNK_AVAILABLE_SIZE (p) - 1;
+  for (size = CHUNK_HDR_SZ + memsize (p) - 1;
        (c = *SAFE_CHAR_OFFSET (p, size)) != magic;
        size -= c)
     {
@@ -130,7 +130,7 @@ mem2mem_check (void *ptr, size_t req_sz)
 
   p = mem2chunk (ptr);
   magic = magicbyte (p);
-  max_sz = CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ;
+  max_sz = memsize (p);
   for (i = max_sz - 1; i > req_sz; i -= block_sz)
     {
@@ -175,7 +175,7 @@ mem2chunk_check (void *mem, unsigned char **magic_p)
             next_chunk (prev_chunk (p)) != p)))
         return NULL;
 
-      for (sz = CHUNK_AVAILABLE_SIZE (p) - 1;
+      for (sz = CHUNK_HDR_SZ + memsize (p) - 1;
           (c = *SAFE_CHAR_OFFSET (p, sz)) != magic;
           sz -= c)
        {
@@ -200,7 +200,7 @@ mem2chunk_check (void *mem, unsigned char **magic_p)
           ((prev_size (p) + sz) & page_mask) != 0)
         return NULL;
 
-      for (sz = CHUNK_AVAILABLE_SIZE (p) - 1;
+      for (sz = CHUNK_HDR_SZ + memsize (p) - 1;
           (c = *SAFE_CHAR_OFFSET (p, sz)) != magic;
           sz -= c)
        {
@@ -279,8 +279,7 @@ free_check (void *mem, const void *caller)
   else
     {
       /* Mark the chunk as belonging to the library again.  */
-      (void)tag_region (chunk2rawmem (p), CHUNK_AVAILABLE_SIZE (p)
-                                          - CHUNK_HDR_SZ);
+      (void)tag_region (chunk2rawmem (p), memsize (p));
       _int_free (&main_arena, p, 1);
       __libc_lock_unlock (main_arena.mutex);
     }

diff --git a/malloc/malloc.c b/malloc/malloc.c
--- a/malloc/malloc.c
+++ b/malloc/malloc.c

@@ -1331,18 +1331,6 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    MINSIZE :                                                      \
    ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
 
-/* Available size of chunk.  This is the size of the real usable data
-   in the chunk, plus the chunk header.  Note: If memory tagging is
-   enabled the layout changes to accomodate the granule size, this is
-   wasteful for small allocations so not done by default.  The logic
-   does not work if chunk headers are not granule aligned.  */
-_Static_assert (__MTAG_GRANULE_SIZE <= CHUNK_HDR_SZ,
-                "memory tagging is not supported with large granule.");
-#define CHUNK_AVAILABLE_SIZE(p) \
-  (__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \
-   chunksize (p) :                                                    \
-   chunksize (p) + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
-
 /* Check if REQ overflows when padded and aligned and if the resulting value
    is less than PTRDIFF_T.  Returns TRUE and the requested size or MINSIZE in
    case the value is less than MINSIZE on SZ or false if any of the previous
@@ -1465,14 +1453,26 @@ checked_request2size (size_t req, size_t *sz) __nonnull (1)
 #pragma GCC poison mchunk_size
 #pragma GCC poison mchunk_prev_size
 
+/* This is the size of the real usable data in the chunk.  Not valid for
+   dumped heap chunks.  */
+#define memsize(p)                                                    \
+  (__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \
+    chunksize (p) - CHUNK_HDR_SZ :                                    \
+    chunksize (p) - CHUNK_HDR_SZ + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
+
+/* If memory tagging is enabled the layout changes to accomodate the granule
+   size, this is wasteful for small allocations so not done by default.
+   Both the chunk header and user data has to be granule aligned.  */
+_Static_assert (__MTAG_GRANULE_SIZE <= CHUNK_HDR_SZ,
+                "memory tagging is not supported with large granule.");
+
 static __always_inline void *
 tag_new_usable (void *ptr)
 {
   if (__glibc_unlikely (mtag_enabled) && ptr)
     {
       mchunkptr cp = mem2chunk(ptr);
-      ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr),
-                                    CHUNK_AVAILABLE_SIZE (cp) - CHUNK_HDR_SZ);
+      ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr), memsize (cp));
     }
   return ptr;
 }
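
For reference (editorial note, not part of the commit): both branches of the
removed CHUNK_AVAILABLE_SIZE are exactly CHUNK_HDR_SZ larger than the matching
branches of memsize, which is why every call site in this commit rewrites
mechanically, as sketched below.

  /* Equivalences implied by the two definitions, for any valid chunk p:
       CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ == memsize (p)
       CHUNK_AVAILABLE_SIZE (p) - 1 == CHUNK_HDR_SZ + memsize (p) - 1
     Worked example, assuming a typical 64-bit build (SIZE_SZ == 8,
     CHUNK_HDR_SZ == 16): an untagged, non-mmapped chunk with
     chunksize (p) == 32 has memsize (p) == 32 - 16 + 8 == 24, since an
     in-use chunk may also store user data in the next chunk's prev_size
     field.  */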
@@ -3316,8 +3316,7 @@ __libc_free (void *mem)
       MAYBE_INIT_TCACHE ();
 
       /* Mark the chunk as belonging to the library again.  */
-      (void)tag_region (chunk2rawmem (p),
-                        CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
+      (void)tag_region (chunk2rawmem (p), memsize (p));
 
       ar_ptr = arena_for_chunk (p);
       _int_free (ar_ptr, p, 0);
@@ -3459,7 +3458,7 @@ __libc_realloc (void *oldmem, size_t bytes)
       newp = __libc_malloc (bytes);
       if (newp != NULL)
         {
-          size_t sz = CHUNK_AVAILABLE_SIZE (oldp) - CHUNK_HDR_SZ;
+          size_t sz = memsize (oldp);
           memcpy (newp, oldmem, sz);
           (void) tag_region (chunk2rawmem (oldp), sz);
           _int_free (ar_ptr, oldp, 0);
@@ -3675,7 +3674,7 @@ __libc_calloc (size_t n, size_t elem_size)
      regardless of MORECORE_CLEARS, so we zero the whole block while
      doing so.  */
   if (__glibc_unlikely (mtag_enabled))
-    return tag_new_zero_region (mem, CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
+    return tag_new_zero_region (mem, memsize (p));
 
   INTERNAL_SIZE_T csz = chunksize (p);
@@ -4863,7 +4862,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
           else
             {
               void *oldmem = chunk2rawmem (oldp);
-              size_t sz = CHUNK_AVAILABLE_SIZE (oldp) - CHUNK_HDR_SZ;
+              size_t sz = memsize (oldp);
               (void) tag_region (oldmem, sz);
               newmem = tag_new_usable (newmem);
               memcpy (newmem, oldmem, sz);
@@ -5110,7 +5109,7 @@ musable (void *mem)
             result = chunksize (p) - CHUNK_HDR_SZ;
         }
       else if (inuse (p))
-        result = CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ;
+        result = memsize (p);
 
       return result;
     }