* malloc/malloc.c (_int_free): Remove test for NULL parameter.
	(_int_realloc): Call _int_free only if memory parameter is not NULL.
Ulrich Drepper 2004-11-13 05:43:36 +00:00
parent a9055cab7a
commit 37fa1953d5
4 changed files with 175 additions and 170 deletions

@@ -1,5 +1,8 @@
2004-11-12 Ulrich Drepper <drepper@redhat.com>
* malloc/malloc.c (_int_free): Remove test for NULL parameter.
(_int_realloc): Call _int_free only if memory parameter is not NULL.
* sysdeps/unix/sysv/linux/libc_fatal.c: Add new function __libc_message
which performs the printing and simple format string handling. The
string is written to tty, stderr, syslog in this order, stopping after

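The __libc_message hunks further down show only the tail of the new function. As a rough standalone sketch of the output order the ChangeLog describes (tty first, then stderr, then syslog, stopping after the first destination that accepts the message), not the actual glibc code; the helper name emit_message and the open() flags are assumptions:

#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <syslog.h>
#include <unistd.h>

/* Hypothetical helper, for illustration only.  */
static void
emit_message (const char *msg)
{
  size_t len = strlen (msg);
  bool written = false;

  /* 1. Try the controlling terminal first.  */
  int fd = open ("/dev/tty", O_WRONLY | O_NOCTTY);
  if (fd >= 0)
    {
      written = write (fd, msg, len) == (ssize_t) len;
      close (fd);
    }

  /* 2. Fall back to standard error.  */
  if (! written)
    written = write (STDERR_FILENO, msg, len) == (ssize_t) len;

  /* 3. Last resort: syslog.  */
  if (! written)
    syslog (LOG_ERR, "%s", msg);
}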
@@ -4196,191 +4196,188 @@ _int_free(mstate av, Void_t* mem)
mchunkptr fwd; /* misc temp for linking */
/* free(0) has no effect */
if (mem != 0) {
const char *errstr = NULL;
p = mem2chunk(mem);
size = chunksize(p);
/* Little security check which won't hurt performance: the
allocator never wrapps around at the end of the address space.
Therefore we can exclude some size values which might appear
here by accident or by "design" from some intruder. */
if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0))
{
errstr = "free(): invalid pointer";
errout:
malloc_printerr (check_action, errstr, mem);
return;
}
check_inuse_chunk(av, p);
/*
If eligible, place chunk on a fastbin so it can be found
and used quickly in malloc.
*/
if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
#if TRIM_FASTBINS
/*
If TRIM_FASTBINS set, don't place chunks
bordering top into fastbins
*/
&& (chunk_at_offset(p, size) != av->top)
#endif
) {
set_fastchunks(av);
fb = &(av->fastbins[fastbin_index(size)]);
/* Another simple check: make sure the top of the bin is not the
record we are going to add (i.e., double free). */
if (__builtin_expect (*fb == p, 0))
{
errstr = "double free or corruption (fasttop)";
goto errout;
}
p->fd = *fb;
*fb = p;
}
/*
Consolidate other non-mmapped chunks as they arrive.
*/
else if (!chunk_is_mmapped(p)) {
nextchunk = chunk_at_offset(p, size);
/* Lightweight tests: check whether the block is already the
top block. */
if (__builtin_expect (p == av->top, 0))
{
errstr = "double free or corruption (top)";
goto errout;
}
/* Or whether the next chunk is beyond the boundaries of the arena. */
if (__builtin_expect (contiguous (av)
&& (char *) nextchunk
>= ((char *) av->top + chunksize(av->top)), 0))
{
errstr = "double free or corruption (out)";
goto errout;
}
/* Or whether the block is actually not marked used. */
if (__builtin_expect (!prev_inuse(nextchunk), 0))
{
errstr = "double free or corruption (!prev)";
goto errout;
}
nextsize = chunksize(nextchunk);
assert(nextsize > 0);
/* consolidate backward */
if (!prev_inuse(p)) {
prevsize = p->prev_size;
size += prevsize;
p = chunk_at_offset(p, -((long) prevsize));
unlink(p, bck, fwd);
}
if (nextchunk != av->top) {
/* get and clear inuse bit */
nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
/* consolidate forward */
if (!nextinuse) {
unlink(nextchunk, bck, fwd);
size += nextsize;
} else
clear_inuse_bit_at_offset(nextchunk, 0);
/*
Place the chunk in unsorted chunk list. Chunks are
not placed into regular bins until after they have
been given one chance to be used in malloc.
*/
bck = unsorted_chunks(av);
fwd = bck->fd;
p->bk = bck;
p->fd = fwd;
bck->fd = p;
fwd->bk = p;
set_head(p, size | PREV_INUSE);
set_foot(p, size);
check_free_chunk(av, p);
}
/*
If the chunk borders the current high end of memory,
consolidate into top
*/
else {
size += nextsize;
set_head(p, size | PREV_INUSE);
av->top = p;
check_chunk(av, p);
}
/*
If freeing a large space, consolidate possibly-surrounding
chunks. Then, if the total unused topmost memory exceeds trim
threshold, ask malloc_trim to reduce top.
Unless max_fast is 0, we don't know if there are fastbins
bordering top, so we cannot tell for sure whether threshold
has been reached unless fastbins are consolidated. But we
don't want to consolidate on each free. As a compromise,
consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
is reached.
*/
if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
if (have_fastchunks(av))
malloc_consolidate(av);
if (av == &main_arena) {
#ifndef MORECORE_CANNOT_TRIM
if ((unsigned long)(chunksize(av->top)) >=
(unsigned long)(mp_.trim_threshold))
sYSTRIm(mp_.top_pad, av);
#endif
} else {
/* Always try heap_trim(), even if the top chunk is not
large, because the corresponding heap might go away. */
heap_info *heap = heap_for_ptr(top(av));
assert(heap->ar_ptr == av);
heap_trim(heap, mp_.top_pad);
}
}
}
/*
If the chunk was allocated via mmap, release via munmap(). Note
that if HAVE_MMAP is false but chunk_is_mmapped is true, then
user must have overwritten memory. There's nothing we can do to
catch this error unless MALLOC_DEBUG is set, in which case
check_inuse_chunk (above) will have triggered error.
*/
else {
#if HAVE_MMAP
int ret;
INTERNAL_SIZE_T offset = p->prev_size;
mp_.n_mmaps--;
mp_.mmapped_mem -= (size + offset);
ret = munmap((char*)p - offset, size + offset);
/* munmap returns non-zero on failure */
assert(ret == 0);
#endif
}
}
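A standalone illustration (not part of the patch; the addresses and sizes below are invented) of how the new "free(): invalid pointer" test in the hunk above works: for a nonzero size, (uintptr_t) -size equals 2^N - size (N being the pointer width), the highest address at which a chunk of that size could still start without wrapping past the end of the address space, so any p above it cannot be a legitimate chunk.

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  /* A modest chunk size at a plausible address passes the check.  */
  uintptr_t size = 0x20;
  uintptr_t p = 0x100000;
  assert (! (p > (uintptr_t) -size));

  /* A forged size close to the top of the address space makes
     (uintptr_t) -size tiny, so the same pointer is rejected.  */
  uintptr_t forged = (uintptr_t) -0x10;     /* 2^N - 0x10 */
  assert (p > (uintptr_t) -forged);         /* -forged == 0x10 */

  return 0;
}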
@@ -4528,7 +4525,8 @@ _int_realloc(mstate av, Void_t* oldmem, size_t bytes)
#if REALLOC_ZERO_BYTES_FREES
if (bytes == 0) {
_int_free(av, oldmem);
if (oldmem != 0)
_int_free(av, oldmem);
return 0;
}
#endif

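The two malloc.c changes taken together, sketched with hypothetical names (public_free and internal_free are not the glibc identifiers): the internal routine stops tolerating NULL, so each caller filters NULL itself, exactly as the _int_realloc hunk above now does.

#include <stddef.h>

static void internal_free (void *mem);  /* assumed precondition: mem != NULL */

void
public_free (void *mem)
{
  /* free (NULL) must remain a no-op at the public entry point ...  */
  if (mem == NULL)
    return;

  /* ... so the check happens once here, not inside the internal
     routine on every call.  */
  internal_free (mem);
}

static void
internal_free (void *mem)
{
  /* The real routine converts mem to a chunk and frees it; elided.  */
  (void) mem;
}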
@@ -129,6 +129,8 @@ __libc_message (int do_abort, const char *fmt, ...)
if (! written)
vsyslog (LOG_ERR, fmt, ap_copy);
va_end (ap_copy);
if (do_abort)
/* Kill the application. */
abort ();

@@ -141,6 +141,8 @@ __libc_message (int do_abort, const char *fmt, ...)
if (! written)
vsyslog (LOG_ERR, fmt, ap_copy);
va_end (ap_copy);
if (do_abort)
/* Terminate the process. */
abort ();