malloc: Remove malloc_get_state, malloc_set_state [BZ #19473]

After the removal of __malloc_initialize_hook, newly compiled
Emacs binaries are no longer able to use these interfaces.
malloc_get_state is only used during the Emacs build process,
so we provide a stub implementation only.  Existing Emacs binaries
will not call this stub function, but still reference the symbol.

The rewritten tst-mallocstate test constructs a dumped heap
which should approximate what existing Emacs binaries pass
to glibc malloc.
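
For context, the retired interface was used roughly as sketched below.  This is
an illustrative sketch only, not code taken from Emacs; before_dump, after_dump
and write_dump_blob are hypothetical names.

#include <stdlib.h>

extern void *malloc_get_state (void);   /* declaration removed from <malloc.h> */
extern int malloc_set_state (void *);
extern void write_dump_blob (void *);   /* hypothetical dumping helper */

/* Undumped Emacs, during the build: record malloc's internal state as
   an opaque blob and write it into the dumped executable.  With this
   commit the call always fails (NULL, errno set to ENOSYS).  */
void
before_dump (void)
{
  void *state = malloc_get_state ();
  if (state != NULL)
    write_dump_blob (state);
}

/* Dumped Emacs, at startup: hand the blob back to glibc before any
   further allocation so that the dumped heap can be used again.  This
   path keeps working, which is all existing binaries need.  */
void
after_dump (void *blob)
{
  if (malloc_set_state (blob) != 0)
    abort ();
}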
Florian Weimer 2016-10-26 13:28:28 +02:00
parent 261e6758e7
commit e863cce57b
7 changed files with 512 additions and 108 deletions

ChangeLog

@ -1,3 +1,19 @@
2016-10-26 Florian Weimer <fweimer@redhat.com>
[BZ #19473]
* malloc/malloc.h (malloc_get_state, malloc_set_state): Remove
declarations.
* malloc/malloc.c (malloc_get_state, malloc_set_state): Remove
weak aliases.
* malloc/hooks.c (__malloc_get_state): Remove definition.
(malloc_get_state): New stub implementation as
compatibility symbol.
(malloc_set_state): Rename from __malloc_set_state. Turn into
compat symbol.
* malloc/tst-mallocstate.c: Rewrite to approximate how Emacs uses
malloc_set_state.
* malloc/Makefile (LDFLAGS-tst-mallocstate): Link with -rdynamic.
2016-10-26 Florian Weimer <fweimer@redhat.com>
* iconvdata/iso646.c (enum variant): Drop illegal_var.

NEWS

@ -67,6 +67,14 @@ Version 2.25
for the Linux quota interface which predates kernel version 2.4.22 has
been removed.
* The malloc_get_state and malloc_set_state functions have been removed.
Already-existing binaries that dynamically link to these functions will
get a hidden implementation in which malloc_get_state is a stub. As far
as we know, these functions are used only by GNU Emacs and this change
will not adversely affect already-built Emacs executables. Any undumped
Emacs executables, which normally exist only during an Emacs build, should
be rebuilt by re-running “./configure; make” in the Emacs build tree.
* The “ip6-dotint” and “no-ip6-dotint” resolver options, and the
corresponding RES_NOIP6DOTINT flag from <resolv.h> have been removed.
“no-ip6-dotint” had already been the default, and support for the

malloc/Makefile

@ -69,6 +69,9 @@ $(objpfx)tst-malloc-thread-exit: $(shared-thread-library)
$(objpfx)tst-malloc-thread-fail: $(shared-thread-library)
$(objpfx)tst-malloc-fork-deadlock: $(shared-thread-library)
# Export the __malloc_initialize_hook variable to libc.so.
LDFLAGS-tst-mallocstate = -rdynamic
# These should be removed by `make clean'.
extra-objs = mcheck-init.o libmcheck.a

malloc/hooks.c

@ -447,6 +447,7 @@ memalign_check (size_t alignment, size_t bytes, const void *caller)
return mem2mem_check (mem, bytes);
}
+#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)
/* Get/set state: malloc_get_state() records the current state of all
malloc variables (_except_ for the actual heap contents and `hook'
@ -492,60 +493,21 @@ struct malloc_save_state
unsigned long narenas;
};
+/* Dummy implementation which always fails. We need to provide this
+symbol so that existing Emacs binaries continue to work with
+BIND_NOW. */
void *
-__malloc_get_state (void)
+attribute_compat_text_section
+malloc_get_state (void)
{
-struct malloc_save_state *ms;
-int i;
-mbinptr b;
-ms = (struct malloc_save_state *) __libc_malloc (sizeof (*ms));
-if (!ms)
-return 0;
-__libc_lock_lock (main_arena.mutex);
-malloc_consolidate (&main_arena);
-ms->magic = MALLOC_STATE_MAGIC;
-ms->version = MALLOC_STATE_VERSION;
-ms->av[0] = 0;
-ms->av[1] = 0; /* used to be binblocks, now no longer used */
-ms->av[2] = top (&main_arena);
-ms->av[3] = 0; /* used to be undefined */
-for (i = 1; i < NBINS; i++)
-{
-b = bin_at (&main_arena, i);
-if (first (b) == b)
-ms->av[2 * i + 2] = ms->av[2 * i + 3] = 0; /* empty bin */
-else
-{
-ms->av[2 * i + 2] = first (b);
-ms->av[2 * i + 3] = last (b);
-}
-}
-ms->sbrk_base = mp_.sbrk_base;
-ms->sbrked_mem_bytes = main_arena.system_mem;
-ms->trim_threshold = mp_.trim_threshold;
-ms->top_pad = mp_.top_pad;
-ms->n_mmaps_max = mp_.n_mmaps_max;
-ms->mmap_threshold = mp_.mmap_threshold;
-ms->check_action = check_action;
-ms->max_sbrked_mem = main_arena.max_system_mem;
-ms->max_total_mem = 0;
-ms->n_mmaps = mp_.n_mmaps;
-ms->max_n_mmaps = mp_.max_n_mmaps;
-ms->mmapped_mem = mp_.mmapped_mem;
-ms->max_mmapped_mem = mp_.max_mmapped_mem;
-ms->using_malloc_checking = using_malloc_checking;
-ms->max_fast = get_max_fast ();
-ms->arena_test = mp_.arena_test;
-ms->arena_max = mp_.arena_max;
-ms->narenas = narenas;
-__libc_lock_unlock (main_arena.mutex);
-return (void *) ms;
+__set_errno (ENOSYS);
+return NULL;
}
+compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);
int
-__malloc_set_state (void *msptr)
+attribute_compat_text_section
+malloc_set_state (void *msptr)
{
struct malloc_save_state *ms = (struct malloc_save_state *) msptr;
@ -612,6 +574,9 @@ __malloc_set_state (void *msptr)
return 0;
}
+compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);
+#endif /* SHLIB_COMPAT */
/*
* Local variables:

malloc/malloc.c

@ -5202,8 +5202,6 @@ strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
weak_alias (__malloc_stats, malloc_stats)
weak_alias (__malloc_usable_size, malloc_usable_size)
weak_alias (__malloc_trim, malloc_trim)
-weak_alias (__malloc_get_state, malloc_get_state)
-weak_alias (__malloc_set_state, malloc_set_state)
/* ------------------------------------------------------------

malloc/malloc.h

@ -134,13 +134,6 @@ extern void malloc_stats (void) __THROW;
/* Output information about state of allocator to stream FP. */
extern int malloc_info (int __options, FILE *__fp) __THROW;
-/* Record the state of all malloc variables in an opaque data structure. */
-extern void *malloc_get_state (void) __THROW;
-/* Restore the state of all malloc variables from data obtained with
-malloc_get_state(). */
-extern int malloc_set_state (void *__ptr) __THROW;
/* Hooks for debugging and user-defined versions. */
extern void (*__MALLOC_HOOK_VOLATILE __free_hook) (void *__ptr,
const void *)

malloc/tst-mallocstate.c

@ -1,4 +1,5 @@
-/* Copyright (C) 2001-2016 Free Software Foundation, Inc.
+/* Emulate Emacs heap dumping to test malloc_set_state.
+Copyright (C) 2001-2016 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
@ -17,68 +18,488 @@
<http://www.gnu.org/licenses/>. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <libc-symbols.h>
#include <shlib-compat.h>
#include "malloc.h"
-static int errors = 0;
+/* Make the compatibility symbols available to this test case. */
+void *malloc_get_state (void);
+compat_symbol_reference (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);
+int malloc_set_state (void *);
+compat_symbol_reference (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);
+static int do_test (void);
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
/* Maximum object size in the fake heap. */
enum { max_size = 64 };
/* Allocation actions. These are randomized actions executed on the
dumped heap (see allocation_tasks below). They are interspersed
with operations on the new heap (see heap_activity). */
enum allocation_action
{
action_free, /* Dumped and freed. */
action_realloc, /* Dumped and realloc'ed. */
action_realloc_same, /* Dumped and realloc'ed, same size. */
action_realloc_smaller, /* Dumped and realloc'ed, shrunk. */
action_count
};
/* Dumped heap. Initialize it, so that the object is placed into the
.data section, for increased realism. The size is an upper bound;
we use about half of the space. */
static size_t dumped_heap[action_count * max_size * max_size
/ sizeof (size_t)] = {1};
/* Next free space in the dumped heap. Also top of the heap at the
end of the initialization procedure. */
static size_t *next_heap_chunk;
/* Copied from malloc.c and hooks.c. The version is deliberately
lower than the final version of malloc_set_state. */
#define NBINS 128
#define MALLOC_STATE_MAGIC 0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 4l)
static struct
{
long magic;
long version;
void *av[NBINS * 2 + 2];
char *sbrk_base;
int sbrked_mem_bytes;
unsigned long trim_threshold;
unsigned long top_pad;
unsigned int n_mmaps_max;
unsigned long mmap_threshold;
int check_action;
unsigned long max_sbrked_mem;
unsigned long max_total_mem;
unsigned int n_mmaps;
unsigned int max_n_mmaps;
unsigned long mmapped_mem;
unsigned long max_mmapped_mem;
int using_malloc_checking;
unsigned long max_fast;
unsigned long arena_test;
unsigned long arena_max;
unsigned long narenas;
} save_state =
{
.magic = MALLOC_STATE_MAGIC,
.version = MALLOC_STATE_VERSION,
};
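/* Only the magic and version fields are set statically.  init_heap
   below fills in sbrk_base, sbrked_mem_bytes and the top-chunk slot
   av[2] once the fake heap has been populated; every other field of
   save_state simply stays zero.  */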
/* Allocate a blob in the fake heap. */
static void *
dumped_heap_alloc (size_t length)
{
/* malloc needs three state bits in the size field, so the minimum
alignment is 8 even on 32-bit architectures. malloc_set_state
should be compatible with such heaps even if it currently
provides more alignment to applications. */
enum
{
heap_alignment = 8,
heap_alignment_mask = heap_alignment - 1
};
_Static_assert (sizeof (size_t) <= heap_alignment,
"size_t compatible with heap alignment");
/* Need at least this many bytes for metadata and application
data. */
size_t chunk_size = sizeof (size_t) + length;
/* Round up the allocation size to the heap alignment. */
chunk_size += heap_alignment_mask;
chunk_size &= ~heap_alignment_mask;
if ((chunk_size & 3) != 0)
{
/* The lower three bits in the chunk size have to be 0. */
write_message ("error: dumped_heap_alloc computed invalid chunk size\n");
_exit (1);
}
if (next_heap_chunk == NULL)
/* Initialize the top of the heap. Add one word of zero padding,
to match existing practice. */
{
dumped_heap[0] = 0;
next_heap_chunk = dumped_heap + 1;
}
else
/* The previous chunk is allocated. */
chunk_size |= 1;
*next_heap_chunk = chunk_size;
/* User data starts after the chunk header. */
void *result = next_heap_chunk + 1;
next_heap_chunk += chunk_size / sizeof (size_t);
/* Mark the previous chunk as used. */
*next_heap_chunk = 1;
return result;
}
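/* Worked example: with an 8-byte size_t, a request for 10 bytes needs
   8 bytes of header plus 10 bytes of data, which the 8-byte heap
   alignment rounds up to 24.  The low bit of the stored size is
   malloc's "previous chunk in use" flag, so it is set for every chunk
   in the dumped heap except the very first one.  */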
/* Global seed variable for the random number generator. */
static unsigned long long global_seed;
/* Simple random number generator. The numbers are in the range from
0 to UINT_MAX (inclusive). */
static unsigned int
rand_next (unsigned long long *seed)
{
/* Linear congruential generator as used for MMIX. */
*seed = *seed * 6364136223846793005ULL + 1442695040888963407ULL;
return *seed >> 32;
}
/* Fill LENGTH bytes at BUFFER with random contents, as determined by
SEED. */
static void
randomize_buffer (unsigned char *buffer, size_t length,
unsigned long long seed)
{
for (size_t i = 0; i < length; ++i)
buffer[i] = rand_next (&seed);
}
/* Dumps the buffer to standard output, in hexadecimal. */
static void
dump_hex (unsigned char *buffer, size_t length)
{
for (int i = 0; i < length; ++i)
printf (" %02X", buffer[i]);
}
/* Set to true if an error is encountered. */
static bool errors = false;
/* Keep track of object allocations. */
struct allocation
{
unsigned char *data;
unsigned int size;
unsigned int seed;
};
/* Check that the allocation ALLOC has the expected contents; INDEX
identifies the allocation in error messages. */
static void
check_allocation (const struct allocation *alloc, int index)
{
size_t size = alloc->size;
if (alloc->data == NULL)
{
printf ("error: NULL pointer for allocation of size %zu at %d, seed %u\n",
size, index, alloc->seed);
errors = true;
return;
}
unsigned char expected[4096];
if (size > sizeof (expected))
{
printf ("error: invalid allocation size %zu at %d, seed %u\n",
size, index, alloc->seed);
errors = true;
return;
}
randomize_buffer (expected, size, alloc->seed);
if (memcmp (alloc->data, expected, size) != 0)
{
printf ("error: allocation %d data mismatch, size %zu, seed %u\n",
index, size, alloc->seed);
printf (" expected:");
dump_hex (expected, size);
putc ('\n', stdout);
printf (" actual:");
dump_hex (alloc->data, size);
putc ('\n', stdout);
errors = true;
}
}
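/* Note that an allocation's expected contents are a pure function of
   its recorded seed, so check_allocation can regenerate them on demand
   with randomize_buffer instead of keeping a second copy of the data
   around.  */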
/* A heap allocation combined with pending actions on it. */
struct allocation_task
{
struct allocation allocation;
enum allocation_action action;
};
/* Allocation tasks. Initialized by init_allocation_tasks and used by
perform_allocations. */
enum { allocation_task_count = action_count * max_size };
static struct allocation_task allocation_tasks[allocation_task_count];
/* Fisher-Yates shuffle of allocation_tasks. */
static void
shuffle_allocation_tasks (void)
{
for (int i = 0; i < allocation_task_count - 1; ++i)
{
/* Pick pair in the tail of the array. */
int j = i + (rand_next (&global_seed)
% ((unsigned) (allocation_task_count - i)));
if (j < 0 || j >= allocation_task_count)
{
write_message ("error: test bug in shuffle\n");
_exit (1);
}
/* Exchange. */
struct allocation_task tmp = allocation_tasks[i];
allocation_tasks[i] = allocation_tasks[j];
allocation_tasks[j] = tmp;
}
}
/* Set up the allocation tasks and the dumped heap. */
static void
initial_allocations (void)
{
/* Initialize in a position-dependent way. */
for (int i = 0; i < allocation_task_count; ++i)
allocation_tasks[i] = (struct allocation_task)
{
.allocation =
{
.size = 1 + (i / action_count),
.seed = i,
},
.action = i % action_count
};
/* Execute the tasks in a random order. */
shuffle_allocation_tasks ();
/* Initialize the contents of the dumped heap. */
for (int i = 0; i < allocation_task_count; ++i)
{
struct allocation_task *task = allocation_tasks + i;
task->allocation.data = dumped_heap_alloc (task->allocation.size);
randomize_buffer (task->allocation.data, task->allocation.size,
task->allocation.seed);
}
for (int i = 0; i < allocation_task_count; ++i)
check_allocation (&allocation_tasks[i].allocation, i);
}
/* Indicates whether init_heap has run. This variable needs to be
volatile because malloc is declared __THROW, which implies it is a
leaf function, but we expect it to run our hooks. */
static volatile bool heap_initialized;
/* Executed by glibc malloc, through __malloc_initialize_hook
below. */
static void
init_heap (void)
{
write_message ("info: performing heap initialization\n");
heap_initialized = true;
/* Populate the dumped heap. */
initial_allocations ();
/* Complete initialization of the saved heap data structure. */
save_state.sbrk_base = (void *) dumped_heap;
save_state.sbrked_mem_bytes = sizeof (dumped_heap);
/* Top pointer. Adjust so that it points to the start of struct
malloc_chunk. */
save_state.av[2] = (void *) (next_heap_chunk - 1);
/* Integrate the dumped heap into the process heap. */
if (malloc_set_state (&save_state) != 0)
{
write_message ("error: malloc_set_state failed\n");
_exit (1);
}
}
/* Interpose the initialization callback. */
void (*volatile __malloc_initialize_hook) (void) = init_heap;
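/* The hook variable has to be visible to libc.so for this to work,
   which is why malloc/Makefile links this test with -rdynamic (see the
   LDFLAGS-tst-mallocstate comment above).  The hook runs when malloc
   initializes itself, so by the time the first malloc call in do_test
   returns, init_heap has populated the fake heap and malloc_set_state
   has integrated it; the heap_initialized flag lets do_test verify
   that this actually happened.  */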
/* Simulate occasional unrelated heap activity in the non-dumped
heap. */
enum { heap_activity_allocations_count = 32 };
static struct allocation heap_activity_allocations
[heap_activity_allocations_count] = {};
static int heap_activity_seed_counter = 1000 * 1000;
static void
-merror (const char *msg)
+heap_activity (void)
{
-++errors;
-printf ("Error: %s\n", msg);
/* Only do this from time to time. */
if ((rand_next (&global_seed) % 4) == 0)
{
int slot = rand_next (&global_seed) % heap_activity_allocations_count;
struct allocation *alloc = heap_activity_allocations + slot;
if (alloc->data == NULL)
{
alloc->size = rand_next (&global_seed) % (4096U + 1);
alloc->data = xmalloc (alloc->size);
alloc->seed = heap_activity_seed_counter++;
randomize_buffer (alloc->data, alloc->size, alloc->seed);
check_allocation (alloc, 1000 + slot);
}
else
{
check_allocation (alloc, 1000 + slot);
free (alloc->data);
alloc->data = NULL;
}
}
}
static void
heap_activity_deallocate (void)
{
for (int i = 0; i < heap_activity_allocations_count; ++i)
free (heap_activity_allocations[i].data);
}
/* Perform a full heap check across the dumped heap allocation tasks,
and the simulated heap activity directly above. */
static void
full_heap_check (void)
{
/* Dumped heap. */
for (int i = 0; i < allocation_task_count; ++i)
if (allocation_tasks[i].allocation.data != NULL)
check_allocation (&allocation_tasks[i].allocation, i);
/* Heap activity allocations. */
for (int i = 0; i < heap_activity_allocations_count; ++i)
if (heap_activity_allocations[i].data != NULL)
check_allocation (heap_activity_allocations + i, i);
}
/* Used as an optimization barrier to force a heap allocation. */
__attribute__ ((noinline, noclone))
static void
my_free (void *ptr)
{
free (ptr);
}
static int
do_test (void)
{
-void *p1, *p2;
-void *save_state;
-long i;
-errno = 0;
-p1 = malloc (10);
-if (p1 == NULL)
-merror ("malloc (10) failed.");
-p2 = malloc (20);
-if (p2 == NULL)
-merror ("malloc (20) failed.");
-free (malloc (10));
-for (i = 0; i < 100; ++i)
+my_free (malloc (1));
+if (!heap_initialized)
{
-save_state = malloc_get_state ();
-if (save_state == NULL)
-{
-merror ("malloc_get_state () failed.");
-break;
-}
-/*free (malloc (10)); This could change the top chunk! */
-malloc_set_state (save_state);
-p1 = realloc (p1, i * 4 + 4);
-if (p1 == NULL)
-merror ("realloc (i*4) failed.");
-free (save_state);
+printf ("error: heap was not initialized by malloc\n");
+return 1;
}
-p1 = realloc (p1, 40);
-free (p2);
-p2 = malloc (10);
-if (p2 == NULL)
-merror ("malloc (10) failed.");
-free (p1);
+/* The first pass performs the randomly generated allocation
+tasks. */
+write_message ("info: first pass through allocation tasks\n");
+full_heap_check ();
-return errors != 0;
/* Execute the post-undump tasks in a random order. */
shuffle_allocation_tasks ();
for (int i = 0; i < allocation_task_count; ++i)
{
heap_activity ();
struct allocation_task *task = allocation_tasks + i;
switch (task->action)
{
case action_free:
check_allocation (&task->allocation, i);
free (task->allocation.data);
task->allocation.data = NULL;
break;
case action_realloc:
check_allocation (&task->allocation, i);
task->allocation.data = xrealloc
(task->allocation.data, task->allocation.size + max_size);
check_allocation (&task->allocation, i);
break;
case action_realloc_same:
check_allocation (&task->allocation, i);
task->allocation.data = xrealloc
(task->allocation.data, task->allocation.size);
check_allocation (&task->allocation, i);
break;
case action_realloc_smaller:
check_allocation (&task->allocation, i);
size_t new_size = task->allocation.size - 1;
task->allocation.data = xrealloc (task->allocation.data, new_size);
if (new_size == 0)
{
if (task->allocation.data != NULL)
{
printf ("error: realloc with size zero did not deallocate\n");
errors = true;
}
/* No further action on this task. */
task->action = action_free;
}
else
{
task->allocation.size = new_size;
check_allocation (&task->allocation, i);
}
break;
case action_count:
abort ();
}
full_heap_check ();
}
/* The second pass frees the objects which were allocated during the
first pass. */
write_message ("info: second pass through allocation tasks\n");
shuffle_allocation_tasks ();
for (int i = 0; i < allocation_task_count; ++i)
{
heap_activity ();
struct allocation_task *task = allocation_tasks + i;
switch (task->action)
{
case action_free:
/* Already freed, nothing to do. */
break;
case action_realloc:
case action_realloc_same:
case action_realloc_smaller:
check_allocation (&task->allocation, i);
free (task->allocation.data);
task->allocation.data = NULL;
break;
case action_count:
abort ();
}
full_heap_check ();
}
heap_activity_deallocate ();
/* Check that the malloc_get_state stub behaves in the intended
way. */
errno = 0;
if (malloc_get_state () != NULL)
{
printf ("error: malloc_get_state succeeded\n");
errors = true;
}
if (errno != ENOSYS)
{
printf ("error: malloc_get_state: %m\n");
errors = true;
}
return errors;
}
/*
* Local variables:
* c-basic-offset: 2
* End:
*/
-#define TEST_FUNCTION do_test ()
-#include "../test-skeleton.c"