2000-12-28  Wolfram Gloger  <wg@malloc.de>

	* malloc/malloc.c (MALLOC_COPY): Handle case if source and
	destination overlap.  Assume dest is always below source if
	overlapping.
Committed by Ulrich Drepper, 2000-12-31 07:39:50 +00:00
parent c77a447822
commit 09f5e1635a
5 changed files with 56 additions and 39 deletions

ChangeLog

@@ -1,3 +1,9 @@
+2000-12-28  Wolfram Gloger  <wg@malloc.de>
+
+	* malloc/malloc.c (MALLOC_COPY): Handle case if source and
+	destination overlap.  Assume dest is always below source if
+	overlapping.
+
 2000-12-30  Ulrich Drepper  <drepper@redhat.com>
 
 	* elf/dl-close.c (_dl_close): We can ignore the NODELETE flag if the

elf/dl-open.c

@@ -269,10 +269,6 @@ dl_open_worker (void *a)
   /* Load that object's dependencies.  */
   _dl_map_object_deps (new, NULL, 0, 0);
 
-  /* Increment the open count for all dependencies.  */
-  for (i = 0; i < new->l_searchlist.r_nlist; ++i)
-    ++new->l_searchlist.r_list[i]->l_opencount;
-
   /* So far, so good.  Now check the versions.  */
   for (i = 0; i < new->l_searchlist.r_nlist; ++i)
     if (new->l_searchlist.r_list[i]->l_versions == NULL)
@@ -321,6 +317,10 @@ dl_open_worker (void *a)
	  l = l->l_prev;
	}
 
+  /* Increment the open count for all dependencies.  */
+  for (i = 0; i < new->l_searchlist.r_nlist; ++i)
+    ++new->l_searchlist.r_list[i]->l_opencount;
+
   /* Run the initializer functions of new objects.  */
   _dl_init (new, __libc_argc, __libc_argv, __environ);
@@ -399,11 +399,10 @@ _dl_open (const char *file, int mode, const void *caller)
     {
       int i;
 
-      /* Increment open counters for all objects which did not get
-         correctly loaded.  */
+      /* Increment open counters for all objects since this has
+         not happened yet.  */
       for (i = 0; i < args.map->l_searchlist.r_nlist; ++i)
-        if (args.map->l_searchlist.r_list[i]->l_opencount == 0)
-          args.map->l_searchlist.r_list[i]->l_opencount = 1;
+        ++args.map->l_searchlist.r_list[i]->l_opencount;
 
       _dl_close (args.map);
     }
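
Note on the dl-open.c hunks: the per-dependency l_opencount increments now happen only after the version checks have passed, and on failure _dl_open bumps every counter exactly once so the subsequent _dl_close can decrement them all uniformly. A minimal sketch of that bump-then-rollback pattern follows; it is illustrative only, not glibc code, and obj_t, check_versions and close_all are hypothetical names:

/* Illustrative sketch only, not glibc code.  */
#include <stddef.h>

typedef struct obj { int opencount; } obj_t;

extern int check_versions(obj_t **deps, size_t n);  /* fallible step */
extern void close_all(obj_t **deps, size_t n);      /* decrements each count */

int open_with_deps(obj_t **deps, size_t n)
{
  size_t i;

  if (check_versions(deps, n) != 0)
    {
      /* Failure before any count was taken: take each count exactly
         once so that close_all() can drop them all uniformly.  */
      for (i = 0; i < n; ++i)
        ++deps[i]->opencount;
      close_all(deps, n);
      return -1;
    }

  /* Success: now account for every dependency.  */
  for (i = 0; i < n; ++i)
    ++deps[i]->opencount;
  return 0;
}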

linuxthreads/ChangeLog

@@ -1,3 +1,8 @@
+2000-11-15  Wolfram Gloger  <wg@malloc.de>
+
+	* manager.c (pthread_free): [!FLOATING_STACKS]: Only remap the
+	stack to PROT_NONE, don't unmap it, avoiding collisions with malloc.
+
 2000-12-27  Andreas Jaeger  <aj@suse.de>
 
 	* Examples/ex13.c: Make local functions static.

linuxthreads/manager.c

@@ -418,7 +418,7 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
       new_thread_bottom = (char *) map_addr + guardsize;
       new_thread = ((pthread_descr) (new_thread_bottom + stacksize)) - 1;
 # endif
-#else
+#else /* !FLOATING_STACKS */
   if (attr != NULL)
     {
       guardsize = page_roundup (attr->__guardsize, granularity);
@@ -696,23 +696,24 @@ static void pthread_free(pthread_descr th)
     {
       size_t guardsize = th->p_guardsize;
       /* Free the stack and thread descriptor area */
+      char *guardaddr = th->p_guardaddr;
+      /* Guardaddr is always set, even if guardsize is 0.  This allows
+         us to compute everything else.  */
+      size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
 #ifdef NEED_SEPARATE_REGISTER_STACK
-      char *guardaddr = th->p_guardaddr;
-      /* We unmap exactly what we mapped, in case there was something
-         else in the same region.  Guardaddr is always set, eve if
-         guardsize is 0.  This allows us to compute everything else.  */
-      size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
-
-      /* Unmap the register stack, which is below guardaddr.  */
-      munmap((caddr_t)(guardaddr-stacksize),
-             2 * stacksize + th->p_guardsize);
+      /* Take account of the register stack, which is below guardaddr.  */
+      guardaddr -= stacksize;
+      stacksize *= 2;
+#endif
+#if FLOATING_STACKS
+      /* Can unmap safely.  */
+      munmap(guardaddr, stacksize + guardsize);
 #else
-      char *guardaddr = th->p_guardaddr;
-      /* We unmap exactly what we mapped, in case there was something
-         else in the same region.  Guardaddr is always set, eve if
-         guardsize is 0.  This allows us to compute everything else.  */
-      size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
-
-      munmap (guardaddr, stacksize + guardsize);
+      /* Only remap to PROT_NONE, so that the region is reserved in
+         case we map the stack again later.  Avoid collision with
+         other mmap()s, in particular by malloc().  */
+      mmap(guardaddr, stacksize + guardsize, PROT_NONE,
+           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
 #endif
     }
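
Note on the pthread_free change: without FLOATING_STACKS each thread's stack must live at a fixed address computed from its descriptor, so if pthread_free simply munmap()ed the region, a later mmap() (for instance from malloc) could claim that address range and the stack could never be placed there again. Remapping the region PROT_NONE releases the pages while keeping the address space reserved. A small standalone demonstration of that reservation trick, assuming a Linux-style mmap (this is not the linuxthreads code itself):

#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
  size_t len = 1 << 20;  /* 1 MiB "stack" */

  /* Obtain a region, as a stack allocator would.  */
  void *region = mmap(NULL, len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (region == MAP_FAILED)
    return 1;

  /* "Free" it while keeping the address range reserved: MAP_FIXED
     atomically replaces the old mapping with an inaccessible one, so
     no other mmap() can land in this range.  */
  if (mmap(region, len, PROT_NONE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
    return 1;

  /* Later the same range can be reclaimed for a new stack.  */
  if (mmap(region, len, PROT_READ | PROT_WRITE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
    return 1;

  printf("region reserved and reused at %p\n", region);
  return munmap(region, len);
}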

malloc/malloc.c

@@ -423,11 +423,12 @@ Void_t* memmove();
 #endif
 #endif
 
-#if USE_MEMCPY
-
 /* The following macros are only invoked with (2n+1)-multiples of
    INTERNAL_SIZE_T units, with a positive integer n.  This is exploited
-   for fast inline execution when n is small. */
+   for fast inline execution when n is small.  If the regions to be
+   copied do overlap, the destination lies always _below_ the source. */
+
+#if USE_MEMCPY
 
 #define MALLOC_ZERO(charp, nbytes) \
 do { \
@@ -446,7 +447,9 @@ do { \
   } else memset((charp), 0, mzsz); \
 } while(0)
 
-#define MALLOC_COPY(dest,src,nbytes) \
+/* If the regions overlap, dest is always _below_ src. */
+
+#define MALLOC_COPY(dest,src,nbytes,overlap) \
 do { \
   INTERNAL_SIZE_T mcsz = (nbytes); \
   if(mcsz <= 9*sizeof(mcsz)) { \
@@ -461,12 +464,12 @@ do { \
     *mcdst++ = *mcsrc++; \
     *mcdst++ = *mcsrc++; \
     *mcdst   = *mcsrc  ; \
-  } else memcpy(dest, src, mcsz); \
+  } else if(overlap) \
+    memmove(dest, src, mcsz); \
+  else \
+    memcpy(dest, src, mcsz); \
 } while(0)
 
-#define MALLOC_MEMMOVE(dest,src,nbytes) \
-  memmove(dest, src, mcsz)
-
 #else /* !USE_MEMCPY */
 
 /* Use Duff's device for good zeroing/copying performance. */
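
Note on the new overlap parameter: for sizes small enough to take the unrolled word-copy path, the ascending copy is already overlap-safe given the dest-below-src invariant; larger copies must go through memmove() rather than memcpy(), whose behaviour on overlapping regions is undefined. A tiny standalone illustration (not glibc code) of why the copy direction is what matters:

#include <stdio.h>
#include <string.h>

int main(void)
{
  char buf[16] = "xxxxABCDEFGH";

  /* dest (buf) lies below src (buf + 4); an ascending copy reads each
     source byte before it is overwritten, so the overlap is harmless.  */
  char *dest = buf, *src = buf + 4;
  for (size_t i = 0; i < 8; ++i)
    dest[i] = src[i];
  printf("%s\n", buf);            /* prints ABCDEFGHEFGH */

  /* In the general case, memmove() handles either direction.  */
  char buf2[16] = "xxxxABCDEFGH";
  memmove(buf2, buf2 + 4, 8);
  printf("%s\n", buf2);           /* prints ABCDEFGHEFGH */

  return 0;
}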
@@ -488,7 +491,9 @@ do { \
   } \
 } while(0)
 
-#define MALLOC_COPY(dest,src,nbytes) \
+/* If the regions overlap, dest is always _below_ src. */
+
+#define MALLOC_COPY(dest,src,nbytes,overlap) \
 do { \
   INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
   INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
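
As the comment in the previous hunk says, the !USE_MEMCPY branch copies via Duff's device: a switch whose case labels jump into the middle of an unrolled do/while loop, so the remainder of the count modulo 8 is handled without a separate clean-up loop. A self-contained example of the technique (generic, not the malloc.c macro):

#include <stdio.h>
#include <stddef.h>

/* Copy count ints with Duff's device: the switch enters the unrolled
   loop at the right offset, so only about count/8 branch tests run.  */
static void duff_copy(int *to, const int *from, size_t count)
{
  if (count == 0)
    return;
  size_t n = (count + 7) / 8;
  switch (count % 8) {
  case 0: do { *to++ = *from++;
  case 7:      *to++ = *from++;
  case 6:      *to++ = *from++;
  case 5:      *to++ = *from++;
  case 4:      *to++ = *from++;
  case 3:      *to++ = *from++;
  case 2:      *to++ = *from++;
  case 1:      *to++ = *from++;
          } while (--n > 0);
  }
}

int main(void)
{
  int src[11], dst[11] = { 0 };
  for (int i = 0; i < 11; ++i)
    src[i] = i;
  duff_copy(dst, src, 11);
  for (int i = 0; i < 11; ++i)
    printf("%d ", dst[i]);        /* prints 0 1 2 ... 10 */
  putchar('\n');
  return 0;
}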
@@ -3255,7 +3260,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
     /* Must alloc, copy, free. */
     newmem = mALLOc(bytes);
     if (newmem == 0) return 0; /* propagate failure */
-    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
+    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ, 0);
     munmap_chunk(oldp);
     return newmem;
   }
@@ -3370,7 +3375,8 @@ arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
         unlink(prev, bck, fwd);
         newp = prev;
         newsize += prevsize + nextsize;
-        MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+        MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize,
+                    1);
         top(ar_ptr) = chunk_at_offset(newp, nb);
         set_head(top(ar_ptr), (newsize - nb) | PREV_INUSE);
         set_head_size(newp, nb);
@@ -3385,7 +3391,7 @@ arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
           unlink(prev, bck, fwd);
           newp = prev;
           newsize += nextsize + prevsize;
-          MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+          MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 1);
           goto split;
         }
       }
@@ -3396,7 +3402,7 @@ arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
         unlink(prev, bck, fwd);
         newp = prev;
         newsize += prevsize;
-        MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+        MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 1);
         goto split;
       }
     }
@@ -3436,7 +3442,7 @@ arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
   }
 
   /* Otherwise copy, free, and exit */
-  MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+  MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 0);
   chunk_free(ar_ptr, oldp);
   return newp;
 }
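
The flag values at these call sites follow from the chunk layout: when realloc coalesces backwards with the free chunk just below, the new payload starts at a lower address and overlaps the old one (overlap=1); when a fresh block is obtained, the regions are disjoint (overlap=0). A compressed sketch of the two situations, using ordinary malloc/memmove rather than glibc internals:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
  /* overlap=1: the chunk grows down over the previous free chunk, so
     the payload moves to a lower, overlapping address.  */
  char *arena = malloc(32);
  if (arena == NULL) return 1;
  char *old_mem = arena + 4;          /* old payload */
  memcpy(old_mem, "payload", 8);
  char *new_mem = arena;              /* coalesced chunk starts 4 bytes lower */
  memmove(new_mem, old_mem, 8);       /* dest below src, regions overlap */
  printf("%s\n", new_mem);            /* prints "payload" */

  /* overlap=0: a fresh block was allocated, the regions are disjoint,
     so plain memcpy is fine.  */
  char *fresh = malloc(8);
  if (fresh == NULL) return 1;
  memcpy(fresh, new_mem, 8);
  printf("%s\n", fresh);              /* prints "payload" */

  free(arena);
  free(fresh);
  return 0;
}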
@@ -4605,7 +4611,7 @@ realloc_check(oldmem, bytes, caller)
     newp = (top_check() >= 0) ? chunk_alloc(&main_arena, nb) : NULL;
     if (newp) {
       MALLOC_COPY(BOUNDED_N(chunk2mem(newp), nb),
-                  oldmem, oldsize - 2*SIZE_SZ);
+                  oldmem, oldsize - 2*SIZE_SZ, 0);
       munmap_chunk(oldp);
     }
   }