2004-03-18  Jakub Jelinek  <jakub@redhat.com>

	* malloc/arena.c (aligned_heap_area): New variable.
	(new_heap): If aligned_heap_area != NULL, attempt to use that
	first.  If HEAP_MAX_SIZE << 1 area is already HEAP_MAX_SIZE bytes
	aligned, remember the second half in aligned_heap_area.
	(delete_heap): Clear aligned_heap_area if deleting the area right
	before aligned_heap_area.
commit 26d550d38b (parent 48ad81fa2f)
Committed by Ulrich Drepper on 2004-10-04 02:27:39 +00:00
2 changed files with 56 additions and 15 deletions

diff --git a/ChangeLog b/ChangeLog
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,12 @@
+2004-03-18  Jakub Jelinek  <jakub@redhat.com>
+
+	* malloc/arena.c (aligned_heap_area): New variable.
+	(new_heap): If aligned_heap_area != NULL, attempt to use that
+	first.  If HEAP_MAX_SIZE << 1 area is already HEAP_MAX_SIZE bytes
+	aligned, remember the second half in aligned_heap_area.
+	(delete_heap): Clear aligned_heap_area if deleting the area right
+	before aligned_heap_area.
+
 2004-10-03  Juerg Billeter  <j@bitron.ch>
 
 	* nscd/nscd_initgroups.c (__nscd_getgrouplist): Return -1 if nscd

diff --git a/malloc/arena.c b/malloc/arena.c
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -550,6 +550,16 @@ dump_heap(heap) heap_info *heap;
 #endif /* MALLOC_DEBUG > 1 */
 
+/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
+   addresses as opposed to increasing, new_heap would badly fragment the
+   address space.  In that case remember the second HEAP_MAX_SIZE part
+   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
+   call (if it is already aligned) and try to reuse it next time.  We need
+   no locking for it, as kernel ensures the atomicity for us - worst case
+   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
+   multiple threads, but only one will succeed.  */
+static char *aligned_heap_area;
+
 /* Create a new heap.  size is automatically rounded up to a multiple
    of the page size. */
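
The comment above is the heart of the patch, so a quick illustration may help.
The following standalone sketch is not glibc code: HEAP_MAX_SIZE is assumed to
be 1 MB and the two sample addresses are made up purely for illustration.  It
shows the rounding arithmetic new_heap applies to a 2*HEAP_MAX_SIZE mapping,
and the lucky case the patch now exploits: when the mapping comes back already
aligned, its second half is aligned too and can be remembered for reuse.

    /* Sketch only -- not part of the patch.  HEAP_MAX_SIZE assumed 1 MB. */
    #include <stdio.h>
    #include <stdint.h>

    #define HEAP_MAX_SIZE ((uintptr_t)1 << 20)

    int main(void)
    {
      /* Two hypothetical start addresses of a 2*HEAP_MAX_SIZE mapping:
         one unaligned, one that happens to be HEAP_MAX_SIZE-aligned.  */
      uintptr_t samples[] = { 0x40176000, 0x40100000 };

      for (int i = 0; i < 2; i++) {
        uintptr_t p1 = samples[i];
        /* Round up to the next HEAP_MAX_SIZE boundary, as new_heap does.  */
        uintptr_t p2 = (p1 + HEAP_MAX_SIZE - 1) & ~(HEAP_MAX_SIZE - 1);
        uintptr_t ul = p2 - p1;        /* leading slack that gets munmap'ed */

        if (ul == 0)
          /* Already aligned: the second half starts at p2 + HEAP_MAX_SIZE,
             is itself aligned, and is worth remembering for the next heap. */
          printf("p1=%#lx aligned; remember %#lx\n",
                 (unsigned long)p1, (unsigned long)(p2 + HEAP_MAX_SIZE));
        else
          printf("p1=%#lx -> p2=%#lx, unmap %#lx leading bytes\n",
                 (unsigned long)p1, (unsigned long)p2, (unsigned long)ul);
      }
      return 0;
    }

With mmap returning decreasing addresses, each 2*HEAP_MAX_SIZE probe can waste
up to HEAP_MAX_SIZE of address space below the previous heap; reusing the
remembered aligned half avoids that steady leak.
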
@@ -580,21 +590,38 @@ new_heap(size, top_pad) size_t size, top_pad;
      No swap space needs to be reserved for the following large
      mapping (on Linux, this is the case for all non-writable mappings
      anyway). */
-  p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
-  if(p1 != MAP_FAILED) {
-    p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1)) & ~(HEAP_MAX_SIZE-1));
-    ul = p2 - p1;
-    munmap(p1, ul);
-    munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
-  } else {
-    /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
-       is already aligned. */
-    p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
-    if(p2 == MAP_FAILED)
-      return 0;
-    if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
+  p2 = MAP_FAILED;
+  if(aligned_heap_area) {
+    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
+                      MAP_PRIVATE|MAP_NORESERVE);
+    aligned_heap_area = NULL;
+    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
       munmap(p2, HEAP_MAX_SIZE);
-      return 0;
+      p2 = MAP_FAILED;
     }
   }
+  if(p2 == MAP_FAILED) {
+    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE,
+                      MAP_PRIVATE|MAP_NORESERVE);
+    if(p1 != MAP_FAILED) {
+      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
+                    & ~(HEAP_MAX_SIZE-1));
+      ul = p2 - p1;
+      if (ul)
+        munmap(p1, ul);
+      else
+        aligned_heap_area = p2 + HEAP_MAX_SIZE;
+      munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
+    } else {
+      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
+         is already aligned. */
+      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
+      if(p2 == MAP_FAILED)
+        return 0;
+      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
+        munmap(p2, HEAP_MAX_SIZE);
+        return 0;
+      }
+    }
+  }
   if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
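
Read as a whole, the new code tries three tiers in order: (1) map exactly
HEAP_MAX_SIZE at the remembered aligned_heap_area hint; (2) fall back to the
old map-twice-and-trim approach, now also recording the aligned second half
when the double-size mapping happens to start aligned; (3) as a last resort,
hope that a plain HEAP_MAX_SIZE mapping is aligned by chance.  Below is a
self-contained sketch of that control flow, not the glibc implementation: it
is Linux-flavored, uses plain mmap with MAP_ANONYMOUS instead of glibc's
internal MMAP wrapper, and ALIGN_SIZE and next_hint are stand-in names.

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/mman.h>

    #define ALIGN_SIZE ((uintptr_t)1 << 20)  /* stand-in for HEAP_MAX_SIZE */

    static char *next_hint;                  /* stand-in for aligned_heap_area */

    static char *map_aligned(void)
    {
      char *p1, *p2 = MAP_FAILED;

      /* Tier 1: retry the remembered aligned address, if any.  */
      if (next_hint) {
        p2 = mmap(next_hint, ALIGN_SIZE, PROT_NONE,
                  MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
        next_hint = NULL;
        if (p2 != MAP_FAILED && ((uintptr_t)p2 & (ALIGN_SIZE - 1))) {
          munmap(p2, ALIGN_SIZE);   /* kernel ignored the hint; start over */
          p2 = MAP_FAILED;
        }
      }
      /* Tier 2: map twice the size, then trim to an aligned window.  */
      if (p2 == MAP_FAILED) {
        p1 = mmap(NULL, ALIGN_SIZE << 1, PROT_NONE,
                  MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
        if (p1 != MAP_FAILED) {
          p2 = (char *)(((uintptr_t)p1 + ALIGN_SIZE - 1) & ~(ALIGN_SIZE - 1));
          uintptr_t ul = (uintptr_t)(p2 - p1);
          if (ul)
            munmap(p1, ul);                 /* trim the unaligned head */
          else
            next_hint = p2 + ALIGN_SIZE;    /* both halves aligned: remember one */
          /* Unmap the tail -- the whole second half when already aligned;
             the remembered hint is re-mapped by tier 1 next time.  */
          munmap(p2 + ALIGN_SIZE, ALIGN_SIZE - ul);
        } else {
          /* Tier 3: hope a plain mapping happens to be aligned.  */
          p2 = mmap(NULL, ALIGN_SIZE, PROT_NONE,
                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
          if (p2 == MAP_FAILED)
            return NULL;
          if ((uintptr_t)p2 & (ALIGN_SIZE - 1)) {
            munmap(p2, ALIGN_SIZE);
            return NULL;
          }
        }
      }
      return p2;
    }

    int main(void)
    {
      char *a = map_aligned();
      char *b = map_aligned();    /* may land on the remembered hint */
      printf("a=%p b=%p\n", (void *)a, (void *)b);
      return 0;
    }

The hint needs no lock, as the glibc comment says: if two threads race on the
same remembered address, both may pass it to mmap, but the kernel hands that
region to only one of them; the loser gets a different (unaligned) address,
fails the alignment check, and simply falls through to tier 2.
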
@@ -644,7 +671,12 @@ grow_heap(h, diff) heap_info *h; long diff;
 /* Delete a heap. */
 
-#define delete_heap(heap) munmap((char*)(heap), HEAP_MAX_SIZE)
+#define delete_heap(heap) \
+  do {                                                          \
+    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)    \
+      aligned_heap_area = NULL;                                 \
+    munmap((char*)(heap), HEAP_MAX_SIZE);                       \
+  } while (0)
 
 static int
 internal_function
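
A side note on the delete_heap rewrite: besides clearing the hint when the
heap sitting right before aligned_heap_area is unmapped, the macro grows from
one statement to two, so it is wrapped in the classic do { ... } while (0)
idiom to keep it usable wherever a single statement is expected.  A
hypothetical illustration of why, not glibc code:

    #include <stdio.h>

    /* Without the wrapper, this macro expands to two statements...  */
    #define CLEANUP_BARE(name)  puts("clear hint"); puts("unmap " #name)

    /* ...with it, the expansion behaves as one statement.  */
    #define CLEANUP(name)                       \
      do {                                      \
        puts("clear hint");                     \
        puts("unmap " #name);                   \
      } while (0)

    int main(void)
    {
      int deletable = 0;

      /* "if (deletable) CLEANUP_BARE(heap); else ..." would not even
         compile: the second puts() ends the if body and strands the else.
         CLEANUP is safe here and runs both statements or neither.  */
      if (deletable)
        CLEANUP(heap);
      else
        puts("nothing to delete");
      return 0;
    }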