Clean up code duplication in malloc on fallback to use another arena

Break out the fallback code that tries another arena into a separate
function, for readability. The shared pattern every call site now follows
is sketched below.
Siddhesh Poyarekar 2012-09-07 14:39:52 +05:30
parent 01f49f59ce
commit c78ab09473
3 changed files with 43 additions and 75 deletions
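
Before this change, the five allocator entry points in malloc.c each inlined the same two-branch fallback; afterwards they all reduce to the shape below. This is a simplified sketch of the pattern visible in the diffs that follow, not verbatim glibc code: alloc_once is a hypothetical stand-in for the per-function worker (_int_malloc, _int_memalign, and so on).

  void *p = alloc_once (ar_ptr, bytes);   /* first try on the current arena */
  if (p == NULL) {
    /* First arena failed: ask arena_get_retry for another locked arena
       and try exactly once more.  */
    ar_ptr = arena_get_retry (ar_ptr, bytes);
    if (__builtin_expect (ar_ptr != NULL, 1)) {
      p = alloc_once (ar_ptr, bytes);
      (void) mutex_unlock (&ar_ptr->mutex);
    }
  } else
    (void) mutex_unlock (&ar_ptr->mutex);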

ChangeLog

@@ -1,3 +1,14 @@
+2012-09-07  Siddhesh Poyarekar  <siddhesh@redhat.com>
+
+	* malloc/arena.c (arena_get_retry): New function that gets
+	another arena for the caller to try its request on.
+	* malloc/malloc.c (__libc_malloc): Use arena_get_retry if the
+	current arena cannot fulfill the request.
+	(__libc_memalign): Likewise.
+	(__libc_valloc): Likewise.
+	(__libc_pvalloc): Likewise.
+	(__libc_calloc): Likewise.
+
 2012-09-05  John Tobey  <john.tobey@gmail.com>
 	[BZ #13542]

malloc/arena.c

@@ -917,6 +917,27 @@ arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
   return a;
 }
 
+/* If we don't have the main arena, then maybe the failure is due to running
+   out of mmapped areas, so we can try allocating on the main arena.
+   Otherwise, it is likely that sbrk() has failed and there is still a chance
+   to mmap(), so try one of the other arenas. */
+static mstate
+arena_get_retry (mstate ar_ptr, size_t bytes)
+{
+  if(ar_ptr != &main_arena) {
+    (void)mutex_unlock(&ar_ptr->mutex);
+    ar_ptr = &main_arena;
+    (void)mutex_lock(&ar_ptr->mutex);
+  } else {
+    /* Grab ar_ptr->next prior to releasing its lock. */
+    mstate prev = ar_ptr->next ? ar_ptr : 0;
+    (void)mutex_unlock(&ar_ptr->mutex);
+    ar_ptr = arena_get2(prev, bytes, ar_ptr);
+  }
+
+  return ar_ptr;
+}
+
 #ifdef PER_THREAD
 static void __attribute__ ((section ("__libc_thread_freeres_fn")))
 arena_thread_freeres (void)
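
The locking contract the call sites below rely on: the caller enters arena_get_retry holding the failed arena's mutex, and the function returns a different arena already locked (arena_get2 likewise returns its arena locked, or NULL), leaving the caller responsible for the final unlock. A self-contained toy model of that lock handoff, using plain pthreads and hypothetical names (not glibc code):

  #include <pthread.h>
  #include <stdio.h>

  typedef struct arena { pthread_mutex_t mutex; const char *name; } arena_t;

  static arena_t main_arena   = { PTHREAD_MUTEX_INITIALIZER, "main" };
  static arena_t thread_arena = { PTHREAD_MUTEX_INITIALIZER, "thread" };

  /* Stand-in for arena_get_retry: switch to the other arena, moving the
     lock along.  The real helper also walks the arena list via arena_get2. */
  static arena_t *
  toy_get_retry (arena_t *ar_ptr)
  {
    arena_t *next = (ar_ptr == &main_arena) ? &thread_arena : &main_arena;
    pthread_mutex_unlock (&ar_ptr->mutex);  /* drop the failed arena's lock */
    pthread_mutex_lock (&next->mutex);      /* hand back the retry arena locked */
    return next;
  }

  int
  main (void)
  {
    arena_t *ar = &thread_arena;
    pthread_mutex_lock (&ar->mutex);        /* caller holds a lock on entry */
    ar = toy_get_retry (ar);                /* ...and still holds one on exit */
    printf ("retrying on %s arena\n", ar->name);
    pthread_mutex_unlock (&ar->mutex);      /* caller unlocks after its retry */
    return 0;
  }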

malloc/malloc.c

@@ -2858,23 +2858,10 @@ __libc_malloc(size_t bytes)
     return 0;
   victim = _int_malloc(ar_ptr, bytes);
   if(!victim) {
-    /* Maybe the failure is due to running out of mmapped areas. */
-    if(ar_ptr != &main_arena) {
-      (void)mutex_unlock(&ar_ptr->mutex);
-      ar_ptr = &main_arena;
-      (void)mutex_lock(&ar_ptr->mutex);
+    ar_ptr = arena_get_retry(ar_ptr, bytes);
+    if (__builtin_expect(ar_ptr != NULL, 1)) {
       victim = _int_malloc(ar_ptr, bytes);
       (void)mutex_unlock(&ar_ptr->mutex);
-    } else {
-      /* ... or sbrk() has failed and there is still a chance to mmap()
-         Grab ar_ptr->next prior to releasing its lock. */
-      mstate prev = ar_ptr->next ? ar_ptr : 0;
-      (void)mutex_unlock(&ar_ptr->mutex);
-      ar_ptr = arena_get2(prev, bytes, ar_ptr);
-      if(ar_ptr) {
-        victim = _int_malloc(ar_ptr, bytes);
-        (void)mutex_unlock(&ar_ptr->mutex);
-      }
     }
   } else
     (void)mutex_unlock(&ar_ptr->mutex);
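
The new call sites wrap the retry check in GCC's __builtin_expect, hinting that arena_get_retry usually succeeds so the compiler lays out the success path as the likely branch. A minimal standalone illustration (not from the commit):

  #include <stdio.h>

  int
  main (void)
  {
    int *p = NULL;
    /* The second argument is the expected value of the first expression;
       here we hint that the pointer is usually non-NULL.  The hint only
       affects code layout, never correctness.  */
    if (__builtin_expect (p != NULL, 1))
      puts ("allocation succeeded");
    else
      puts ("allocation failed");
    return 0;
  }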
@@ -3038,23 +3025,10 @@ __libc_memalign(size_t alignment, size_t bytes)
     return 0;
   p = _int_memalign(ar_ptr, alignment, bytes);
   if(!p) {
-    /* Maybe the failure is due to running out of mmapped areas. */
-    if(ar_ptr != &main_arena) {
-      (void)mutex_unlock(&ar_ptr->mutex);
-      ar_ptr = &main_arena;
-      (void)mutex_lock(&ar_ptr->mutex);
+    ar_ptr = arena_get_retry (ar_ptr, bytes);
+    if (__builtin_expect(ar_ptr != NULL, 1)) {
       p = _int_memalign(ar_ptr, alignment, bytes);
       (void)mutex_unlock(&ar_ptr->mutex);
-    } else {
-      /* ... or sbrk() has failed and there is still a chance to mmap()
-         Grab ar_ptr->next prior to releasing its lock. */
-      mstate prev = ar_ptr->next ? ar_ptr : 0;
-      (void)mutex_unlock(&ar_ptr->mutex);
-      ar_ptr = arena_get2(prev, bytes, ar_ptr);
-      if(ar_ptr) {
-        p = _int_memalign(ar_ptr, alignment, bytes);
-        (void)mutex_unlock(&ar_ptr->mutex);
-      }
     }
   } else
     (void)mutex_unlock(&ar_ptr->mutex);
@@ -3088,23 +3062,10 @@ __libc_valloc(size_t bytes)
     return 0;
   p = _int_valloc(ar_ptr, bytes);
   if(!p) {
-    /* Maybe the failure is due to running out of mmapped areas. */
-    if(ar_ptr != &main_arena) {
-      (void)mutex_unlock(&ar_ptr->mutex);
-      ar_ptr = &main_arena;
-      (void)mutex_lock(&ar_ptr->mutex);
+    ar_ptr = arena_get_retry (ar_ptr, bytes);
+    if (__builtin_expect(ar_ptr != NULL, 1)) {
       p = _int_memalign(ar_ptr, pagesz, bytes);
       (void)mutex_unlock(&ar_ptr->mutex);
-    } else {
-      /* ... or sbrk() has failed and there is still a chance to mmap()
-         Grab ar_ptr->next prior to releasing its lock. */
-      mstate prev = ar_ptr->next ? ar_ptr : 0;
-      (void)mutex_unlock(&ar_ptr->mutex);
-      ar_ptr = arena_get2(prev, bytes, ar_ptr);
-      if(ar_ptr) {
-        p = _int_memalign(ar_ptr, pagesz, bytes);
-        (void)mutex_unlock(&ar_ptr->mutex);
-      }
     }
   } else
     (void)mutex_unlock (&ar_ptr->mutex);
@@ -3136,23 +3097,10 @@ __libc_pvalloc(size_t bytes)
     arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
   p = _int_pvalloc(ar_ptr, bytes);
   if(!p) {
-    /* Maybe the failure is due to running out of mmapped areas. */
-    if(ar_ptr != &main_arena) {
-      (void)mutex_unlock(&ar_ptr->mutex);
-      ar_ptr = &main_arena;
-      (void)mutex_lock(&ar_ptr->mutex);
+    ar_ptr = arena_get_retry (ar_ptr, bytes + 2*pagesz + MINSIZE);
+    if (__builtin_expect(ar_ptr != NULL, 1)) {
       p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
       (void)mutex_unlock(&ar_ptr->mutex);
-    } else {
-      /* ... or sbrk() has failed and there is still a chance to mmap()
-         Grab ar_ptr->next prior to releasing its lock. */
-      mstate prev = ar_ptr->next ? ar_ptr : 0;
-      (void)mutex_unlock(&ar_ptr->mutex);
-      ar_ptr = arena_get2(prev, bytes + 2*pagesz + MINSIZE, ar_ptr);
-      if(ar_ptr) {
-        p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
-        (void)mutex_unlock(&ar_ptr->mutex);
-      }
     }
   } else
     (void)mutex_unlock(&ar_ptr->mutex);
@@ -3225,22 +3173,10 @@ __libc_calloc(size_t n, size_t elem_size)
 	 av == arena_for_chunk(mem2chunk(mem)));
 
   if (mem == 0) {
-    /* Maybe the failure is due to running out of mmapped areas. */
-    if(av != &main_arena) {
+    av = arena_get_retry (av, sz);
+    if (__builtin_expect(av != NULL, 1)) {
+      mem = _int_malloc(av, sz);
       (void)mutex_unlock(&av->mutex);
-      (void)mutex_lock(&main_arena.mutex);
-      mem = _int_malloc(&main_arena, sz);
-      (void)mutex_unlock(&main_arena.mutex);
-    } else {
-      /* ... or sbrk() has failed and there is still a chance to mmap()
-         Grab av->next prior to releasing its lock. */
-      mstate prev = av->next ? av : 0;
-      (void)mutex_unlock(&av->mutex);
-      av = arena_get2(prev, sz, av);
-      if(av) {
-        mem = _int_malloc(av, sz);
-        (void)mutex_unlock(&av->mutex);
-      }
     }
     if (mem == 0) return 0;
   } else