elf: Properly align PT_LOAD segments [BZ #28676]

When the alignment of a PT_LOAD segment is larger than the page size, allocate
enough space to ensure that the segment can be properly aligned.  This change
makes it straightforward for code segments to be mapped with huge pages.

This fixes [BZ #28676].

Signed-off-by: Xu Yu <xuyu@linux.alibaba.com>
Signed-off-by: Rongwei Wang <rongwei.wang@linux.alibaba.com>
Author: Rongwei Wang
Date: 2021-12-10 20:39:10 +08:00
Committed by: H.J. Lu
Commit: 718fdd87b1 (parent: 2e75604f83)
3 changed files with 50 additions and 5 deletions
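
For readers unfamiliar with the technique, the sketch below shows the same
over-reserve-and-trim idea in standalone form; it is not part of the patch.
The helper name map_at_alignment and the PROT_READ protection are illustrative
only, and the real _dl_map_segment added in elf/dl-map-segments.h below sizes
its reservation slightly differently (2 * p_align when the mapping is smaller
than the alignment) and uses the glibc-internal __mmap/__munmap and ALIGN_UP.

/* Illustration only -- not glibc code.  mmap guarantees page alignment
   at best, so for alignment > page size we reserve an oversized
   PROT_NONE region, place the real mapping at the first aligned
   address inside it with MAP_FIXED, and unmap the unused head and
   tail.  `alignment' must be a power of two and `offset' a multiple
   of the page size.  */
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>

static void *
map_at_alignment (int fd, off_t offset, size_t length, size_t alignment)
{
  /* A reservation of length + alignment bytes always contains an
     aligned window of `length' bytes, wherever the kernel puts it.  */
  size_t reserve = length + alignment;
  char *start = mmap (NULL, reserve, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (start == MAP_FAILED)
    return MAP_FAILED;

  uintptr_t aligned = ((uintptr_t) start + alignment - 1)
                      & ~((uintptr_t) alignment - 1);
  void *ret = mmap ((void *) aligned, length, PROT_READ,
                    MAP_PRIVATE | MAP_FIXED, fd, offset);
  if (ret == MAP_FAILED)
    {
      munmap (start, reserve);
      return MAP_FAILED;
    }

  /* Trim the slack on both sides of the aligned mapping.  */
  if (aligned > (uintptr_t) start)
    munmap (start, aligned - (uintptr_t) start);
  size_t tail = (uintptr_t) start + reserve - (aligned + length);
  if (tail != 0)
    munmap ((char *) aligned + length, tail);
  return ret;
}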

elf/dl-load.c

@@ -1,5 +1,6 @@
 /* Map in a shared object's segments from the file.
    Copyright (C) 1995-2021 Free Software Foundation, Inc.
+   Copyright The GNU Toolchain Authors.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -1150,6 +1151,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
           c->mapend = ALIGN_UP (ph->p_vaddr + ph->p_filesz, GLRO(dl_pagesize));
           c->dataend = ph->p_vaddr + ph->p_filesz;
           c->allocend = ph->p_vaddr + ph->p_memsz;
+          c->mapalign = ph->p_align;
           c->mapoff = ALIGN_DOWN (ph->p_offset, GLRO(dl_pagesize));
 
           /* Determine whether there is a gap between the last segment

elf/dl-load.h

@@ -1,5 +1,6 @@
 /* Map in a shared object's segments from the file.
    Copyright (C) 1995-2021 Free Software Foundation, Inc.
+   Copyright The GNU Toolchain Authors.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -74,7 +75,7 @@ ELF_PREFERRED_ADDRESS_DATA;
    Its details have been expanded out and converted.  */
 struct loadcmd
 {
-  ElfW(Addr) mapstart, mapend, dataend, allocend;
+  ElfW(Addr) mapstart, mapend, dataend, allocend, mapalign;
   ElfW(Off) mapoff;
   int prot;                             /* PROT_* bits.  */
 };

elf/dl-map-segments.h

@@ -1,5 +1,6 @@
 /* Map in a shared object's segments.  Generic version.
    Copyright (C) 1995-2021 Free Software Foundation, Inc.
+   Copyright The GNU Toolchain Authors.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +19,50 @@
 
 #include <dl-load.h>
 
+/* Map a segment and align it properly.  */
+
+static __always_inline ElfW(Addr)
+_dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
+                 const size_t maplength, int fd)
+{
+  if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
+    return (ElfW(Addr)) __mmap ((void *) mappref, maplength, c->prot,
+                                MAP_COPY|MAP_FILE, fd, c->mapoff);
+
+  /* If the segment alignment > the page size, allocate enough space to
+     ensure that the segment can be properly aligned.  */
+  ElfW(Addr) maplen = (maplength >= c->mapalign
+                       ? (maplength + c->mapalign)
+                       : (2 * c->mapalign));
+  ElfW(Addr) map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplen,
+                                              PROT_NONE,
+                                              MAP_ANONYMOUS|MAP_PRIVATE,
+                                              -1, 0);
+  if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
+    return map_start;
+
+  ElfW(Addr) map_start_aligned = ALIGN_UP (map_start, c->mapalign);
+  map_start_aligned = (ElfW(Addr)) __mmap ((void *) map_start_aligned,
+                                           maplength, c->prot,
+                                           MAP_COPY|MAP_FILE|MAP_FIXED,
+                                           fd, c->mapoff);
+  if (__glibc_unlikely ((void *) map_start_aligned == MAP_FAILED))
+    __munmap ((void *) map_start, maplen);
+  else
+    {
+      /* Unmap the unused regions.  */
+      ElfW(Addr) delta = map_start_aligned - map_start;
+      if (delta)
+        __munmap ((void *) map_start, delta);
+      ElfW(Addr) map_end = map_start_aligned + maplength;
+      delta = map_start + maplen - map_end;
+      if (delta)
+        __munmap ((void *) map_end, delta);
+    }
+
+  return map_start_aligned;
+}
+
 /* This implementation assumes (as does the corresponding implementation
    of _dl_unmap_segments, in dl-unmap-segments.h) that shared objects
    are always laid out with all segments contiguous (or with gaps
@@ -53,10 +98,7 @@ _dl_map_segments (struct link_map *l, int fd,
            - MAP_BASE_ADDR (l));
 
       /* Remember which part of the address space this object uses.  */
-      l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
-                                            c->prot,
-                                            MAP_COPY|MAP_FILE,
-                                            fd, c->mapoff);
+      l->l_map_start = _dl_map_segment (c, mappref, maplength, fd);
       if (__glibc_unlikely ((void *) l->l_map_start == MAP_FAILED))
         return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
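
A usage-level aside, not part of this commit: the point of honoring a large
p_align (for example 2 MiB) is that the text segment then starts on a
huge-page boundary.  The fragment below is a hedged sketch of the kind of
hint a consumer of such a mapping could issue afterwards; the function name
hint_hugepages is illustrative, and whether the kernel actually backs a
file-backed text mapping with huge pages depends on its configuration.

/* Illustration only -- not glibc code.  With the segment mapped at an
   address aligned to the huge-page size, MADV_HUGEPAGE becomes a
   meaningful hint.  The kernel is free to ignore it.  */
#include <stddef.h>
#include <sys/mman.h>

static void
hint_hugepages (void *aligned_base, size_t length)
{
#ifdef MADV_HUGEPAGE
  (void) madvise (aligned_base, length, MADV_HUGEPAGE);
#else
  (void) aligned_base;   /* Built on a system without MADV_HUGEPAGE.  */
  (void) length;
#endif
}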