/* Map in a shared object's segments.  Generic version.
   Copyright (C) 1995-2022 Free Software Foundation, Inc.
   Copyright The GNU Toolchain Authors.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <dl-load.h>

/* Map a segment and align it properly.  */

static __always_inline ElfW(Addr)
_dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
                 const size_t maplength, int fd)
{
  if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
    return (ElfW(Addr)) __mmap ((void *) mappref, maplength, c->prot,
                                MAP_COPY|MAP_FILE, fd, c->mapoff);

  /* If the segment alignment > the page size, allocate enough space to
     ensure that the segment can be properly aligned.  */
  ElfW(Addr) maplen = (maplength >= c->mapalign
                       ? (maplength + c->mapalign)
                       : (2 * c->mapalign));
  ElfW(Addr) map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplen,
                                              PROT_NONE,
                                              MAP_ANONYMOUS|MAP_PRIVATE,
                                              -1, 0);
  if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
    return map_start;

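  /* The oversized PROT_NONE reservation is guaranteed to contain an
     address aligned to c->mapalign with MAPLENGTH bytes following it;
     map the file contents there and release the unused head and tail
     of the reservation below.  */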
  ElfW(Addr) map_start_aligned = ALIGN_UP (map_start, c->mapalign);
  map_start_aligned = (ElfW(Addr)) __mmap ((void *) map_start_aligned,
                                           maplength, c->prot,
                                           MAP_COPY|MAP_FILE|MAP_FIXED,
                                           fd, c->mapoff);
  if (__glibc_unlikely ((void *) map_start_aligned == MAP_FAILED))
    __munmap ((void *) map_start, maplen);
  else
    {
      /* Unmap the unused regions.  */
      ElfW(Addr) delta = map_start_aligned - map_start;
      if (delta)
        __munmap ((void *) map_start, delta);
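      /* Trim the tail as well: everything between the page-aligned end
         of the aligned file mapping and the end of the original
         reservation is unused.  */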
      ElfW(Addr) map_end = map_start_aligned + maplength;
      map_end = ALIGN_UP (map_end, GLRO(dl_pagesize));
      delta = map_start + maplen - map_end;
      if (delta)
        __munmap ((void *) map_end, delta);
    }

  return map_start_aligned;
}

/* This implementation assumes (as does the corresponding implementation
   of _dl_unmap_segments, in dl-unmap-segments.h) that shared objects
   are always laid out with all segments contiguous (or with gaps
   between them small enough that it's preferable to reserve all whole
   pages inside the gaps with PROT_NONE mappings rather than permitting
   other use of those parts of the address space).  */

static __always_inline const char *
_dl_map_segments (struct link_map *l, int fd,
                  const ElfW(Ehdr) *header, int type,
                  const struct loadcmd loadcmds[], size_t nloadcmds,
                  const size_t maplength, bool has_holes,
                  struct link_map *loader)
{
  const struct loadcmd *c = loadcmds;

  if (__glibc_likely (type == ET_DYN))
    {
      /* This is a position-independent shared object.  We can let the
         kernel map it anywhere it likes, but we must have space for all
         the segments in their specified positions relative to the first.
         So we map the first segment without MAP_FIXED, but with its
         extent increased to cover all the segments.  Then we remove
         access from excess portion, and there is known sufficient space
         there to remap from the later segments.

         As a refinement, sometimes we have an address that we would
         prefer to map such objects at; but this is only a preference,
         the OS can do whatever it likes.  */
      ElfW(Addr) mappref
        = (ELF_PREFERRED_ADDRESS (loader, maplength,
                                  c->mapstart & GLRO(dl_use_load_bias))
           - MAP_BASE_ADDR (l));

      /* Remember which part of the address space this object uses.  */
      l->l_map_start = _dl_map_segment (c, mappref, maplength, fd);
      if (__glibc_unlikely ((void *) l->l_map_start == MAP_FAILED))
        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;

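      /* L_ADDR is the load bias: the difference between the run-time
         addresses and the link-time addresses recorded in the file.  */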
      l->l_map_end = l->l_map_start + maplength;
      l->l_addr = l->l_map_start - c->mapstart;

      if (has_holes)
        {
          /* Change protection on the excess portion to disallow all access;
             the portions we do not remap later will be inaccessible as if
             unallocated.  Then jump into the normal segment-mapping loop to
             handle the portion of the segment past the end of the file
             mapping.  */
          if (__glibc_unlikely
              (__mprotect ((caddr_t) (l->l_addr + c->mapend),
                           loadcmds[nloadcmds - 1].mapstart - c->mapend,
                           PROT_NONE) < 0))
            return DL_MAP_SEGMENTS_ERROR_MPROTECT;
        }

      l->l_contiguous = 1;

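      /* The first load command has already been mapped by _dl_map_segment
         above; enter the loop below at its post-processing step so that
         mapping is not repeated.  */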
      goto postmap;
    }

  /* Remember which part of the address space this object uses.  */
  l->l_map_start = c->mapstart + l->l_addr;
  l->l_map_end = l->l_map_start + maplength;
  l->l_contiguous = !has_holes;

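  /* Map each segment at its required position and set up any zero-filled
     (BSS-style) region that extends beyond the data mapped from the
     file.  */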
  while (c < &loadcmds[nloadcmds])
    {
      if (c->mapend > c->mapstart
          /* Map the segment contents from the file.  */
          && (__mmap ((void *) (l->l_addr + c->mapstart),
                      c->mapend - c->mapstart, c->prot,
                      MAP_FIXED|MAP_COPY|MAP_FILE,
                      fd, c->mapoff)
              == MAP_FAILED))
        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;

    postmap:
      _dl_postprocess_loadcmd (l, header, c);

      if (c->allocend > c->dataend)
        {
          /* Extra zero pages should appear at the end of this segment,
             after the data mapped from the file.  */
          ElfW(Addr) zero, zeroend, zeropage;

          zero = l->l_addr + c->dataend;
          zeroend = l->l_addr + c->allocend;
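          /* ZEROPAGE is ZERO rounded up to the next page boundary; the
             bytes below it fall in the last page already backed by the
             file mapping.  */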
          zeropage = ((zero + GLRO(dl_pagesize) - 1)
                      & ~(GLRO(dl_pagesize) - 1));

          if (zeroend < zeropage)
            /* All the extra data is in the last page of the segment.
               We can just zero it.  */
            zeropage = zeroend;

          if (zeropage > zero)
            {
              /* Zero the final part of the last page of the segment.  */
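              /* If the segment is not mapped writable, temporarily add
                 PROT_WRITE to its last page so it can be cleared; the
                 original protection is restored after the memset below.  */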
              if (__glibc_unlikely ((c->prot & PROT_WRITE) == 0))
                {
                  /* Dag nab it.  */
                  if (__mprotect ((caddr_t) (zero
                                             & ~(GLRO(dl_pagesize) - 1)),
                                  GLRO(dl_pagesize), c->prot|PROT_WRITE) < 0)
                    return DL_MAP_SEGMENTS_ERROR_MPROTECT;
                }
              memset ((void *) zero, '\0', zeropage - zero);
              if (__glibc_unlikely ((c->prot & PROT_WRITE) == 0))
                __mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
                            GLRO(dl_pagesize), c->prot);
            }

          if (zeroend > zeropage)
            {
              /* Map the remaining zero pages in from the zero fill FD.  */
              caddr_t mapat;
              mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
                              c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
                              -1, 0);
              if (__glibc_unlikely (mapat == MAP_FAILED))
                return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
            }
        }

      ++c;
    }

  /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
     fixed.  */
  ELF_FIXED_ADDRESS (loader, c->mapstart);

  return NULL;
}