glibc/elf/dl-map-segments.h
commit 40b60cc1db (Szabolcs Nagy, 2022-10-26 15:32:01 +01:00)
cheri: elf: Fix segment mapping permissions

Ensure mmap returns pointers with RWX permission covering all segments.
These pointers later get restricted to RX and RW permission.

/* Map in a shared object's segments.  Generic version.
   Copyright (C) 1995-2022 Free Software Foundation, Inc.
   Copyright The GNU Toolchain Authors.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <dl-load.h>

#ifdef __CHERI_PURE_CAPABILITY__
# include <cheri_perms.h>
#endif

/* Allow RWX mprotect later, on CHERI this means RWX capability
   permission.  */
#ifdef PROT_MAX
# define PROT_MAX_RWX PROT_MAX (PROT_READ | PROT_WRITE | PROT_EXEC)
#else
# define PROT_MAX_RWX 0
#endif
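/* For example (a sketch of the intended use, not a call from this file):
   a mapping created as

     __mmap (addr, len, PROT_READ|PROT_MAX_RWX, MAP_COPY|MAP_FILE, fd, off);

   starts out read-only but may later be mprotect'ed up to RWX on kernels
   that support PROT_MAX; where the flag is unsupported the macro expands
   to 0 and ORing it into the protection bits is a no-op.  */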
/* Map a segment and align it properly.  */
static __always_inline elfptr_t
_dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
                 const size_t maplength, int fd)
{
  if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
    return (elfptr_t) __mmap ((void *) mappref, maplength,
                              c->prot|PROT_MAX_RWX,
                              MAP_COPY|MAP_FILE, fd, c->mapoff);

  /* If the segment alignment > the page size, allocate enough space to
     ensure that the segment can be properly aligned.  */
  ElfW(Addr) maplen = (maplength >= c->mapalign
                       ? (maplength + c->mapalign)
                       : (2 * c->mapalign));
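  /* Illustrative numbers (an assumption, not from the source): with a
     4 KiB page size, c->mapalign == 64 KiB and maplength == 80 KiB,
     maplen is 144 KiB, enough to guarantee that the PROT_NONE
     reservation below contains a 64 KiB-aligned window of 80 KiB.  */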
  elfptr_t map_start = (elfptr_t) __mmap ((void *) mappref, maplen,
                                          PROT_NONE|PROT_MAX_RWX,
                                          MAP_ANONYMOUS|MAP_PRIVATE,
                                          -1, 0);
  if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
    return map_start;

  elfptr_t map_start_aligned = ALIGN_UP (map_start, c->mapalign);
  map_start_aligned = (elfptr_t) __mmap ((void *) map_start_aligned,
                                         maplength, c->prot|PROT_MAX_RWX,
                                         MAP_COPY|MAP_FILE|MAP_FIXED,
                                         fd, c->mapoff);
  if (__glibc_unlikely ((void *) map_start_aligned == MAP_FAILED))
    __munmap ((void *) map_start, maplen);
  else
    {
      /* Unmap the unused regions.  */
      ElfW(Addr) delta = map_start_aligned - map_start;
      if (delta)
        __munmap ((void *) map_start, delta);
      elfptr_t map_end = map_start + (map_start_aligned - map_start)
                         + maplength;
      map_end = ALIGN_UP (map_end, GLRO(dl_pagesize));
      delta = map_start + maplen - map_end;
      if (delta)
        __munmap ((void *) map_end, delta);
    }

  return map_start_aligned;
}

/* This implementation assumes (as does the corresponding implementation
   of _dl_unmap_segments, in dl-unmap-segments.h) that shared objects
   are always laid out with all segments contiguous (or with gaps
   between them small enough that it's preferable to reserve all whole
   pages inside the gaps with PROT_NONE mappings rather than permitting
   other use of those parts of the address space).  */
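/* Schematically (an illustrative layout, not mandated by the source):

     l_map_start                                          l_map_end
        | text RX | gap PROT_NONE | rodata R | data+bss RW |

   Later segments are remapped with MAP_FIXED inside the single
   reservation, so the object occupies one contiguous address range.  */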
static __always_inline const char *
_dl_map_segments (struct link_map *l, int fd,
                  const ElfW(Ehdr) *header, int type,
                  const struct loadcmd loadcmds[], size_t nloadcmds,
                  const size_t maplength, bool has_holes,
                  struct link_map *loader)
{
  const struct loadcmd *c = loadcmds;

#ifdef __CHERI_PURE_CAPABILITY__
  ElfW(Addr) rw_start = -1;
  ElfW(Addr) rw_end = 0;
#endif

  if (__glibc_likely (type == ET_DYN))
    {
      /* This is a position-independent shared object.  We can let the
         kernel map it anywhere it likes, but we must have space for all
         the segments in their specified positions relative to the first.
         So we map the first segment without MAP_FIXED, but with its
         extent increased to cover all the segments.  Then we remove
         access from excess portion, and there is known sufficient space
         there to remap from the later segments.

         As a refinement, sometimes we have an address that we would
         prefer to map such objects at; but this is only a preference,
         the OS can do whatever it likes.  */
      ElfW(Addr) mappref
        = (ELF_PREFERRED_ADDRESS (loader, maplength, c->mapstart)
           - MAP_BASE_ADDR (l));

      /* Remember which part of the address space this object uses.  */
      l->l_map_start = _dl_map_segment (c, mappref, maplength, fd);
      if (__glibc_unlikely ((void *) l->l_map_start == MAP_FAILED))
        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;

      l->l_map_end = l->l_map_start + maplength;
      l->l_addr = l->l_map_start - c->mapstart;

      if (has_holes)
        {
          /* Change protection on the excess portion to disallow all access;
             the portions we do not remap later will be inaccessible as if
             unallocated.  Then jump into the normal segment-mapping loop to
             handle the portion of the segment past the end of the file
             mapping.  */
          if (__glibc_unlikely (loadcmds[nloadcmds - 1].mapstart
                                < c->mapend))
            return N_("ELF load command address/offset not page-aligned");
          if (__glibc_unlikely
              (__mprotect ((caddr_t) dl_rx_ptr (l, c->mapend),
                           loadcmds[nloadcmds - 1].mapstart - c->mapend,
                           PROT_NONE) < 0))
            return DL_MAP_SEGMENTS_ERROR_MPROTECT;
        }

      l->l_contiguous = 1;

      goto postmap;
    }
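  /* Note: the ET_DYN path above (and the CHERI ET_EXEC path below) maps
     the whole object with a single call, then jumps to the postmap label
     inside the per-segment loop, so the first load command still goes
     through _dl_postprocess_loadcmd and the zero-fill handling.  */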
#ifdef __CHERI_PURE_CAPABILITY__
  else
    {
      /* Need a single capability to cover all load segments.  */
      void *p = __mmap ((void *) c->mapstart, maplength,
                        c->prot|PROT_MAX_RWX,
                        MAP_FIXED|MAP_COPY|MAP_FILE,
                        fd, c->mapoff);
      if (p == MAP_FAILED)
        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
      l->l_map_start = (elfptr_t) p;
      l->l_map_end = l->l_map_start + maplength;
      l->l_contiguous = !has_holes;
      goto postmap;
    }
#endif

  /* Remember which part of the address space this object uses.  */
  l->l_map_start = c->mapstart + l->l_addr;
  l->l_map_end = l->l_map_start + maplength;
  l->l_contiguous = !has_holes;

  while (c < &loadcmds[nloadcmds])
    {
      if (c->dataend > c->mapstart
          /* Map the segment contents from the file.  */
          && (__mmap ((void *) dl_rx_ptr (l, c->mapstart),
                      c->dataend - c->mapstart, c->prot,
                      MAP_FIXED|MAP_COPY|MAP_FILE,
                      fd, c->mapoff)
              == MAP_FAILED))
        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;

    postmap:
      _dl_postprocess_loadcmd (l, header, c);

#ifdef __CHERI_PURE_CAPABILITY__
      if (c->prot & PROT_WRITE)
        {
          if (l->l_rw_count >= DL_MAX_RW_COUNT)
            return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT; // TODO: right error code
          if (c->mapstart < rw_start)
            rw_start = c->mapstart;
          if (c->allocend > rw_end)
            rw_end = c->allocend;
          l->l_rw_range[l->l_rw_count].start = l->l_addr + c->mapstart;
          l->l_rw_range[l->l_rw_count].end = l->l_addr + c->allocend;
          l->l_rw_count++;
        }
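      /* rw_start/rw_end accumulate the overall writable extent of the
         object; after the loop they bound the separate RW capability
         l_rw_start.  l_rw_range records each writable segment
         individually (its consumers are outside this file).  */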
#endif

      if (c->allocend > c->dataend)
        {
          /* Extra zero pages should appear at the end of this segment,
             after the data mapped from the file.  */
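          /* Illustrative numbers (an assumption, not from the source):
             with 4 KiB pages, if the file data ends at l_addr + 0x2340
             and allocend extends to l_addr + 0x5000, the memset below
             zeroes the tail of the page at 0x2000 (0x2340..0x3000) and
             the pages at 0x3000 and 0x4000 come from the anonymous
             zero-fill mapping.  */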
          elfptr_t zero, zeroend, zeropage;

          zero = dl_rx_ptr (l, c->dataend);
          zeroend = l->l_addr + c->allocend;
          zeropage = ((zero + GLRO(dl_pagesize) - 1)
                      & ~(GLRO(dl_pagesize) - 1));

          if (zeroend < zeropage)
            /* All the extra data is in the last page of the segment.
               We can just zero it.  */
            zeropage = zeroend;

          if (zeropage > zero)
            {
              /* Zero the final part of the last page of the segment.  */
              if (__glibc_unlikely ((c->prot & PROT_WRITE) == 0))
                {
                  /* Dag nab it.  */
                  if (__mprotect ((caddr_t) (zero
                                             & ~(GLRO(dl_pagesize) - 1)),
                                  GLRO(dl_pagesize),
                                  c->prot|PROT_WRITE) < 0)
                    return DL_MAP_SEGMENTS_ERROR_MPROTECT;
                }
              memset ((void *) zero, '\0', zeropage - zero);
              if (__glibc_unlikely ((c->prot & PROT_WRITE) == 0))
                __mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
                            GLRO(dl_pagesize), c->prot);
            }

          if (zeroend > zeropage)
            {
              /* Map the remaining zero pages in from the zero fill FD.  */
              caddr_t mapat;
              mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
                              c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
                              -1, 0);
              if (__glibc_unlikely (mapat == MAP_FAILED))
                return DL_MAP_SEGMENTS_ERROR_MAP_ZERO_FILL;
            }
        }

      ++c;
    }

#ifdef __CHERI_PURE_CAPABILITY__
  if (l->l_rw_count > 0)
    {
      l->l_rw_start = __builtin_cheri_address_set (l->l_map_start,
                                                   l->l_addr + rw_start);
      l->l_rw_start = __builtin_cheri_bounds_set (l->l_rw_start,
                                                  rw_end - rw_start);
      l->l_rw_start = __builtin_cheri_perms_and (l->l_rw_start,
                                                 CAP_PERM_MASK_RW);
    }
  l->l_map_start = __builtin_cheri_perms_and (l->l_map_start,
                                              CAP_PERM_MASK_RX);
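  /* At this point l_rw_start is a capability bounded to just the
     writable segments with permissions reduced to RW, while l_map_start
     keeps the full mapping with permissions reduced to RX (the masks
     come from cheri_perms.h).  This implements the commit's intent:
     map with RWX-capable pointers, then hand out only RX and RW views.  */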
#endif

  /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
     fixed.  */
  ELF_FIXED_ADDRESS (loader, c->mapstart);

  return NULL;
}