Tue Jan 9 16:10:26 1996 Roland McGrath <roland@churchy.gnu.ai.mit.edu>

* elf/dl-load.c (_dl_map_object_from_fd): After mapping first
 	segment to find location, mprotect excess region to no access,
 	don't munmap it.

	* sysdeps/mach/hurd/dl-sysdep.c (mmap): If vm_map returns
	KERN_NO_SPACE for fixed location, deallocate it and try again.
Roland McGrath 1996-01-10 10:00:22 +00:00
parent 7410183b8b
commit 22930c9bf2
3 changed files with 37 additions and 9 deletions

ChangeLog

@@ -1,3 +1,12 @@
+Tue Jan 9 16:10:26 1996  Roland McGrath  <roland@churchy.gnu.ai.mit.edu>
+
+	* elf/dl-load.c (_dl_map_object_from_fd): After mapping first
+	segment to find location, mprotect excess region to no access,
+	don't munmap it.
+
+	* sysdeps/mach/hurd/dl-sysdep.c (mmap): If vm_map returns
+	KERN_NO_SPACE for fixed location, deallocate it and try again.
+
 Mon Jan 8 17:43:23 1996  Roland McGrath  <roland@churchy.gnu.ai.mit.edu>
 
 	* locale/Makefile (lib-modules): Renamed hash to locfile-hash.

elf/dl-load.c

@@ -355,20 +355,23 @@ _dl_map_object_from_fd (const char *name, int fd, char *realname)
 	     kernel map it anywhere it likes, but we must have space for all
 	     the segments in their specified positions relative to the first.
 	     So we map the first segment without MAP_FIXED, but with its
-	     extent increased to cover all the segments.  Then we unmap the
-	     excess portion, and there is known sufficient space there to map
-	     the later segments.  */
+	     extent increased to cover all the segments.  Then we remove
+	     access from excess portion, and there is known sufficient space
+	     there to remap from the later segments.  */
 	  caddr_t mapat;
 	  mapat = map_segment (c->mapstart,
 			       loadcmds[nloadcmds - 1].allocend - c->mapstart,
 			       c->prot, 0, c->mapoff);
 	  l->l_addr = (Elf32_Addr) mapat - c->mapstart;
 
-	  /* Unmap the excess portion, and then jump into the normal
-	     segment-mapping loop to handle the portion of the segment past
-	     the end of the file mapping.  */
-	  munmap (mapat + c->mapend,
-		  loadcmds[nloadcmds - 1].allocend - c->mapend);
+	  /* Change protection on the excess portion to disallow all access;
+	     the portions we do not remap later will be inaccessible as if
+	     unallocated.  Then jump into the normal segment-mapping loop to
+	     handle the portion of the segment past the end of the file
+	     mapping.  */
+	  mprotect (mapat + c->mapend,
+		    loadcmds[nloadcmds - 1].allocend - c->mapend,
+		    0);
 
 	  goto postmap;
 	}
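
The technique above is easiest to see in isolation. Below is a minimal sketch of the same idea in plain POSIX mmap/mprotect terms rather than the loader's map_segment helper; the function name reserve_object and its parameters are illustrative, and all lengths and offsets are assumed page-aligned. The point is that the excess tail of the oversized first mapping is made inaccessible (PROT_NONE, i.e. the 0 passed to mprotect above) instead of being unmapped, so the address range stays reserved until the later segments are mapped over it with MAP_FIXED.

#include <sys/mman.h>
#include <sys/types.h>

/* Sketch only: reserve address space for a whole object by mapping the
   first segment with its extent grown to TOTAL_LEN, then revoke access
   on the excess instead of unmapping it.  Lengths/offsets are assumed
   page-aligned.  */
static void *
reserve_object (int fd, off_t first_off, size_t first_len, size_t total_len)
{
  /* Let the kernel pick the address, but claim room for every segment.  */
  void *base = mmap (NULL, total_len, PROT_READ, MAP_PRIVATE, fd, first_off);
  if (base == MAP_FAILED)
    return MAP_FAILED;

  /* munmap'ing the tail would let an unrelated mapping land there before
     the remaining segments are mapped with MAP_FIXED; PROT_NONE keeps the
     range reserved while making it inaccessible, as if unallocated.  */
  if (total_len > first_len
      && mprotect ((char *) base + first_len, total_len - first_len,
		   PROT_NONE) != 0)
    {
      munmap (base, total_len);
      return MAP_FAILED;
    }

  return base;
}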

sysdeps/mach/hurd/dl-sysdep.c

@@ -1,5 +1,5 @@
 /* Operating system support for run-time dynamic linker.  Hurd version.
-Copyright (C) 1995 Free Software Foundation, Inc.
+Copyright (C) 1995, 1996 Free Software Foundation, Inc.
 This file is part of the GNU C Library.
 
 The GNU C Library is free software; you can redistribute it and/or
@@ -518,6 +518,22 @@ mmap (caddr_t addr, size_t len, int prot, int flags, int fd, off_t offset)
 		  flags & (MAP_COPY|MAP_PRIVATE),
 		  vmprot, VM_PROT_ALL,
 		  (flags & MAP_SHARED) ? VM_INHERIT_SHARE : VM_INHERIT_COPY);
+  if (err == KERN_NO_SPACE && (flags & MAP_FIXED))
+    {
+      /* XXX this is not atomic as it is in unix! */
+      /* The region is already allocated; deallocate it first.  */
+      err = __vm_deallocate (__mach_task_self (), mapaddr, len);
+      if (! err)
+	err = __vm_map (__mach_task_self (),
+			&mapaddr, (vm_size_t) len, 0 /*ELF_MACHINE_USER_ADDRESS_MASK*/,
+			!(flags & MAP_FIXED),
+			(mach_port_t) fd, (vm_offset_t) offset,
+			flags & (MAP_COPY|MAP_PRIVATE),
+			vmprot, VM_PROT_ALL,
+			(flags & MAP_SHARED)
+			? VM_INHERIT_SHARE : VM_INHERIT_COPY);
+    }
+
 
   return err ? (caddr_t) __hurd_fail (err) : (caddr_t) mapaddr;
 }
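
Read on its own, the added branch emulates the Unix MAP_FIXED contract on Mach, where a fixed-address vm_map over an already-allocated range fails with KERN_NO_SPACE instead of silently replacing the existing mapping. The sketch below restates that retry using the public vm_map/vm_deallocate calls corresponding to the __-prefixed internal names above; the helper name map_fixed_replacing is illustrative, it assumes a Mach/Hurd build environment providing <mach.h>, and, as the XXX comment warns, deallocate-then-map is not atomic the way Unix MAP_FIXED is.

#include <mach.h>

/* Sketch only: emulate Unix MAP_FIXED semantics on Mach.  A fixed-address
   vm_map over an already-allocated range fails with KERN_NO_SPACE, so
   deallocate the range and try once more.  Unlike Unix MAP_FIXED this is
   not atomic: another thread could allocate the hole in between.  */
static kern_return_t
map_fixed_replacing (vm_address_t *addr, vm_size_t len,
		     mach_port_t memobj, vm_offset_t offset,
		     boolean_t copy, vm_prot_t prot)
{
  kern_return_t err = vm_map (mach_task_self (), addr, len,
			      0 /* alignment mask */, 0 /* anywhere? no */,
			      memobj, offset, copy,
			      prot, VM_PROT_ALL, VM_INHERIT_COPY);
  if (err == KERN_NO_SPACE)
    {
      /* The region is already allocated; deallocate it, then retry.  */
      err = vm_deallocate (mach_task_self (), *addr, len);
      if (! err)
	err = vm_map (mach_task_self (), addr, len, 0, 0,
		      memobj, offset, copy,
		      prot, VM_PROT_ALL, VM_INHERIT_COPY);
    }
  return err;
}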