base-linux: revised region management

Revised region management detects region conflicts by using _soft_
mappings per default. Overmapping is activated for population of managed
dataspaces only. For more information see header documentation of
base-linux/src/base/env/rm_session_mmap.cc.

Fixes #883.
This commit is contained in:
Christian Helmuth
2013-09-06 16:30:46 +02:00
parent f763e5ec2a
commit 385b7cdd31
12 changed files with 381 additions and 61 deletions

View File

@ -196,6 +196,13 @@ namespace Genode {
void _add_to_rmap(Region const &);
/**
* Reserve VM region for sub-rm dataspace
*/
addr_t _reserve_local(bool use_local_addr,
addr_t local_addr,
Genode::size_t size);
/**
* Map dataspace into local address space
*/
@ -204,7 +211,8 @@ namespace Genode {
addr_t offset,
bool use_local_addr,
addr_t local_addr,
bool executable);
bool executable,
bool overmap = false);
/**
* Determine size of dataspace

View File

@ -2,6 +2,27 @@
* \brief Implementation of Linux-specific local region manager
* \author Norman Feske
* \date 2008-10-22
*
* Under Linux, region management happens at the mercy of the Linux kernel. So,
* all we can do in user land is 1) keep track of regions and (managed)
 * dataspaces and 2) get the kernel to manage VM regions as we intend.
*
* The kernel sets up mappings for the binary on execve(), which are text and
* data segments, the context area and special regions (stack, vdso, vsyscall).
* Later mappings are done by the Genode program itself, which knows nothing
* about these initial mappings. Therefore, most mmap() operations are _soft_
* to detect region conflicts with existing mappings or let the kernel find
* some empty VM area (as core does on other platforms). The only _hard_
* overmaps happen on attachment and population of managed dataspaces. Mapped,
* but not populated dataspaces are "holes" in the Linux VM space represented
* by PROT_NONE mappings (see _reserve_local()).
*
* The context area is a managed dataspace as on other platforms, which is
* created and attached during program launch. The managed dataspace replaces
 * the initial reserved area, which is therefore flushed beforehand. Hybrid
* programs have no context area.
*
* Note, we do not support nesting of managed dataspaces.
*/
/*
@ -15,6 +36,7 @@
#include <base/thread.h>
#include <linux_dataspace/client.h>
#include <linux_syscalls.h>
#include <context_area.h>
/* local includes */
#include <platform_env.h>
@ -31,18 +53,66 @@ static bool is_sub_rm_session(Dataspace_capability ds)
}
addr_t Platform_env_base::Rm_session_mmap::_reserve_local(bool use_local_addr,
                                                          addr_t local_addr,
                                                          Genode::size_t size)
{
	/*
	 * Reserve a local VM region by creating a PROT_NONE mapping.
	 *
	 * The mmap call is deliberately _soft_ (no MAP_FIXED): if the kernel
	 * cannot place the mapping at the requested address, it places it
	 * elsewhere, which we detect as a region conflict.
	 *
	 * \param use_local_addr  reserve exactly at 'local_addr' if true
	 * \param local_addr      desired local base address of the reservation
	 * \param size            size of the region to reserve in bytes
	 * \return                local base address of the reservation
	 * \throw Region_conflict if the region could not be reserved at the
	 *                        requested address or mmap failed
	 */

	/* special handling for context area */
	if (use_local_addr
	 && local_addr == Native_config::context_area_virtual_base()
	 && size       == Native_config::context_area_virtual_size()) {

		/*
		 * On the first request to reserve the context area, we flush the
		 * initial mapping preserved in linker script and apply the actual
		 * reservation. Subsequent requests are just ignored.
		 */
		static struct Context
		{
			Context()
			{
				flush_context_area();
				reserve_context_area();
			}
		} inst;

		return local_addr;
	}

	int const flags = MAP_ANONYMOUS | MAP_PRIVATE;
	int const prot  = PROT_NONE;

	void * const addr_in  = use_local_addr ? (void *)local_addr : 0;
	void * const addr_out = lx_mmap(addr_in, size, prot, flags, -1, 0);

	/* Linux syscalls report errors as return values in [-4095, -1] */
	bool const mmap_failed = ((long)addr_out < 0) && ((long)addr_out > -4095);

	/*
	 * Reservation at the requested local address failed because the kernel
	 * placed the soft mapping elsewhere - remove the stray mapping. Only do
	 * so if mmap actually succeeded: an error code is not a mapping and must
	 * not be handed to munmap.
	 */
	if (!mmap_failed && use_local_addr && addr_in != addr_out)
		lx_munmap((void *)addr_out, size);

	if (mmap_failed || (use_local_addr && addr_in != addr_out)) {
		PERR("_reserve_local: lx_mmap failed (addr_in=%p,addr_out=%p/%ld)",
		     addr_in, addr_out, (long)addr_out);
		throw Rm_session::Region_conflict();
	}

	return (addr_t) addr_out;
}
void *
Platform_env_base::Rm_session_mmap::_map_local(Dataspace_capability ds,
Genode::size_t size,
addr_t offset,
bool use_local_addr,
addr_t local_addr,
bool executable)
bool executable,
bool overmap)
{
int const fd = _dataspace_fd(ds);
bool const writable = _dataspace_writable(ds);
int const flags = MAP_SHARED | (use_local_addr ? MAP_FIXED : 0);
int const flags = MAP_SHARED | (overmap ? MAP_FIXED : 0);
int const prot = PROT_READ
| (writable ? PROT_WRITE : 0)
| (executable ? PROT_EXEC : 0);
@ -57,8 +127,14 @@ Platform_env_base::Rm_session_mmap::_map_local(Dataspace_capability ds,
*/
lx_close(fd);
if (((long)addr_out < 0) && ((long)addr_out > -4095)) {
PERR("_map_local: return value of mmap is %ld", (long)addr_out);
/* attach at local address failed - unmap incorrect mapping */
if (use_local_addr && addr_in != addr_out)
lx_munmap((void *)addr_out, size);
if ((use_local_addr && addr_in != addr_out)
|| (((long)addr_out < 0) && ((long)addr_out > -4095))) {
PERR("_map_local: lx_mmap failed (addr_in=%p,addr_out=%p/%ld) overmap=%d",
addr_in, addr_out, (long)addr_out, overmap);
throw Rm_session::Region_conflict();
}
@ -142,11 +218,12 @@ Platform_env::Rm_session_mmap::attach(Dataspace_capability ds,
* Case 3.1
*
* This RM session is a sub RM session. If the sub RM session is
* attached (_base > 0), add its attachement offset to the local base
* and map it.
* attached (_base > 0), add its attachment offset to the local base
* and map it. We have to enforce the mapping via the 'overmap'
* argument as the region was reserved by a PROT_NONE mapping.
*/
if (_is_attached())
_map_local(ds, region_size, offset, true, _base + (addr_t)local_addr, executable);
_map_local(ds, region_size, offset, true, _base + (addr_t)local_addr, executable, true);
return (void *)local_addr;
@ -171,20 +248,19 @@ Platform_env::Rm_session_mmap::attach(Dataspace_capability ds,
throw Out_of_metadata();
}
_add_to_rmap(Region(local_addr, offset, ds, region_size));
/*
* Allocate local address range that can hold the entire sub RM
* Reserve local address range that can hold the entire sub RM
* session.
*/
rm->_base = lx_vm_reserve(use_local_addr ? (addr_t)local_addr : 0,
region_size);
rm->_base = _reserve_local(use_local_addr, local_addr, region_size);
_add_to_rmap(Region(rm->_base, offset, ds, region_size));
/*
* Cases 2.2, 3.2
*
* The sub rm session was not attached until now but it may have
* been populated with dataspaces. Go through all regions an map
* been populated with dataspaces. Go through all regions and map
* each of them.
*/
for (int i = 0; i < Region_map::MAX_REGIONS; i++) {
@ -192,9 +268,13 @@ Platform_env::Rm_session_mmap::attach(Dataspace_capability ds,
if (!region.used())
continue;
/*
* We have to enforce the mapping via the 'overmap' argument as
* the region was reserved by a PROT_NONE mapping.
*/
_map_local(region.dataspace(), region.size(), region.offset(),
true, rm->_base + region.start() + region.offset(),
executable);
executable, true);
}
return rm->_base;
@ -205,6 +285,7 @@ Platform_env::Rm_session_mmap::attach(Dataspace_capability ds,
* Case 1
*
* Boring, a plain dataspace is attached to a root RM session.
* Note, we do not overmap.
*/
void *addr = _map_local(ds, region_size, offset, use_local_addr,
local_addr, executable);
@ -252,8 +333,10 @@ void Platform_env::Rm_session_mmap::detach(Rm_session::Local_addr local_addr)
* If we are not attached, no local address-space manipulation is
* needed.
*/
if (_is_attached())
lx_vm_reserve((addr_t)local_addr + _base, region.size());
if (_is_attached()) {
lx_munmap((void *)((addr_t)local_addr + _base), region.size());
_reserve_local(true, (addr_t)local_addr + _base, region.size());
}
} else {
@ -277,5 +360,4 @@ void Platform_env::Rm_session_mmap::detach(Rm_session::Local_addr local_addr)
if (rm)
rm->_base = 0;
}
}