core: rework page-fault resolution

The change "core: allow offset-attached managed dataspaces" addressed a
corner case of the use of nested region maps. Apparently, this change
negatively affects other scenarios (tool_chain_auto).

In order to confidently cover all the different situations, this patch
reworks the page-fault resolution code for improved clarity and safety,
by introducing dedicated result types, reducing the use of basic types,
choosing expressive names, and fostering constness.

It also introduces a number of 'print' hooks that greatly ease manual
instrumentation and streamlines the error messages printed by core.
Those messages no longer appear when a user-level page-fault handler
is registered for the faulted-at region map. So the monitor component
produces less noise on the attempt to dump non-existing memory.

Issue #4917
Fixes #4920
This commit is contained in:
Norman Feske
2023-06-09 18:08:47 +02:00
parent 65d3b3a32f
commit a4c59c03e3
29 changed files with 638 additions and 444 deletions

View File

@ -0,0 +1,62 @@
/*
* \brief Memory-address range
* \author Norman Feske
* \date 2023-06-11
*/
/*
* Copyright (C) 2023 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#ifndef _CORE__INCLUDE__ADDR_RANGE_H_
#define _CORE__INCLUDE__ADDR_RANGE_H_
/* core includes */
#include <types.h>
namespace Core { struct Addr_range; }
struct Core::Addr_range
{
	addr_t start;
	addr_t end;   /* address of the last byte of the range */

	/* a default-constructed { 0, 0 } range is deliberately invalid */
	bool valid() const { return start < end; }

	/**
	 * Return overlap of this range and 'other', invalid if disjoint
	 */
	Addr_range intersected(Addr_range const &other) const
	{
		if (valid() && other.valid())
			return { max(start, other.start), min(end, other.end) };

		return { };
	}

	/**
	 * Return true if address 'at' lies within the range
	 */
	bool contains(addr_t at) const { return !(at < start) && !(at > end); }

	/**
	 * Return range shifted downwards by 'offset', invalid on underflow
	 */
	Addr_range reduced_by(addr_t offset) const
	{
		if (!valid() || (start < offset))
			return { };

		return Addr_range { start - offset, end - offset };
	}

	/**
	 * Return range shifted upwards by 'offset', invalid on overflow
	 */
	Addr_range increased_by(addr_t offset) const
	{
		bool const overflow = (start + offset < offset) || (end + offset < offset);

		if (!valid() || overflow)
			return { };

		return Addr_range { start + offset, end + offset };
	}

	void print(Output &out) const
	{
		Genode::print(out, "[", Hex(start), ",", Hex(end), "]");
	}
};
#endif /* _CORE__INCLUDE__ADDR_RANGE_H_ */

View File

@ -37,6 +37,10 @@ namespace Core {
class Core::Dataspace_component : public Rpc_object<Dataspace>
{
public:
struct Attr { addr_t base; size_t size; bool writeable; };
private:
addr_t const _phys_addr = 0; /* address of dataspace in physical memory */
@ -137,6 +141,10 @@ class Core::Dataspace_component : public Rpc_object<Dataspace>
return Core::map_src_addr(_core_local_addr, _phys_addr);
}
/* summarize the mapping-relevant dataspace properties as plain data */
Attr attr() const { return { .base = map_src_addr(),
                             .size = _size,
                             .writeable = _writeable }; }
void assign_core_local_addr(void *addr) { _core_local_addr = (addr_t)addr; }
void attached_to(Rm_region &region);
@ -161,6 +169,13 @@ class Core::Dataspace_component : public Rpc_object<Dataspace>
size_t size() override { return _size; }
bool writeable() override { return _writeable; }
/* print the dataspace's address range as [base,last], used for diagnostics */
void print(Output &out) const
{
	addr_t const base = map_src_addr();
	Genode::print(out, "[", Hex(base), ",", Hex(base + _size - 1), "]");
}
};
#endif /* _CORE__INCLUDE__DATASPACE_COMPONENT_H_ */

View File

@ -0,0 +1,136 @@
/*
* \brief Utility for dealing with log2 alignment constraints
* \author Norman Feske
* \date 2023-06-11
*/
/*
* Copyright (C) 2023 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#ifndef _CORE__INCLUDE__LOG2_RANGE_H_
#define _CORE__INCLUDE__LOG2_RANGE_H_
/* core includes */
#include <util.h>
#include <addr_range.h>
namespace Core { struct Log2_range; }
struct Core::Log2_range
{
	Addr hotspot { 0 };  /* address that must lie within the range */
	Addr base    { 0 };  /* log2-aligned start of the range */
	Log2 size    { 0 };  /* log2 of the range size */

	/* a range smaller than one page cannot be mapped and counts as invalid */
	bool valid() const { return size.log2 >= get_page_size_log2(); }

	static constexpr Log2 UNCONSTRAINED = { uint8_t(~0) };

	/**
	 * Default constructor, constructs invalid range
	 */
	Log2_range() { }

	/**
	 * Constructor, hotspot area spans the maximum address-space size
	 */
	Log2_range(Addr hotspot) : hotspot(hotspot), size(UNCONSTRAINED) { }

	/**
	 * Constrain range to specified region
	 */
	Log2_range constrained(Addr_range region) const
	{
		/* last address of the current range, whole address space if unconstrained */
		addr_t const upper_bound = (size.log2 == UNCONSTRAINED.log2)
		                         ? ~0UL : (base.value + (1UL << size.log2) - 1);

		/*
		 * Find flexpage around hotspot that lies within the specified region.
		 *
		 * Start with a 'size' of one less than the minimal page size.
		 * If the specified constraint conflicts with the existing range,
		 * the loop breaks at the first iteration and we can check for this
		 * condition after the loop.
		 */
		Log2_range result { hotspot };
		result.size = { get_page_size_log2() - 1 };

		/* grow the flexpage as long as it stays within range and region */
		for (uint8_t try_size_log2 = get_page_size_log2();
		     try_size_log2 < sizeof(addr_t)*8 ; try_size_log2++) {
			addr_t const fpage_mask = ~((1UL << try_size_log2) - 1);
			addr_t const try_base = hotspot.value & fpage_mask;

			/* check lower bound of existing range */
			if (try_base < base.value)
				break;

			/* check against upper bound of existing range */
			if (try_base + (1UL << try_size_log2) - 1 > upper_bound)
				break;

			/* check against lower bound of region */
			if (try_base < region.start)
				break;

			/* check against upper bound of region */
			if (try_base + (1UL << try_size_log2) - 1 > region.end)
				break;

			/* flexpage is compatible with the range, use it */
			result.size = { try_size_log2 };
			result.base = { try_base };
		}

		/* invalid if even the smallest flexpage conflicts with the region */
		return result.valid() ? result : Log2_range { };
	}

	/**
	 * Constrain range around hotspot to specified log2 size
	 */
	Log2_range constrained(Log2 value) const
	{
		Log2_range result = *this;

		/* a constraint not smaller than the current size has no effect */
		if (value.log2 >= size.log2)
			return result;

		result.base = { hotspot.value & ~((1UL << value.log2) - 1) };
		result.size = value;
		return result;
	}

	/**
	 * Determine common log2 size compatible with both ranges
	 */
	static Log2 common_log2(Log2_range const &r1, Log2_range const &r2)
	{
		/*
		 * We have to make sure that the offset of hotspot
		 * relative to the flexpage base is the same for both ranges.
		 * This condition is met by the flexpage size equal to the number
		 * of common least-significant bits of both offsets.
		 */
		size_t const diff = (r1.hotspot.value - r1.base.value)
		                  ^ (r2.hotspot.value - r2.base.value);

		/*
		 * Find highest clear bit in 'diff', starting from the least
		 * significant candidate. We can skip all bits lower then
		 * 'get_page_size_log2()' because they are not relevant as
		 * flexpage size (and are always zero).
		 */
		uint8_t n = get_page_size_log2();
		size_t const min_size_log2 = min(r1.size.log2, r2.size.log2);
		for (; n < min_size_log2 && !(diff & (1UL << n)); n++);

		return Log2 { n };
	}
};
#endif /* _CORE__INCLUDE__LOG2_RANGE_H_ */

View File

@ -101,6 +101,8 @@ class Core::Pager_object : public Object_pool<Pager_object>::Entry
unsigned long badge() const { return _badge; }
enum class Pager_result { STOP, CONTINUE };
/**
* Interface to be implemented by a derived class
*
@ -108,7 +110,7 @@ class Core::Pager_object : public Object_pool<Pager_object>::Entry
*
* Returns !0 on error and pagefault will not be answered.
*/
virtual int pager(Ipc_pager &ps) = 0;
virtual Pager_result pager(Ipc_pager &ps) = 0;
/**
* Wake up the faulter

View File

@ -33,20 +33,22 @@
#include <platform.h>
#include <dataspace_component.h>
#include <util.h>
#include <log2_range.h>
#include <address_space.h>
/* base-internal includes */
#include <base/internal/stack_area.h>
namespace Core {
class Cpu_thread_component;
class Dataspace_component;
class Region_map_component;
class Region_map_detach;
class Rm_client;
class Rm_region;
class Rm_faulter;
class Rm_session_component;
class Region_map_detach;
class Rm_region;
struct Fault;
class Cpu_thread_component;
class Dataspace_component;
class Region_map_component;
class Rm_client;
class Rm_faulter;
class Rm_session_component;
}
@ -56,9 +58,9 @@ class Core::Region_map_detach : Interface
virtual void detach(Region_map::Local_addr) = 0;
virtual void unmap_region(addr_t base, size_t size) = 0;
};
/**
* Representation of a single entry of a region map
*
@ -80,6 +82,13 @@ class Core::Rm_region : public List<Rm_region>::Element
bool exec;
off_t off;
bool dma;
/* print address range, permissions, offset, and DMA flag of the region */
void print(Output &out) const
{
	Genode::print(out, "[", Hex(base), ",", Hex(base + size - 1), " "
	              "(r", write ? "w" : "-", exec ? "x" : "-", ") "
	              "offset: ", Hex(off), dma ? " DMA" : "");
}
};
private:
@ -104,6 +113,71 @@ class Core::Rm_region : public List<Rm_region>::Element
bool dma() const { return _attr.dma; }
Dataspace_component &dataspace() const { return _dsc; }
Region_map_detach &rm() const { return _rm; }
Addr_range range() const { return { .start = _attr.base,
.end = _attr.base + _attr.size - 1 }; }
void print(Output &out) const { Genode::print(out, _attr); }
};
struct Core::Fault
{
	Addr       hotspot; /* page-fault address */
	Access     access;  /* reason for the fault, used to detect violations */
	Rwx        rwx;     /* mapping rights, downgraded by 'within_' methods */
	Addr_range bounds;  /* limits of the fault's coordinate system */

	bool write_access() const { return access == Access::WRITE; }
	bool exec_access()  const { return access == Access::EXEC; }

	/**
	 * Translate fault information to region-relative coordinates
	 */
	Fault within_region(Rm_region const &region) const
	{
		return Fault {
			.hotspot = hotspot.reduced_by(region.base()),
			.access  = access,
			/* drop write/exec rights not granted by the region */
			.rwx     = { .w = rwx.w && region.write(),
			             .x = rwx.x && region.executable() },
			.bounds  = bounds.intersected(region.range())
			                 .reduced_by(region.base())
		};
	}

	/**
	 * Translate fault information to coordinates within a sub region map
	 */
	Fault within_sub_region_map(addr_t offset, size_t region_map_size) const
	{
		return {
			.hotspot = hotspot.increased_by(offset),
			.access  = access,
			.rwx     = rwx,
			/* clamp bounds to the extent of the sub region map */
			.bounds  = bounds.intersected({ 0, region_map_size })
			                 .increased_by(offset)
		};
	};

	/**
	 * Translate fault information to physical coordinates for memory mapping
	 */
	Fault within_ram(addr_t offset, Dataspace_component::Attr dataspace) const
	{
		return {
			.hotspot = hotspot.increased_by(offset)
			                  .increased_by(dataspace.base),
			.access  = access,
			/* a read-only dataspace revokes the write right */
			.rwx     = { .w = rwx.w && dataspace.writeable,
			             .x = rwx.x },
			.bounds  = bounds.increased_by(offset)
			                 .intersected({ 0, dataspace.size })
			                 .increased_by(dataspace.base)
		};
	};

	void print(Output &out) const { Genode::print(out, access, " at address ", hotspot); }
};
@ -209,7 +283,7 @@ class Core::Rm_client : public Pager_object, public Rm_faulter,
Rm_faulter(static_cast<Pager_object &>(*this)), _region_map(rm)
{ }
int pager(Ipc_pager &pager) override;
Pager_result pager(Ipc_pager &pager) override;
/**
* Return region map that the RM client is member of
@ -223,6 +297,11 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
private List<Region_map_component>::Element,
public Region_map_detach
{
public:
enum class With_mapping_result { RESOLVED, RECURSION_LIMIT, NO_REGION,
REFLECTED, WRITE_VIOLATION, EXEC_VIOLATION };
private:
friend class List<Region_map_component>;
@ -235,8 +314,7 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
Allocator &_md_alloc;
Signal_transmitter _fault_notifier { }; /* notification mechanism for
region-manager faults */
Signal_context_capability _fault_sigh { };
Address_space *_address_space { nullptr };
@ -305,56 +383,87 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
the region map and wait
for fault resolution */
List<Rm_client> _clients { }; /* list of RM clients using this region map */
Mutex _mutex { }; /* mutex for map and list */
Mutex mutable _mutex { }; /* mutex for map and list */
Pager_entrypoint &_pager_ep;
Rm_dataspace_component _ds; /* dataspace representation of region map */
Dataspace_capability _ds_cap;
template <typename F>
auto _apply_to_dataspace(addr_t addr, F const &f, addr_t offset,
unsigned level, addr_t dst_region_size)
-> typename Trait::Functor<decltype(&F::operator())>::Return_type
struct Recursion_limit { unsigned value; };
/**
* Resolve region at a given fault address
*
* /param resolved_fn functor called with the resolved region and the
* region-relative fault information
*
* Called recursively when resolving a page fault in nested region maps.
*/
With_mapping_result _with_region_at_fault(Recursion_limit const recursion_limit,
Fault const &fault,
auto const &resolved_fn,
auto const &reflect_fn)
{
using Functor = Trait::Functor<decltype(&F::operator())>;
using Return_type = typename Functor::Return_type;
using Result = With_mapping_result;
if (recursion_limit.value == 0)
return Result::RECURSION_LIMIT;
Mutex::Guard lock_guard(_mutex);
/* skip further lookup when reaching the recursion limit */
if (!level)
return f(this, nullptr, 0, 0, dst_region_size);
/* lookup region and dataspace */
Rm_region *region = _map.metadata((void*)addr);
Dataspace_component *dsc = region ? &region->dataspace()
: nullptr;
Rm_region const * const region_ptr = _map.metadata((void*)fault.hotspot.value);
if (region && dst_region_size > region->size())
dst_region_size = region->size();
/* calculate offset in dataspace */
addr_t ds_offset = region ? (addr - region->base()
+ region->offset()) : 0;
/* check for nested dataspace */
Native_capability cap = dsc ? dsc->sub_rm()
: Native_capability();
if (!cap.valid())
return f(this, region, ds_offset, offset, dst_region_size);
/* in case of a nested dataspace perform a recursive lookup */
auto lambda = [&] (Region_map_component *rmc) -> Return_type
auto reflect_fault = [&] (Result result)
{
if (rmc)
return rmc->_apply_to_dataspace(ds_offset, f,
offset + region->base() - region->offset(),
--level,
dst_region_size);
if (!_fault_sigh.valid())
return result; /* not reflected to user land */
return f(nullptr, nullptr, ds_offset, offset, dst_region_size);
reflect_fn(*this, fault);
return Result::REFLECTED; /* omit diagnostics */
};
return _session_ep.apply(cap, lambda);
if (!region_ptr)
return reflect_fault(Result::NO_REGION);
Rm_region const &region = *region_ptr;
/* fault information relative to 'region' */
Fault const relative_fault = fault.within_region(region);
Dataspace_component &dataspace = region.dataspace();
Native_capability managed_ds_cap = dataspace.sub_rm();
/* region refers to a regular dataspace */
if (!managed_ds_cap.valid()) {
bool const writeable = relative_fault.rwx.w
&& dataspace.writeable();
bool const write_violation = relative_fault.write_access()
&& !writeable;
bool const exec_violation = relative_fault.exec_access()
&& !relative_fault.rwx.x;
if (write_violation) return reflect_fault(Result::WRITE_VIOLATION);
if (exec_violation) return reflect_fault(Result::EXEC_VIOLATION);
return resolved_fn(region, relative_fault);
}
/* traverse into managed dataspace */
Fault const sub_region_map_relative_fault =
relative_fault.within_sub_region_map(region.offset(),
dataspace.size());
Result result = Result::NO_REGION;
_session_ep.apply(managed_ds_cap, [&] (Region_map_component *rmc_ptr) {
if (rmc_ptr)
result = rmc_ptr->_with_region_at_fault({ recursion_limit.value - 1 },
sub_region_map_relative_fault,
resolved_fn, reflect_fn); });
return result;
}
/*
@ -410,8 +519,6 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
void address_space(Address_space *space) { _address_space = space; }
Address_space *address_space() { return _address_space; }
class Fault_area;
/**
* Register fault
*
@ -436,18 +543,60 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
Rm_dataspace_component &dataspace_component() { return _ds; }
/**
* Apply a function to dataspace attached at a given address
* Call 'apply_fn' with resolved mapping information for given fault
*
* /param addr address where the dataspace is attached
* /param f functor or lambda to apply
* /param apply_fn functor called with a 'Mapping' that is suitable
* for resolving given the 'fault'
* /param reflect_fn functor called to reflect a missing mapping
* to user space if a fault handler is registered
*/
template <typename F>
auto apply_to_dataspace(addr_t addr, F f)
-> typename Trait::Functor<decltype(&F::operator())>::Return_type
With_mapping_result with_mapping_for_fault(Fault const &fault,
auto const &apply_fn,
auto const &reflect_fn)
{
enum { RECURSION_LIMIT = 5 };
return _with_region_at_fault(Recursion_limit { 5 }, fault,
[&] (Rm_region const &region, Fault const &region_relative_fault)
{
Dataspace_component &dataspace = region.dataspace();
return _apply_to_dataspace(addr, f, 0, RECURSION_LIMIT, ~0UL);
Fault const ram_relative_fault =
region_relative_fault.within_ram(region.offset(), dataspace.attr());
Log2_range src_range { ram_relative_fault.hotspot };
Log2_range dst_range { fault.hotspot };
src_range = src_range.constrained(ram_relative_fault.bounds);
Log2 const common_size = Log2_range::common_log2(dst_range,
src_range);
Log2 const map_size = kernel_constrained_map_size(common_size);
src_range = src_range.constrained(map_size);
dst_range = dst_range.constrained(map_size);
if (!src_range.valid() || !dst_range.valid()) {
error("invalid mapping");
return With_mapping_result::NO_REGION;
}
Mapping const mapping {
.dst_addr = dst_range.base.value,
.src_addr = src_range.base.value,
.size_log2 = map_size.log2,
.cached = dataspace.cacheability() == CACHED,
.io_mem = dataspace.io_mem(),
.dma_buffer = region.dma(),
.write_combined = dataspace.cacheability() == WRITE_COMBINED,
.writeable = ram_relative_fault.rwx.w,
.executable = ram_relative_fault.rwx.x
};
apply_fn(mapping);
return With_mapping_result::RESOLVED;
},
reflect_fn
);
}
/**
@ -458,16 +607,6 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
void add_client(Rm_client &);
void remove_client(Rm_client &);
/**
* Create mapping item to be placed into the page table
*/
static Mapping create_map_item(Region_map_component *region_map,
Rm_region &region,
addr_t ds_offset,
addr_t region_offset,
Dataspace_component &dsc,
addr_t, addr_t);
using Attach_dma_result = Pd_session::Attach_dma_result;
Attach_dma_result attach_dma(Dataspace_capability, addr_t);

View File

@ -19,6 +19,56 @@
#include <util/interface.h>
#include <base/log.h>
namespace Core { using namespace Genode; }
namespace Core {
using namespace Genode;
struct Log2 { uint8_t log2; };
enum class Access { READ, WRITE, EXEC };
struct Addr
{
addr_t value;
Addr reduced_by(addr_t offset) const
{
return { (value >= offset) ? (value - offset) : 0 };
}
Addr increased_by(addr_t offset) const
{
return { (value + offset >= offset) ? (value + offset) : 0 };
}
void print(Output &out) const { Genode::print(out, Hex(value)); }
};
struct Rwx
{
bool w, x;
static constexpr bool r = true;
static constexpr Rwx rwx() { return { true, true }; }
void print(Output &out) const
{
Genode::print(out, "(r", w ? "w" : "-", x ? "x" : "-", ")");
}
};
}
namespace Genode {

	/* make 'Core::Access' printable, used by core's page-fault diagnostics */
	static inline void print(Output &out, Core::Access access)
	{
		if      (access == Core::Access::READ)  print(out, "READ");
		else if (access == Core::Access::WRITE) print(out, "WRITE");
		else if (access == Core::Access::EXEC)  print(out, "EXEC");
	}
}
#endif /* _CORE__INCLUDE__TYPES_H_ */

View File

@ -39,11 +39,8 @@ void Pager_entrypoint::entry()
obj->submit_exception_signal();
} else {
/* send reply if page-fault handling succeeded */
reply_pending = !obj->pager(_pager);
if (!reply_pending)
warning("page-fault, ", *obj,
" ip=", Hex(_pager.fault_ip()),
" pf-addr=", Hex(_pager.fault_addr()));
using Result = Pager_object::Pager_result;
reply_pending = (obj->pager(_pager) == Result::CONTINUE);
}
} else {

View File

@ -23,240 +23,93 @@
#include <region_map_component.h>
#include <dataspace_component.h>
static const bool verbose_page_faults = false;
/*
 * Log2-aligned flexpage area around a page-fault address
 *
 * NOTE(review): this utility mirrors the logic of 'Core::Log2_range'
 */
struct Core::Region_map_component::Fault_area
{
	addr_t _fault_addr = 0;  /* page-fault address */
	addr_t _base       = 0;  /* base of the log2-aligned flexpage */
	size_t _size_log2  = 0;  /* log2 of the flexpage size, 0 if invalid */

	/* last address of the area, whole address space if unconstrained */
	addr_t _upper_bound() const {
		return (_size_log2 == ~0UL) ? ~0UL : (_base + (1UL << _size_log2) - 1); }

	/**
	 * Default constructor, constructs invalid fault area
	 */
	Fault_area() { }

	/**
	 * Constructor, fault area spans the maximum address-space size
	 */
	Fault_area(addr_t fault_addr) :
		_fault_addr(fault_addr), _size_log2(~0UL) { }

	/**
	 * Constrain fault area to specified region
	 */
	void constrain(addr_t region_base, size_t region_size)
	{
		/*
		 * Find flexpage around _fault_addr that lies within the
		 * specified region.
		 *
		 * Start with a 'size_log2' of one less than the minimal
		 * page size. If the specified constraint conflicts with
		 * the existing fault area, the loop breaks at the first
		 * iteration and we can check for this condition after the
		 * loop.
		 */
		size_t size_log2 = get_page_size_log2() - 1;
		addr_t base = 0;
		for (size_t try_size_log2 = get_page_size_log2();
		     try_size_log2 < sizeof(addr_t)*8 ; try_size_log2++) {
			addr_t fpage_mask = ~((1UL << try_size_log2) - 1);
			addr_t try_base = _fault_addr & fpage_mask;

			/* check lower bound of existing fault area */
			if (try_base < _base)
				break;

			/* check against upper bound of existing fault area */
			if (try_base + (1UL << try_size_log2) - 1 > _upper_bound())
				break;

			/* check against lower bound of region */
			if (try_base < region_base)
				break;

			/* check against upper bound of region */
			if (try_base + (1UL << try_size_log2) - 1 > region_base + region_size - 1)
				break;

			/* flexpage is compatible with fault area, use it */
			size_log2 = try_size_log2;
			base = try_base;
		}

		/* if the constraint is incompatible with the fault area, invalidate */
		if (size_log2 < get_page_size_log2()) {
			_size_log2 = 0;
			_base = 0;
		} else {
			_size_log2 = size_log2;
			_base = base;
		}
	}

	/**
	 * Constrain fault area to specified flexpage size
	 */
	void constrain(size_t size_log2)
	{
		/* a constraint not smaller than the current size has no effect */
		if (size_log2 >= _size_log2)
			return;

		_base = _fault_addr & ~((1UL << size_log2) - 1);
		_size_log2 = size_log2;
	}

	/**
	 * Determine common flexpage size compatible with specified fault areas
	 */
	static size_t common_size_log2(Fault_area const &a1, Fault_area const &a2)
	{
		/*
		 * We have to make sure that the offset of page-fault address
		 * relative to the flexpage base is the same for both fault areas.
		 * This condition is met by the flexpage size equal to the number
		 * of common least-significant bits of both offsets.
		 */
		size_t const diff = (a1.fault_addr() - a1.base())
		                  ^ (a2.fault_addr() - a2.base());

		/*
		 * Find highest clear bit in 'diff', starting from the least
		 * significant candidate. We can skip all bits lower then
		 * 'get_page_size_log2()' because they are not relevant as
		 * flexpage size (and are always zero).
		 */
		size_t n = get_page_size_log2();
		size_t const min_size_log2 = min(a1._size_log2, a2._size_log2);
		for (; n < min_size_log2 && !(diff & (1UL << n)); n++);

		return n;
	}

	addr_t fault_addr() const { return _fault_addr; }
	addr_t base()       const { return _base; }

	/* a '_size_log2' of 0 marks an invalidated area */
	bool valid() const { return _size_log2 > 0; }
};
using namespace Core;
static void print_page_fault(char const *msg,
addr_t pf_addr,
addr_t pf_ip,
Region_map::State::Fault_type pf_type,
Pager_object const &obj)
{
log(msg, " (",
pf_type == Region_map::State::WRITE_FAULT ? "WRITE" :
pf_type == Region_map::State::READ_FAULT ? "READ" : "EXEC",
" pf_addr=", Hex(pf_addr), " pf_ip=", Hex(pf_ip), " from ", obj, ") ");
}
/***********************
** Region-map client **
***********************/
/*
* This code is executed by the page-fault handler thread.
*/
int Rm_client::pager(Ipc_pager &pager)
Pager_object::Pager_result Rm_client::pager(Ipc_pager &pager)
{
Region_map::State::Fault_type pf_type = pager.write_fault() ? Region_map::State::WRITE_FAULT
: Region_map::State::READ_FAULT;
if (pager.exec_fault())
pf_type = Region_map::State::EXEC_FAULT;
addr_t pf_addr = pager.fault_addr();
addr_t pf_ip = pager.fault_ip();
if (verbose_page_faults)
print_page_fault("page fault", pf_addr, pf_ip, pf_type, *this);
auto lambda = [&] (Region_map_component *region_map,
Rm_region *region,
addr_t const ds_offset,
addr_t const region_offset,
addr_t const dst_region_size) -> int
{
Dataspace_component * dsc = region ? &region->dataspace() : nullptr;
if (!dsc) {
/*
* We found no attachment at the page-fault address and therefore have
* to reflect the page fault as region-manager fault. The signal
* handler is then expected to request the state of the region map.
*/
/* print a warning if it's no managed-dataspace */
if (region_map == &member_rm())
print_page_fault("no RM attachment", pf_addr, pf_ip,
pf_type, *this);
/* register fault at responsible region map */
if (region_map)
region_map->fault(*this, pf_addr - region_offset, pf_type);
/* there is no attachment return an error condition */
return 1;
}
/*
* Check if dataspace is compatible with page-fault type
*/
if (pf_type == Region_map::State::WRITE_FAULT &&
(!region->write() || !dsc->writeable())) {
print_page_fault("attempted write at read-only memory",
pf_addr, pf_ip, pf_type, *this);
/* register fault at responsible region map */
if (region_map)
region_map->fault(*this, pf_addr - region_offset, pf_type);
return 2;
}
if (pf_type == Region_map::State::EXEC_FAULT) {
print_page_fault("attempted exec at non-executable memory",
pf_addr, pf_ip, pf_type, *this);
/* register fault at responsible region map */
if (region_map)
region_map->fault(*this, pf_addr - region_offset, pf_type);
return 3;
}
Mapping mapping = Region_map_component::create_map_item(region_map,
*region,
ds_offset,
region_offset,
*dsc, pf_addr,
dst_region_size);
/*
* On kernels with a mapping database, the 'dsc' dataspace is a leaf
* dataspace that corresponds to a virtual address range within core. To
* prepare the answer for the page fault, we make sure that this range is
* locally mapped in core. On platforms that support map operations of
* pages that are not locally mapped, the 'map_core_local' function may be
* empty.
*/
if (!dsc->io_mem())
mapping.prepare_map_operation();
/* answer page fault with a flex-page mapping */
pager.set_reply_mapping(mapping);
return 0;
Fault const fault {
.hotspot = { pager.fault_addr() },
.access = pager.write_fault() ? Access::WRITE
: pager.exec_fault() ? Access::EXEC
: Access::READ,
.rwx = Rwx::rwx(),
.bounds = { .start = 0, .end = ~0UL },
};
return member_rm().apply_to_dataspace(pf_addr, lambda);
using Result = Region_map_component::With_mapping_result;
Result const result = member_rm().with_mapping_for_fault(fault,
[&] (Mapping const &mapping)
{
/*
* On kernels with a mapping database, the leaf dataspace
* corresponds to a virtual address range within core. To prepare
* the answer for the page fault, we make sure that this range is
* locally mapped in core.
*/
if (!mapping.io_mem)
mapping.prepare_map_operation();
/* answer page fault with a flex-page mapping */
pager.set_reply_mapping(mapping);
},
[&] (Region_map_component &rm, Fault const &fault) /* reflect to user space */
{
using Type = Region_map::State::Fault_type;
Type const type = (fault.access == Access::READ) ? Type::READ_FAULT
: (fault.access == Access::WRITE) ? Type::WRITE_FAULT
: Type::EXEC_FAULT;
/* deliver fault info to responsible region map */
rm.fault(*this, fault.hotspot.value, type);
}
);
if (result == Result::RESOLVED)
return Pager_result::CONTINUE;
/*
* Error diagnostics
*/
struct Origin
{
addr_t ip;
Pager_object &obj;
void print(Output &out) const
{
Genode::print(out, "by ", obj, " ip=", Hex(ip));
}
} origin { pager.fault_ip(), *this };
switch (result) {
case Result::RESOLVED:
case Result::REFLECTED:
break;
case Result::RECURSION_LIMIT:
error("giving up on unexpectedly deep memory-mapping structure");
error(fault, " ", origin);
break;
case Result::NO_REGION:
error("illegal ", fault, " ", origin);
break;
case Result::WRITE_VIOLATION:
case Result::EXEC_VIOLATION:
error(fault.access, " violation at address ",
fault.hotspot, " ", origin);
break;
}
return Pager_result::STOP;
}
@ -309,47 +162,6 @@ void Rm_faulter::continue_after_resolved_fault()
** Region-map component **
**************************/
/*
 * Compute the 'Mapping' that answers a page fault at 'page_addr'
 *
 * The mapping is the largest flexpage that covers the fault address while
 * fitting both the dataspace (source) and the attached region (destination).
 */
Mapping Region_map_component::create_map_item(Region_map_component *,
                                              Rm_region &region,
                                              addr_t const ds_offset,
                                              addr_t const region_offset,
                                              Dataspace_component &dataspace,
                                              addr_t const page_addr,
                                              addr_t const dst_region_size)
{
	addr_t const ds_base = dataspace.map_src_addr();

	Fault_area src_fault_area(ds_base + ds_offset);
	Fault_area dst_fault_area(page_addr);

	src_fault_area.constrain(ds_base, dataspace.size());
	dst_fault_area.constrain(region_offset + region.base(), dst_region_size);

	/*
	 * Determine mapping size compatible with source and destination,
	 * and apply platform-specific constraint of mapping sizes.
	 */
	size_t map_size_log2 = dst_fault_area.common_size_log2(dst_fault_area,
	                                                       src_fault_area);
	map_size_log2 = constrain_map_size_log2(map_size_log2);

	src_fault_area.constrain(map_size_log2);
	dst_fault_area.constrain(map_size_log2);

	/* NOTE(review): only a diagnostic is logged, a mapping is still returned */
	if (!src_fault_area.valid() || !dst_fault_area.valid())
		error("invalid mapping");

	return Mapping { .dst_addr       = dst_fault_area.base(),
	                 .src_addr       = src_fault_area.base(),
	                 .size_log2      = map_size_log2,
	                 .cached         = dataspace.cacheability() == CACHED,
	                 .io_mem         = dataspace.io_mem(),
	                 .dma_buffer     = region.dma(),
	                 .write_combined = dataspace.cacheability() == WRITE_COMBINED,
	                 .writeable      = region.write() && dataspace.writeable(),
	                 .executable     = region.executable() };
}
Region_map::Local_addr
Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const attr)
{
@ -548,7 +360,7 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
.dma = false,
};
return _attach(ds_cap, attr);
return _attach(ds_cap, attr);
}
@ -665,7 +477,7 @@ void Region_map_component::fault(Rm_faulter &faulter, addr_t pf_addr,
_faulters.enqueue(faulter);
/* issue fault signal */
_fault_notifier.submit();
Signal_transmitter(_fault_sigh).submit();
}
@ -679,9 +491,9 @@ void Region_map_component::discard_faulter(Rm_faulter &faulter, bool do_lock)
}
void Region_map_component::fault_handler(Signal_context_capability handler)
void Region_map_component::fault_handler(Signal_context_capability sigh)
{
_fault_notifier.context(handler);
_fault_sigh = sigh;
}