Rework Region_map interface

- Remove exceptions
- Use 'Attr' struct for attach arguments
- Let 'attach' return 'Range' instead of 'Local_addr'
- Rename 'Region_map::State' to 'Region_map::Fault'

Issue #5245
Fixes #5070
Norman Feske
2024-06-18 18:29:31 +02:00
parent d866b6b053
commit 0105494223
117 changed files with 2058 additions and 1636 deletions
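Before the per-file hunks, a brief orientation: the commit replaces the exception-based 'Region_map' calls by result values. The following is an editor's sketch (not part of the commit) of how client code would use the reworked 'attach' call. The 'Attr' field names, the 'Range' members, and the 'Attach_error' values are taken from the hunks below; the 'env' object, the exact scoping of these types inside 'Region_map', and the 'Attempt'-style 'with_result' accessor are assumptions, and the usual headers are omitted.

Region_map &rm = env.rm();   /* 'env' assumed to be a Genode::Env */

Region_map::Attach_result const result =
	rm.attach(ds_cap, Region_map::Attr {
		.size       = 0,      /* 0 attaches the whole dataspace */
		.offset     = 0,
		.use_at     = false,  /* let the region map pick the address */
		.at         = 0,
		.executable = false,
		.writeable  = true });

result.with_result(
	[&] (Region_map::Range const range) {
		/* attached at [range.start, range.start + range.num_bytes) */
		log("attached at ", Hex(range.start)); },
	[&] (Region_map::Attach_error e) {
		/* INVALID_DATASPACE, REGION_CONFLICT, OUT_OF_RAM, or OUT_OF_CAPS */
		if (e == Region_map::Attach_error::REGION_CONFLICT)
			warning("attach failed: region conflict"); });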


@ -21,18 +21,16 @@
using namespace Core;
Region_map::Local_addr
Core_region_map::attach(Dataspace_capability ds_cap, size_t, off_t, bool,
Region_map::Local_addr, bool, bool)
Region_map::Attach_result
Core_region_map::attach(Dataspace_capability ds_cap, Attr const &)
{
auto lambda = [] (Dataspace_component *ds) {
return _ep.apply(ds_cap, [] (Dataspace_component *ds) -> Attach_result {
if (!ds)
throw Invalid_dataspace();
return Attach_error::INVALID_DATASPACE;
return (void *)ds->phys_addr();
};
return _ep.apply(ds_cap, lambda);
return Range { .start = ds->phys_addr(), .num_bytes = ds->size() };
});
}
void Core_region_map::detach(Local_addr) { }
void Core_region_map::detach(addr_t) { }


@ -31,6 +31,7 @@ void Dataspace_component::detached_from(Rm_region &region)
_regions.remove(&region);
}
void Dataspace_component::detach_from_rm_sessions()
{
_mutex.acquire();
@ -44,13 +45,14 @@ void Dataspace_component::detach_from_rm_sessions()
* removes the current region from the '_regions' list.
*/
_mutex.release();
r->rm().reserve_and_flush((void *)r->base());
r->rm().reserve_and_flush(r->base());
_mutex.acquire();
}
_mutex.release();
}
Dataspace_component::~Dataspace_component()
{
detach_from_rm_sessions();


@ -34,18 +34,11 @@ class Core::Core_region_map : public Region_map
Core_region_map(Rpc_entrypoint &ep) : _ep(ep) { }
Local_addr attach(Dataspace_capability, size_t size = 0,
off_t offset=0, bool use_local_addr = false,
Local_addr local_addr = 0,
bool executable = false,
bool writeable = true) override;
void detach(Local_addr) override;
void fault_handler (Signal_context_capability) override { }
State state () override { return State(); }
Dataspace_capability dataspace() override { return Dataspace_capability(); }
Attach_result attach(Dataspace_capability, Attr const &) override;
void detach(addr_t) override;
void fault_handler (Signal_context_capability) override { }
Fault fault() override { return { }; }
Dataspace_capability dataspace() override { return { }; }
};
#endif /* _CORE__INCLUDE__CORE_REGION_MAP_H_ */


@ -40,7 +40,7 @@
#include <base/internal/stack_area.h>
namespace Core {
class Region_map_detach;
struct Region_map_detach;
class Rm_region;
struct Fault;
class Cpu_thread_component;
@ -52,13 +52,19 @@ namespace Core {
}
class Core::Region_map_detach : Interface
struct Core::Region_map_detach : Interface
{
public:
virtual void detach_at(addr_t) = 0;
virtual void detach(Region_map::Local_addr) = 0;
virtual void unmap_region(addr_t base, size_t size) = 0;
virtual void reserve_and_flush(Region_map::Local_addr) = 0;
/**
* Unmap memory area from all address spaces referencing it
*
* \param base base address of region to unmap
* \param size size of region to unmap in bytes
*/
virtual void unmap_region(addr_t base, size_t size) = 0;
virtual void reserve_and_flush(addr_t) = 0;
};
@ -81,7 +87,7 @@ class Core::Rm_region : public List<Rm_region>::Element
size_t size;
bool write;
bool exec;
off_t off;
addr_t off;
bool dma;
void print(Output &out) const
@ -110,7 +116,7 @@ class Core::Rm_region : public List<Rm_region>::Element
size_t size() const { return _attr.size; }
bool write() const { return _attr.write; }
bool executable() const { return _attr.exec; }
off_t offset() const { return _attr.off; }
addr_t offset() const { return _attr.off; }
bool dma() const { return _attr.dma; }
Region_map_detach &rm() const { return _rm; }
@ -213,7 +219,7 @@ class Core::Rm_faulter : Fifo<Rm_faulter>::Element, Interface
Pager_object &_pager_object;
Mutex _mutex { };
Weak_ptr<Region_map_component> _faulting_region_map { };
Region_map::State _fault_state { };
Region_map::Fault _fault { };
friend class Fifo<Rm_faulter>;
@ -231,8 +237,7 @@ class Core::Rm_faulter : Fifo<Rm_faulter>::Element, Interface
/**
* Assign fault state
*/
void fault(Region_map_component &faulting_region_map,
Region_map::State fault_state);
void fault(Region_map_component &faulting_region_map, Region_map::Fault);
/**
* Disassociate faulter from the faulted region map
@ -246,12 +251,12 @@ class Core::Rm_faulter : Fifo<Rm_faulter>::Element, Interface
* Return true if page fault occurred in specified address range
*/
bool fault_in_addr_range(addr_t addr, size_t size) {
return (_fault_state.addr >= addr) && (_fault_state.addr <= addr + size - 1); }
return (_fault.addr >= addr) && (_fault.addr <= addr + size - 1); }
/**
* Return fault state as exported via the region-map interface
*/
Region_map::State fault_state() { return _fault_state; }
Region_map::Fault fault() { return _fault; }
/**
* Wake up faulter by answering the pending page fault
@ -412,7 +417,7 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
* Called recursively when resolving a page fault in nested region maps.
*/
With_mapping_result _with_region_at_fault(Recursion_limit const recursion_limit,
Fault const &fault,
Core::Fault const &fault,
auto const &resolved_fn,
auto const &reflect_fn)
{
@ -441,7 +446,7 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
Rm_region const &region = *region_ptr;
/* fault information relative to 'region' */
Fault const relative_fault = fault.within_region(region);
Core::Fault const relative_fault = fault.within_region(region);
Result result = Result::NO_REGION;
@ -476,7 +481,7 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
}
/* traverse into managed dataspace */
Fault const sub_region_map_relative_fault =
Core::Fault const sub_region_map_relative_fault =
relative_fault.within_sub_region_map(region.offset(),
dataspace.size());
@ -497,30 +502,25 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
struct Attach_attr
{
size_t size;
off_t offset;
bool use_local_addr;
addr_t local_addr;
bool executable;
bool writeable;
bool dma;
Attr attr;
bool dma;
};
Local_addr _attach(Dataspace_capability, Attach_attr);
Attach_result _attach(Dataspace_capability, Attach_attr);
void _with_region(Local_addr local_addr, auto const &fn)
void _with_region(addr_t at, auto const &fn)
{
/* read meta data for address */
Rm_region *region_ptr = _map.metadata(local_addr);
Rm_region * const region_ptr = _map.metadata((void *)at);
if (!region_ptr) {
if (_diag.enabled)
warning("_with_region: no attachment at ", (void *)local_addr);
warning("_with_region: no attachment at ", (void *)at);
return;
}
if ((region_ptr->base() != static_cast<addr_t>(local_addr)) && _diag.enabled)
warning("_with_region: ", static_cast<void *>(local_addr), " is not "
if ((region_ptr->base() != static_cast<addr_t>(at)) && _diag.enabled)
warning("_with_region: ", reinterpret_cast<void *>(at), " is not "
"the beginning of the region ", Hex(region_ptr->base()));
fn(*region_ptr);
@ -530,16 +530,6 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
public:
/*
* Unmaps a memory area from all address spaces referencing it.
*
* \param base base address of region to unmap
* \param size size of region to unmap
*/
void unmap_region(addr_t base, size_t size) override;
void reserve_and_flush(Local_addr) override;
/**
* Constructor
*
@ -572,11 +562,9 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
* for resolution.
*
* \param faulter faulting region-manager client
* \param pf_addr page-fault address
* \param pf_type type of page fault (read/write/execute)
* \param fault fault information
*/
void fault(Rm_faulter &faulter, addr_t pf_addr,
Region_map::State::Fault_type pf_type);
void fault(Rm_faulter &faulter, Fault);
/**
* Dissolve faulter from region map
@ -596,16 +584,16 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
* \param reflect_fn functor called to reflect a missing mapping
* to user space if a fault handler is registered
*/
With_mapping_result with_mapping_for_fault(Fault const &fault,
auto const &apply_fn,
auto const &reflect_fn)
With_mapping_result with_mapping_for_fault(Core::Fault const &fault,
auto const &apply_fn,
auto const &reflect_fn)
{
return _with_region_at_fault(Recursion_limit { 5 }, fault,
[&] (Rm_region const &region, Fault const &region_relative_fault)
[&] (Rm_region const &region, Core::Fault const &region_relative_fault)
{
With_mapping_result result = With_mapping_result::NO_REGION;
region.with_dataspace([&] (Dataspace_component &dataspace) {
Fault const ram_relative_fault =
Core::Fault const ram_relative_fault =
region_relative_fault.within_ram(region.offset(), dataspace.attr());
Log2_range src_range { ram_relative_fault.hotspot };
@ -661,15 +649,23 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
Attach_dma_result attach_dma(Dataspace_capability, addr_t);
/*********************************
** Region_map_detach interface **
*********************************/
void unmap_region (addr_t, size_t) override;
void detach_at (addr_t) override;
void reserve_and_flush (addr_t) override;
/**************************
** Region map interface **
**************************/
Local_addr attach (Dataspace_capability, size_t, off_t,
bool, Local_addr, bool, bool) override;
void detach (Local_addr) override;
void fault_handler (Signal_context_capability handler) override;
State state () override;
Attach_result attach (Dataspace_capability, Attr const &) override;
void detach (addr_t at) override { detach_at(at); }
void fault_handler (Signal_context_capability) override;
Fault fault () override;
Dataspace_capability dataspace () override { return _ds_cap; }
};
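Because 'Region_map::State' is now 'Region_map::Fault' and 'state()' became 'fault()', code that monitors unresolved faults of a managed region map changes as in the following editor's sketch (not part of the commit). The '.type' and '.addr' members and the READ/WRITE/EXEC values appear in the pager hunk further below; the 'Fault::Type::NONE' value used for the no-fault case and the 'Fault_monitor' helper are assumptions, and headers plus the enclosing component are omitted.

struct Fault_monitor
{
	Entrypoint &_ep;
	Region_map &_rm;   /* managed region map to observe */

	Signal_handler<Fault_monitor> _handler {
		_ep, *this, &Fault_monitor::_handle_fault };

	void _handle_fault()
	{
		/* formerly 'Region_map::State state()', now 'Region_map::Fault fault()' */
		Region_map::Fault const fault = _rm.fault();

		if (fault.type != Region_map::Fault::Type::NONE)   /* NONE is assumed */
			warning("unresolved fault at ", Hex(fault.addr));
	}

	Fault_monitor(Entrypoint &ep, Region_map &rm) : _ep(ep), _rm(rm)
	{
		_rm.fault_handler(_handler);
	}
};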


@ -110,13 +110,10 @@ class Core::Trace::Subject
_size = size;
/* copy content */
void *src = local_rm.attach(from_ds),
*dst = local_rm.attach(_ds);
Attached_dataspace from { local_rm, from_ds },
to { local_rm, _ds };
Genode::memcpy(dst, src, _size);
local_rm.detach(src);
local_rm.detach(dst);
Genode::memcpy(to.local_addr<char>(), from.local_addr<char const>(), _size);
}
/**

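The hunk above trades the manual attach/detach pair for 'Attached_dataspace' guards, which detach automatically when leaving scope and therefore stay safe on early returns. An editor's sketch of the same pattern in isolation (the 'copy_dataspace' helper and its arguments are made up for illustration):

#include <base/attached_dataspace.h>

static void copy_dataspace(Genode::Region_map           &rm,
                           Genode::Dataspace_capability  from_ds,
                           Genode::Dataspace_capability  to_ds,
                           Genode::size_t                num_bytes)
{
	Genode::Attached_dataspace from { rm, from_ds },
	                           to   { rm, to_ds };

	Genode::memcpy(to.local_addr<char>(), from.local_addr<char const>(), num_bytes);

	/* both dataspaces are detached by the 'Attached_dataspace' destructors */
}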

@ -62,12 +62,12 @@ Pager_object::Pager_result Rm_client::pager(Ipc_pager &pager)
[&] (Region_map_component &rm, Fault const &fault) /* reflect to user space */
{
using Type = Region_map::State::Fault_type;
Type const type = (fault.access == Access::READ) ? Type::READ_FAULT
: (fault.access == Access::WRITE) ? Type::WRITE_FAULT
: Type::EXEC_FAULT;
using Type = Region_map::Fault::Type;
Type const type = (fault.access == Access::READ) ? Type::READ
: (fault.access == Access::WRITE) ? Type::WRITE
: Type::EXEC;
/* deliver fault info to responsible region map */
rm.fault(*this, fault.hotspot.value, type);
rm.fault(*this, { .type = type, .addr = fault.hotspot.value });
}
);
@ -118,12 +118,12 @@ Pager_object::Pager_result Rm_client::pager(Ipc_pager &pager)
*************/
void Rm_faulter::fault(Region_map_component &faulting_region_map,
Region_map::State fault_state)
Region_map::Fault fault)
{
Mutex::Guard lock_guard(_mutex);
_faulting_region_map = faulting_region_map.weak_ptr();
_fault_state = fault_state;
_fault = fault;
_pager_object.unresolved_page_fault_occurred();
}
@ -154,7 +154,7 @@ void Rm_faulter::continue_after_resolved_fault()
_pager_object.wake_up();
_faulting_region_map = Weak_ptr<Core::Region_map_component>();
_fault_state = Region_map::State();
_fault = { };
}
@ -162,55 +162,54 @@ void Rm_faulter::continue_after_resolved_fault()
** Region-map component **
**************************/
Region_map::Local_addr
Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const attr)
Region_map::Attach_result
Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const core_attr)
{
Attr const attr = core_attr.attr;
/* serialize access */
Mutex::Guard lock_guard(_mutex);
/* offset must be positive and page-aligned */
if (attr.offset < 0 || align_addr(attr.offset, get_page_size_log2()) != attr.offset)
throw Region_conflict();
/* offset must be page-aligned */
if (align_addr(attr.offset, get_page_size_log2()) != attr.offset)
return Attach_error::REGION_CONFLICT;
auto lambda = [&] (Dataspace_component *dsc) {
auto lambda = [&] (Dataspace_component *dsc) -> Attach_result {
using Alloc_error = Range_allocator::Alloc_error;
/* check dataspace validity */
if (!dsc)
throw Invalid_dataspace();
return Attach_error::INVALID_DATASPACE;
unsigned const min_align_log2 = get_page_size_log2();
size_t const off = attr.offset;
if (off >= dsc->size())
throw Region_conflict();
size_t const ds_size = dsc->size();
size_t size = attr.size;
if (attr.offset >= ds_size)
return Attach_error::REGION_CONFLICT;
if (!size)
size = dsc->size() - attr.offset;
size_t size = attr.size ? attr.size : ds_size - attr.offset;
/* work with page granularity */
size = align_addr(size, min_align_log2);
/* deny creation of regions larger then the actual dataspace */
if (dsc->size() < size + attr.offset)
throw Region_conflict();
if (ds_size < size + attr.offset)
return Attach_error::REGION_CONFLICT;
/* allocate region for attachment */
void *attach_at = nullptr;
if (attr.use_local_addr) {
_map.alloc_addr(size, attr.local_addr).with_result(
[&] (void *ptr) { attach_at = ptr; },
[&] (Range_allocator::Alloc_error error) {
switch (error) {
case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
case Alloc_error::DENIED: break;
}
throw Region_conflict();
});
bool at_defined = false;
addr_t at { };
if (attr.use_at) {
Alloc_error error = Alloc_error::DENIED;
_map.alloc_addr(size, attr.at).with_result(
[&] (void *ptr) { at = addr_t(ptr); at_defined = true; },
[&] (Alloc_error e) { error = e; });
if (error == Alloc_error::OUT_OF_RAM) return Attach_error::OUT_OF_RAM;
if (error == Alloc_error::OUT_OF_CAPS) return Attach_error::OUT_OF_CAPS;
} else {
/*
@ -222,8 +221,7 @@ Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const att
if (align_log2 >= sizeof(void *)*8)
align_log2 = min_align_log2;
bool done = false;
for (; !done && (align_log2 >= min_align_log2); align_log2--) {
for (; !at_defined && (align_log2 >= min_align_log2); align_log2--) {
/*
* Don't use an alignment higher than the alignment of the backing
@ -233,60 +231,52 @@ Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const att
if (((dsc->map_src_addr() + attr.offset) & ((1UL << align_log2) - 1)) != 0)
continue;
/* try allocating the align region */
_map.alloc_aligned(size, (unsigned)align_log2).with_result(
/* try allocating the aligned region */
Alloc_error error = Alloc_error::DENIED;
_map.alloc_aligned(size, unsigned(align_log2)).with_result(
[&] (void *ptr) { at = addr_t(ptr); at_defined = true; },
[&] (Alloc_error e) { error = e; });
[&] (void *ptr) {
attach_at = ptr;
done = true; },
[&] (Range_allocator::Alloc_error error) {
switch (error) {
case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
case Alloc_error::DENIED: break; /* no fit */
}
/* try smaller alignment in next iteration... */
});
if (error == Alloc_error::OUT_OF_RAM) return Attach_error::OUT_OF_RAM;
if (error == Alloc_error::OUT_OF_CAPS) return Attach_error::OUT_OF_CAPS;
}
if (!done)
throw Region_conflict();
}
if (!at_defined)
return Attach_error::REGION_CONFLICT;
Rm_region::Attr const region_attr
{
.base = (addr_t)attach_at,
.base = at,
.size = size,
.write = attr.writeable,
.exec = attr.executable,
.off = attr.offset,
.dma = attr.dma,
.dma = core_attr.dma,
};
/* store attachment info in meta data */
try {
_map.construct_metadata(attach_at, *dsc, *this, region_attr);
_map.construct_metadata((void *)at, *dsc, *this, region_attr);
}
catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
error("failed to store attachment info");
throw Invalid_dataspace();
return Attach_error::INVALID_DATASPACE;
}
/* inform dataspace about attachment */
Rm_region * const region_ptr = _map.metadata(attach_at);
Rm_region * const region_ptr = _map.metadata((void *)at);
if (region_ptr)
dsc->attached_to(*region_ptr);
/* check if attach operation resolves any faulting region-manager clients */
_faulters.for_each([&] (Rm_faulter &faulter) {
if (faulter.fault_in_addr_range((addr_t)attach_at, size)) {
if (faulter.fault_in_addr_range(at, size)) {
_faulters.remove(faulter);
faulter.continue_after_resolved_fault();
}
});
return attach_at;
return Range { .start = at, .num_bytes = size };
};
return _ds_ep.apply(ds_cap, lambda);
@ -351,23 +341,10 @@ void Region_map_component::unmap_region(addr_t base, size_t size)
}
Region_map::Local_addr
Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
off_t offset, bool use_local_addr,
Region_map::Local_addr local_addr,
bool executable, bool writeable)
Region_map::Attach_result
Region_map_component::attach(Dataspace_capability ds_cap, Attr const &attr)
{
Attach_attr const attr {
.size = size,
.offset = offset,
.use_local_addr = use_local_addr,
.local_addr = local_addr,
.executable = executable,
.writeable = writeable,
.dma = false,
};
return _attach(ds_cap, attr);
return _attach(ds_cap, { .attr = attr, .dma = false });
}
@ -375,25 +352,30 @@ Region_map_component::Attach_dma_result
Region_map_component::attach_dma(Dataspace_capability ds_cap, addr_t at)
{
Attach_attr const attr {
.size = 0,
.offset = 0,
.use_local_addr = true,
.local_addr = at,
.executable = false,
.writeable = true,
.attr = {
.size = { },
.offset = { },
.use_at = true,
.at = at,
.executable = false,
.writeable = true,
},
.dma = true,
};
using Attach_dma_error = Pd_session::Attach_dma_error;
try {
_attach(ds_cap, attr);
return Pd_session::Attach_dma_ok();
}
catch (Invalid_dataspace) { return Attach_dma_error::DENIED; }
catch (Region_conflict) { return Attach_dma_error::DENIED; }
catch (Out_of_ram) { return Attach_dma_error::OUT_OF_RAM; }
catch (Out_of_caps) { return Attach_dma_error::OUT_OF_CAPS; }
return _attach(ds_cap, attr).convert<Attach_dma_result>(
[&] (Range) { return Pd_session::Attach_dma_ok(); },
[&] (Attach_error e) {
switch (e) {
case Attach_error::OUT_OF_RAM: return Attach_dma_error::OUT_OF_RAM;
case Attach_error::OUT_OF_CAPS: return Attach_dma_error::OUT_OF_CAPS;
case Attach_error::REGION_CONFLICT: break;
case Attach_error::INVALID_DATASPACE: break;
}
return Attach_dma_error::DENIED;
});
}
@ -448,23 +430,23 @@ void Region_map_component::_reserve_and_flush_unsynchronized(Rm_region &region)
/*
* Flush the region, but keep it reserved until 'detach()' is called.
*/
void Region_map_component::reserve_and_flush(Local_addr local_addr)
void Region_map_component::reserve_and_flush(addr_t const at)
{
/* serialize access */
Mutex::Guard lock_guard(_mutex);
_with_region(local_addr, [&] (Rm_region &region) {
_with_region(at, [&] (Rm_region &region) {
_reserve_and_flush_unsynchronized(region);
});
}
void Region_map_component::detach(Local_addr local_addr)
void Region_map_component::detach_at(addr_t const at)
{
/* serialize access */
Mutex::Guard lock_guard(_mutex);
_with_region(local_addr, [&] (Rm_region &region) {
_with_region(at, [&] (Rm_region &region) {
if (!region.reserved())
_reserve_and_flush_unsynchronized(region);
/* free the reserved region */
@ -490,11 +472,10 @@ void Region_map_component::remove_client(Rm_client &rm_client)
}
void Region_map_component::fault(Rm_faulter &faulter, addr_t pf_addr,
Region_map::State::Fault_type pf_type)
void Region_map_component::fault(Rm_faulter &faulter, Region_map::Fault fault)
{
/* remember fault state in faulting thread */
faulter.fault(*this, Region_map::State(pf_type, pf_addr));
faulter.fault(*this, fault);
/* enqueue faulter */
_faulters.enqueue(faulter);
@ -520,17 +501,15 @@ void Region_map_component::fault_handler(Signal_context_capability sigh)
}
Region_map::State Region_map_component::state()
Region_map::Fault Region_map_component::fault()
{
/* serialize access */
Mutex::Guard lock_guard(_mutex);
/* return ready state if there are not current faulters */
Region_map::State result;
/* otherwise return fault information regarding the first faulter */
/* return fault information regarding the first faulter */
Region_map::Fault result { };
_faulters.head([&] (Rm_faulter &faulter) {
result = faulter.fault_state(); });
result = faulter.fault(); });
return result;
}
@ -609,7 +588,7 @@ Region_map_component::~Region_map_component()
break;
}
detach(out_addr);
detach_at(out_addr);
}
/* revoke dataspace representation */


@ -65,52 +65,53 @@ class Stack_area_region_map : public Region_map
/**
* Allocate and attach on-the-fly backing store to stack area
*/
Local_addr attach(Dataspace_capability, size_t size, off_t,
bool, Local_addr local_addr, bool, bool) override
Attach_result attach(Dataspace_capability, Attr const &attr) override
{
/* allocate physical memory */
size = round_page(size);
size_t const size = round_page(attr.size);
Range_allocator &phys = platform_specific().ram_alloc();
return phys.alloc_aligned(size, get_page_size_log2()).convert<Local_addr>(
return phys.alloc_aligned(size, get_page_size_log2()).convert<Attach_result>(
[&] (void *phys_ptr) {
[&] (void *phys_ptr) -> Attach_result {
addr_t const phys_base = (addr_t)phys_ptr;
try {
addr_t const phys_base = (addr_t)phys_ptr;
Dataspace_component &ds = *new (&_ds_slab)
Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0);
Dataspace_component &ds = *new (&_ds_slab)
Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0);
addr_t const core_local_addr = stack_area_virtual_base()
+ (addr_t)local_addr;
addr_t const core_local_addr = stack_area_virtual_base()
+ attr.at;
if (!map_local(ds.phys_addr(), core_local_addr,
ds.size() >> get_page_size_log2())) {
error("could not map phys ", Hex(ds.phys_addr()),
" at local ", Hex(core_local_addr));
if (!map_local(ds.phys_addr(), core_local_addr,
ds.size() >> get_page_size_log2())) {
error("could not map phys ", Hex(ds.phys_addr()),
" at local ", Hex(core_local_addr));
phys.free(phys_ptr);
return Local_addr { (addr_t)0 };
phys.free(phys_ptr);
return Attach_error::INVALID_DATASPACE;
}
ds.assign_core_local_addr((void*)core_local_addr);
return Range { .start = attr.at, .num_bytes = size };
}
ds.assign_core_local_addr((void*)core_local_addr);
return local_addr;
catch (Out_of_ram) { return Attach_error::OUT_OF_RAM; }
catch (Out_of_caps) { return Attach_error::OUT_OF_CAPS; }
},
[&] (Range_allocator::Alloc_error) {
error("could not allocate backing store for new stack");
return (addr_t)0; });
return Attach_error::REGION_CONFLICT; });
}
void detach(Local_addr local_addr) override
void detach(addr_t const at) override
{
using Genode::addr_t;
if ((addr_t)local_addr >= stack_area_virtual_size())
if (at >= stack_area_virtual_size())
return;
addr_t const detach = stack_area_virtual_base() + (addr_t)local_addr;
addr_t const detach = stack_area_virtual_base() + at;
addr_t const stack = stack_virtual_size();
addr_t const pages = ((detach & ~(stack - 1)) + stack - detach)
>> get_page_size_log2();
@ -120,9 +121,9 @@ class Stack_area_region_map : public Region_map
void fault_handler(Signal_context_capability) override { }
State state() override { return State(); }
Fault fault() override { return { }; }
Dataspace_capability dataspace() override { return Dataspace_capability(); }
Dataspace_capability dataspace() override { return { }; }
};


@ -65,6 +65,8 @@ void Vm_session_component::attach(Dataspace_capability const cap,
using Alloc_error = Range_allocator::Alloc_error;
Region_map_detach &rm_detach = *this;
_map.alloc_addr(attribute.size, guest_phys).with_result(
[&] (void *) {
@ -75,14 +77,14 @@ void Vm_session_component::attach(Dataspace_capability const cap,
.size = attribute.size,
.write = dsc.writeable() && attribute.writeable,
.exec = attribute.executable,
.off = (off_t)attribute.offset,
.off = attribute.offset,
.dma = false,
};
/* store attachment info in meta data */
try {
_map.construct_metadata((void *)guest_phys,
dsc, *this, region_attr);
dsc, rm_detach, region_attr);
} catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
error("failed to store attachment info");
@ -149,7 +151,7 @@ void Vm_session_component::detach(addr_t guest_phys, size_t size)
if (region) {
iteration_size = region->size();
detach(region->base());
detach_at(region->base());
}
if (addr >= guest_phys_end - (iteration_size - 1))
@ -160,10 +162,10 @@ void Vm_session_component::detach(addr_t guest_phys, size_t size)
}
void Vm_session_component::_with_region(Region_map::Local_addr addr,
void Vm_session_component::_with_region(addr_t const addr,
auto const &fn)
{
Rm_region *region = _map.metadata(addr);
Rm_region *region = _map.metadata((void *)addr);
if (region)
fn(*region);
else
@ -171,7 +173,7 @@ void Vm_session_component::_with_region(Region_map::Local_addr addr,
}
void Vm_session_component::detach(Region_map::Local_addr addr)
void Vm_session_component::detach_at(addr_t const addr)
{
_with_region(addr, [&] (Rm_region &region) {
@ -190,7 +192,7 @@ void Vm_session_component::unmap_region(addr_t base, size_t size)
}
void Vm_session_component::reserve_and_flush(Region_map::Local_addr addr)
void Vm_session_component::reserve_and_flush(addr_t const addr)
{
_with_region(addr, [&] (Rm_region &region) {