base: introduce Allocator::try_alloc
This patch changes the 'Allocator' interface to use 'Attempt' return values instead of exceptions for propagating errors. To largely uphold compatibility with components that use the original exception-based interface - in particular use cases where an 'Allocator' is passed to the 'new' operator - the traditional 'alloc' is still supported, but it exists merely as a wrapper around the new 'try_alloc'.

Issue #4324
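As an illustration only (not part of the patch), a minimal sketch of how a component may consume the result-based interface; 'heap' stands for any 'Genode::Allocator', and the error cases are the 'Alloc_error' values used throughout this commit:

  void example(Genode::Allocator &heap)
  {
      using namespace Genode;

      /* request one page and handle both outcomes without exceptions */
      heap.try_alloc(4096).with_result(

          [&] (void *ptr) {
              /* ... use the buffer ... */
              heap.free(ptr, 4096);
          },

          [&] (Allocator::Alloc_error e) {
              switch (e) {
              case Allocator::Alloc_error::OUT_OF_RAM:  break; /* e.g., upgrade RAM quota and retry */
              case Allocator::Alloc_error::OUT_OF_CAPS: break;
              case Allocator::Alloc_error::DENIED:      break;
              }
          });
  }

Components that keep calling the traditional 'alloc' go through the compatibility wrapper, which forwards to 'try_alloc'.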
committed by Christian Helmuth
parent 9591e6caee
commit dc39a8db62
@@ -33,42 +33,46 @@ void * Mapped_avl_allocator::map_addr(void * addr)
 }
 
 
-Range_allocator::Alloc_return
-Mapped_mem_allocator::alloc_aligned(size_t size, void **out_addr,
-                                    unsigned align, Range range)
+Range_allocator::Alloc_result
+Mapped_mem_allocator::alloc_aligned(size_t size, unsigned align, Range range)
 {
     size_t page_rounded_size = align_addr(size, get_page_size_log2());
-    void *phys_addr = 0;
     align = max((size_t)align, get_page_size_log2());
 
     /* allocate physical pages */
-    Alloc_return ret1 = _phys_alloc->alloc_aligned(page_rounded_size,
-                                                   &phys_addr, align, range);
-    if (!ret1.ok()) {
-        error("Could not allocate physical memory region of size ",
-              page_rounded_size);
-        return ret1;
-    }
+    return _phys_alloc->alloc_aligned(page_rounded_size, align, range)
+        .convert<Alloc_result>(
 
-    /* allocate range in core's virtual address space */
-    Alloc_return ret2 = _virt_alloc->alloc_aligned(page_rounded_size,
-                                                   out_addr, align);
-    if (!ret2.ok()) {
-        error("Could not allocate virtual address range in core of size ",
-              page_rounded_size);
+        [&] (void *phys_addr) -> Alloc_result {
 
-        /* revert physical allocation */
-        _phys_alloc->free(phys_addr);
-        return ret2;
-    }
+            /* allocate range in core's virtual address space */
+            return _virt_alloc->alloc_aligned(page_rounded_size, align)
+                .convert<Alloc_result>(
 
-    _phys_alloc->metadata(phys_addr, { *out_addr });
-    _virt_alloc->metadata(*out_addr, { phys_addr });
+                [&] (void *virt_addr) {
 
-    /* make physical page accessible at the designated virtual address */
-    _map_local((addr_t)*out_addr, (addr_t)phys_addr, page_rounded_size);
+                    _phys_alloc->metadata(phys_addr, { virt_addr });
+                    _virt_alloc->metadata(virt_addr, { phys_addr });
 
-    return Alloc_return::OK;
+                    /* make physical page accessible at the designated virtual address */
+                    _map_local((addr_t)virt_addr, (addr_t)phys_addr, page_rounded_size);
+
+                    return virt_addr;
+                },
+
+                [&] (Alloc_error e) {
+                    error("Could not allocate virtual address range in core of size ",
+                          page_rounded_size, " (error ", (int)e, ")");
+
+                    /* revert physical allocation */
+                    _phys_alloc->free(phys_addr);
+                    return e;
+                });
+        },
+
+        [&] (Alloc_error e) {
+            error("Could not allocate physical memory region of size ",
+                  page_rounded_size, " (error ", (int)e, ")");
+            return e;
+        });
 }
@@ -47,7 +47,7 @@ class Genode::Constrained_core_ram : public Allocator
                   " in core !");
         }
 
-        bool alloc(size_t const size, void **ptr) override
+        Alloc_result try_alloc(size_t const size) override
         {
             size_t const page_aligned_size = align_addr(size, 12);
 
@@ -56,15 +56,16 @@ class Genode::Constrained_core_ram : public Allocator
             /* on some kernels we require a cap, on some not XXX */
             Cap_quota_guard::Reservation caps(_cap_guard, Cap_quota{1});
 
-            if (!_core_mem.alloc(page_aligned_size, ptr))
-                return false;
+            return _core_mem.try_alloc(page_aligned_size).convert<Alloc_result>(
 
-            ram.acknowledge();
-            caps.acknowledge();
+                [&] (void *ptr) {
+                    ram.acknowledge();
+                    caps.acknowledge();
+                    core_mem_allocated += page_aligned_size;
+                    return ptr; },
 
-            core_mem_allocated += page_aligned_size;
-
-            return true;
+                [&] (Alloc_error error) {
+                    return error; });
         }
 
         void free(void *ptr, size_t const size) override
@@ -163,11 +163,10 @@ class Genode::Mapped_mem_allocator : public Genode::Core_mem_translator
         ** Range allocator interface **
         *******************************/
 
-        int add_range(addr_t, size_t) override { return -1; }
-        int remove_range(addr_t, size_t) override { return -1; }
-        Alloc_return alloc_aligned(size_t, void **, unsigned, Range) override;
-        Alloc_return alloc_addr(size_t, addr_t) override {
-            return Alloc_return::RANGE_CONFLICT; }
+        Range_result add_range(addr_t, size_t) override { return Alloc_error::DENIED; }
+        Range_result remove_range(addr_t, size_t) override { return Alloc_error::DENIED; }
+        Alloc_result alloc_aligned(size_t, unsigned, Range) override;
+        Alloc_result alloc_addr(size_t, addr_t) override { return Alloc_error::DENIED; }
         void free(void *) override;
         size_t avail() const override { return _phys_alloc->avail(); }
         bool valid_addr(addr_t addr) const override {
@@ -180,8 +179,8 @@ class Genode::Mapped_mem_allocator : public Genode::Core_mem_translator
         ** Allocator interface **
         *************************/
 
-        bool alloc(size_t size, void **out_addr) override {
-            return alloc_aligned(size, out_addr, log2(sizeof(addr_t))).ok(); }
+        Alloc_result try_alloc(size_t size) override {
+            return alloc_aligned(size, log2(sizeof(addr_t))); }
         void free(void *addr, size_t) override;
         size_t consumed() const override { return _phys_alloc->consumed(); }
         size_t overhead(size_t size) const override {
@@ -276,16 +275,14 @@ class Genode::Core_mem_allocator : public Genode::Core_mem_translator
         ** Range allocator interface **
         *******************************/
 
-        int add_range(addr_t, size_t) override { return -1; }
-        int remove_range(addr_t, size_t) override { return -1; }
-        Alloc_return alloc_addr(size_t, addr_t) override {
-            return Alloc_return::RANGE_CONFLICT; }
+        Range_result add_range(addr_t, size_t) override { return Alloc_error::DENIED; }
+        Range_result remove_range(addr_t, size_t) override { return Alloc_error::DENIED; }
+        Alloc_result alloc_addr(size_t, addr_t) override { return Alloc_error::DENIED; }
 
-        Alloc_return alloc_aligned(size_t size, void **out_addr,
-                                   unsigned align, Range range) override
+        Alloc_result alloc_aligned(size_t size, unsigned align, Range range) override
         {
             Mutex::Guard lock_guard(_mutex);
-            return _mem_alloc.alloc_aligned(size, out_addr, align, range);
+            return _mem_alloc.alloc_aligned(size, align, range);
         }
 
         void free(void *addr) override
@@ -305,8 +302,10 @@ class Genode::Core_mem_allocator : public Genode::Core_mem_translator
         ** Allocator interface **
         *************************/
 
-        bool alloc(size_t size, void **out_addr) override {
-            return alloc_aligned(size, out_addr, log2(sizeof(addr_t))).ok(); }
+        Alloc_result try_alloc(size_t size) override
+        {
+            return alloc_aligned(size, log2(sizeof(addr_t)));
+        }
 
         void free(void *addr, size_t size) override
         {
@@ -40,7 +40,7 @@ class Genode::Synced_range_allocator : public Range_allocator
        friend class Mapped_mem_allocator;
 
        Mutex _default_mutex { };
-       Mutex &_mutex;
+       Mutex &_mutex { _default_mutex };
        ALLOC _alloc;
        Synced_interface<ALLOC, Mutex> _synced_object;
@@ -54,8 +54,7 @@ class Genode::Synced_range_allocator : public Range_allocator
        template <typename... ARGS>
        Synced_range_allocator(ARGS &&... args)
-       : _mutex(_default_mutex), _alloc(args...),
-         _synced_object(_mutex, &_alloc) { }
+       : _alloc(args...), _synced_object(_mutex, &_alloc) { }
 
        Guard operator () () { return _synced_object(); }
        Guard operator () () const { return _synced_object(); }
@@ -67,8 +66,8 @@ class Genode::Synced_range_allocator : public Range_allocator
        ** Allocator interface **
        *************************/
 
-       bool alloc(size_t size, void **out_addr) override {
-           return _synced_object()->alloc(size, out_addr); }
+       Alloc_result try_alloc(size_t size) override {
+           return _synced_object()->try_alloc(size); }
 
        void free(void *addr, size_t size) override {
            _synced_object()->free(addr, size); }
@@ -87,17 +86,16 @@ class Genode::Synced_range_allocator : public Range_allocator
        ** Range-allocator interface **
        *******************************/
 
-       int add_range(addr_t base, size_t size) override {
+       Range_result add_range(addr_t base, size_t size) override {
            return _synced_object()->add_range(base, size); }
 
-       int remove_range(addr_t base, size_t size) override {
+       Range_result remove_range(addr_t base, size_t size) override {
            return _synced_object()->remove_range(base, size); }
 
-       Alloc_return alloc_aligned(size_t size, void **out_addr,
-                                  unsigned align, Range range) override {
-           return _synced_object()->alloc_aligned(size, out_addr, align, range); }
+       Alloc_result alloc_aligned(size_t size, unsigned align, Range range) override {
+           return _synced_object()->alloc_aligned(size, align, range); }
 
-       Alloc_return alloc_addr(size_t size, addr_t addr) override {
+       Alloc_result alloc_addr(size_t size, addr_t addr) override {
            return _synced_object()->alloc_addr(size, addr); }
 
        void free(void *addr) override {
@@ -41,24 +41,16 @@ Io_mem_session_component::_prepare_io_mem(const char *args,
        _cacheable = WRITE_COMBINED;
 
    /* check for RAM collision */
-   int ret;
-   if ((ret = ram_alloc.remove_range(base, size))) {
+   if (ram_alloc.remove_range(base, size).failed()) {
        error("I/O memory ", Hex_range<addr_t>(base, size), " "
-             "used by RAM allocator (", ret, ")");
+             "used by RAM allocator");
        return Dataspace_attr();
    }
 
    /* allocate region */
-   switch (_io_mem_alloc.alloc_addr(req_size, req_base).value) {
-   case Range_allocator::Alloc_return::RANGE_CONFLICT:
+   if (_io_mem_alloc.alloc_addr(req_size, req_base).failed()) {
        error("I/O memory ", Hex_range<addr_t>(req_base, req_size), " not available");
        return Dataspace_attr();
-
-   case Range_allocator::Alloc_return::OUT_OF_METADATA:
-       error("I/O memory allocator ran out of meta data");
-       return Dataspace_attr();
-
-   case Range_allocator::Alloc_return::OK: break;
    }
 
    /* request local mapping */
@@ -38,8 +38,7 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)
     * If this does not work, we subsequently weaken the alignment constraint
     * until the allocation succeeds.
     */
-   void *ds_addr = nullptr;
-   bool alloc_succeeded = false;
+   Range_allocator::Alloc_result allocated_range = Allocator::Alloc_error::DENIED;
 
    /*
     * If no physical constraint exists, try to allocate physical memory at
@@ -53,63 +52,57 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)
        Phys_range const range { .start = high_start, .end = _phys_range.end };
 
        for (size_t align_log2 = log2(ds_size); align_log2 >= 12; align_log2--) {
-           if (_phys_alloc.alloc_aligned(ds_size, &ds_addr, align_log2, range).ok()) {
-               alloc_succeeded = true;
+           allocated_range = _phys_alloc.alloc_aligned(ds_size, align_log2, range);
+           if (allocated_range.ok())
                break;
-           }
        }
    }
 
    /* apply constraints, or retry if larger memory allocation failed */
-   if (!alloc_succeeded) {
+   if (!allocated_range.ok()) {
        for (size_t align_log2 = log2(ds_size); align_log2 >= 12; align_log2--) {
-           if (_phys_alloc.alloc_aligned(ds_size, &ds_addr, align_log2,
-                                         _phys_range).ok()) {
-               alloc_succeeded = true;
+           allocated_range = _phys_alloc.alloc_aligned(ds_size, align_log2, _phys_range);
+           if (allocated_range.ok())
                break;
-           }
        }
    }
 
-   /*
-    * Helper to release the allocated physical memory whenever we leave the
-    * scope via an exception.
-    */
-   class Phys_alloc_guard
-   {
-       private:
-
-           /*
-            * Noncopyable
-            */
-           Phys_alloc_guard(Phys_alloc_guard const &);
-           Phys_alloc_guard &operator = (Phys_alloc_guard const &);
-
-       public:
-
-           Range_allocator &phys_alloc;
-           void * const ds_addr;
-           bool ack = false;
-
-           Phys_alloc_guard(Range_allocator &phys_alloc, void *ds_addr)
-           : phys_alloc(phys_alloc), ds_addr(ds_addr) { }
-
-           ~Phys_alloc_guard() { if (!ack) phys_alloc.free(ds_addr); }
-
-   } phys_alloc_guard(_phys_alloc, ds_addr);
-
    /*
    * Normally, init's quota equals the size of physical memory and this quota
    * is distributed among the processes. As we check the quota before
    * allocating, the allocation should always succeed in theory. However,
    * fragmentation could cause a failing allocation.
    */
-   if (!alloc_succeeded) {
+   if (allocated_range.failed()) {
        error("out of physical memory while allocating ", ds_size, " bytes ",
              "in range [", Hex(_phys_range.start), "-", Hex(_phys_range.end), "]");
-       return Alloc_error::OUT_OF_RAM;
+
+       return allocated_range.convert<Ram_allocator::Alloc_result>(
+           [&] (void *)           { return Alloc_error::DENIED; },
+           [&] (Alloc_error error) { return error; });
    }
 
+   /*
+    * Helper to release the allocated physical memory whenever we leave the
+    * scope via an exception.
+    */
+   struct Phys_alloc_guard
+   {
+       Range_allocator &phys_alloc;
+       struct { void * ds_addr = nullptr; };
+       bool keep = false;
+
+       Phys_alloc_guard(Range_allocator &phys_alloc)
+       : phys_alloc(phys_alloc) { }
+
+       ~Phys_alloc_guard() { if (!keep && ds_addr) phys_alloc.free(ds_addr); }
+
+   } phys_alloc_guard(_phys_alloc);
+
+   allocated_range.with_result(
+       [&] (void *ptr) { phys_alloc_guard.ds_addr = ptr; },
+       [&] (Alloc_error) { /* already checked above */ });
+
    /*
    * For non-cached RAM dataspaces, we mark the dataspace as write
    * combined and expect the pager to evaluate this dataspace property
@@ -118,7 +111,8 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)
    Dataspace_component *ds_ptr = nullptr;
    try {
        ds_ptr = new (_ds_slab)
-           Dataspace_component(ds_size, (addr_t)ds_addr, cache, true, this);
+           Dataspace_component(ds_size, (addr_t)phys_alloc_guard.ds_addr,
+                               cache, true, this);
    }
    catch (Out_of_ram) { return Alloc_error::OUT_OF_RAM; }
    catch (Out_of_caps) { return Alloc_error::OUT_OF_CAPS; }
@@ -145,7 +139,7 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)
 
    Dataspace_capability ds_cap = _ep.manage(&ds);
 
-   phys_alloc_guard.ack = true;
+   phys_alloc_guard.keep = true;
 
    return static_cap_cast<Ram_dataspace>(ds_cap);
 }
@@ -365,8 +365,14 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
        throw Region_conflict();
 
    auto lambda = [&] (Dataspace_component *dsc) {
+
+       using Alloc_error = Range_allocator::Alloc_error;
+
        /* check dataspace validity */
-       if (!dsc) throw Invalid_dataspace();
+       if (!dsc)
+           throw Invalid_dataspace();
+
+       unsigned const min_align_log2 = get_page_size_log2();
 
        size_t const off = offset;
        if (off >= dsc->size())
@@ -376,27 +382,25 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
            size = dsc->size() - offset;
 
        /* work with page granularity */
-       size = align_addr(size, get_page_size_log2());
+       size = align_addr(size, min_align_log2);
 
        /* deny creation of regions larger then the actual dataspace */
        if (dsc->size() < size + offset)
            throw Region_conflict();
 
        /* allocate region for attachment */
-       void *attach_at = 0;
+       void *attach_at = nullptr;
        if (use_local_addr) {
-           switch (_map.alloc_addr(size, local_addr).value) {
-
-           case Range_allocator::Alloc_return::OUT_OF_METADATA:
-               throw Out_of_ram();
-
-           case Range_allocator::Alloc_return::RANGE_CONFLICT:
-               throw Region_conflict();
-
-           case Range_allocator::Alloc_return::OK:
-               attach_at = local_addr;
-               break;
-           }
+           _map.alloc_addr(size, local_addr).with_result(
+               [&] (void *ptr) { attach_at = ptr; },
+               [&] (Range_allocator::Alloc_error error) {
+                   switch (error) {
+                   case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
+                   case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
+                   case Alloc_error::DENIED: break;
+                   }
+                   throw Region_conflict();
+               });
        } else {
 
            /*
@@ -406,9 +410,10 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
            */
           size_t align_log2 = log2(size);
           if (align_log2 >= sizeof(void *)*8)
-              align_log2 = get_page_size_log2();
+              align_log2 = min_align_log2;
 
-          for (; align_log2 >= get_page_size_log2(); align_log2--) {
+          bool done = false;
+          for (; !done && (align_log2 >= min_align_log2); align_log2--) {
 
               /*
                * Don't use an alignment higher than the alignment of the backing
@@ -419,21 +424,23 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
                   continue;
 
               /* try allocating the align region */
-              Range_allocator::Alloc_return alloc_return =
-                  _map.alloc_aligned(size, &attach_at, align_log2);
+              _map.alloc_aligned(size, align_log2).with_result(
 
-              typedef Range_allocator::Alloc_return Alloc_return;
-
-              switch (alloc_return.value) {
-              case Alloc_return::OK: break; /* switch */
-              case Alloc_return::OUT_OF_METADATA: throw Out_of_ram();
-              case Alloc_return::RANGE_CONFLICT: continue; /* for loop */
-              }
-              break; /* for loop */
+                  [&] (void *ptr) {
+                      attach_at = ptr;
+                      done = true; },
+
+                  [&] (Range_allocator::Alloc_error error) {
+                      switch (error) {
+                      case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
+                      case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
+                      case Alloc_error::DENIED: break; /* no fit */
+                      }
+                      /* try smaller alignment in next iteration... */
+                  });
           }
 
-          if (align_log2 < get_page_size_log2())
+          if (!done)
               throw Region_conflict();
       }
@@ -35,18 +35,23 @@ Io_port_session_component::Io_port_session_component(Range_allocator &io_port_al
    unsigned size = Arg_string::find_arg(args, "io_port_size").ulong_value(0);
 
    /* allocate region (also checks out-of-bounds regions) */
-   switch (io_port_alloc.alloc_addr(size, base).value) {
+   io_port_alloc.alloc_addr(size, base).with_error(
+       [&] (Allocator::Alloc_error e) {
 
-   case Range_allocator::Alloc_return::RANGE_CONFLICT:
-       error("I/O port ", Hex_range<uint16_t>(base, size), " not available");
-       throw Service_denied();
+           switch (e) {
+           case Range_allocator::Alloc_error::DENIED:
+               error("I/O port ", Hex_range<uint16_t>(base, size), " not available");
+               throw Service_denied();
 
-   case Range_allocator::Alloc_return::OUT_OF_METADATA:
-       error("I/O port allocator ran out of meta data");
-       throw Service_denied();
+           case Range_allocator::Alloc_error::OUT_OF_RAM:
+               error("I/O port allocator ran out of RAM");
+               throw Service_denied();
 
-   case Range_allocator::Alloc_return::OK: break;
-   }
+           case Range_allocator::Alloc_error::OUT_OF_CAPS:
+               error("I/O port allocator ran out of caps");
+               throw Service_denied();
+           }
+       });
 
    /* store information */
    _base = base;
@@ -71,29 +71,37 @@ class Stack_area_region_map : public Region_map
        {
            /* allocate physical memory */
            size = round_page(size);
-           void *phys_base = nullptr;
-           Range_allocator &ra = platform_specific().ram_alloc();
-           if (ra.alloc_aligned(size, &phys_base,
-                                get_page_size_log2()).error()) {
-               error("could not allocate backing store for new stack");
-               return (addr_t)0;
-           }
 
-           Dataspace_component &ds = *new (&_ds_slab)
-               Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0);
+           Range_allocator &phys = platform_specific().ram_alloc();
 
-           addr_t const core_local_addr = stack_area_virtual_base() + (addr_t)local_addr;
+           return phys.alloc_aligned(size, get_page_size_log2()).convert<Local_addr>(
 
-           if (!map_local(ds.phys_addr(), core_local_addr,
-                          ds.size() >> get_page_size_log2())) {
-               error("could not map phys ", Hex(ds.phys_addr()),
-                     " at local ", Hex(core_local_addr));
-               return (addr_t)0;
-           }
+               [&] (void *phys_ptr) {
 
-           ds.assign_core_local_addr((void*)core_local_addr);
+                   addr_t const phys_base = (addr_t)phys_ptr;
 
-           return local_addr;
+                   Dataspace_component &ds = *new (&_ds_slab)
+                       Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0);
+
+                   addr_t const core_local_addr = stack_area_virtual_base()
+                                                + (addr_t)local_addr;
+
+                   if (!map_local(ds.phys_addr(), core_local_addr,
+                                  ds.size() >> get_page_size_log2())) {
+                       error("could not map phys ", Hex(ds.phys_addr()),
+                             " at local ", Hex(core_local_addr));
+
+                       phys.free(phys_ptr);
+                       return Local_addr { (addr_t)0 };
+                   }
+
+                   ds.assign_core_local_addr((void*)core_local_addr);
+
+                   return local_addr;
+               },
+               [&] (Range_allocator::Alloc_error) {
+                   error("could not allocate backing store for new stack");
+                   return (addr_t)0; });
        }
 
        void detach(Local_addr local_addr) override
@@ -63,48 +63,57 @@ void Vm_session_component::attach(Dataspace_capability const cap,
        attribute.offset > dsc.size() - attribute.size)
        throw Invalid_dataspace();
 
-   switch (_map.alloc_addr(attribute.size, guest_phys).value) {
-   case Range_allocator::Alloc_return::OUT_OF_METADATA:
-       throw Out_of_ram();
-   case Range_allocator::Alloc_return::RANGE_CONFLICT:
-   {
-       Rm_region *region_ptr = _map.metadata((void *)guest_phys);
-       if (!region_ptr)
-           throw Region_conflict();
-
-       Rm_region &region = *region_ptr;
-
-       if (!(cap == region.dataspace().cap()))
-           throw Region_conflict();
-       if (guest_phys < region.base() ||
-           guest_phys > region.base() + region.size() - 1)
-           throw Region_conflict();
-
-       /* re-attach all */
-       break;
-   }
-   case Range_allocator::Alloc_return::OK:
-   {
-       /* store attachment info in meta data */
-       try {
-           _map.construct_metadata((void *)guest_phys,
-                                   guest_phys, attribute.size,
-                                   dsc.writable() && attribute.writeable,
-                                   dsc, attribute.offset, *this,
-                                   attribute.executable);
-       } catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
-           error("failed to store attachment info");
-           throw Invalid_dataspace();
-       }
-
-       Rm_region &region = *_map.metadata((void *)guest_phys);
-
-       /* inform dataspace about attachment */
-       dsc.attached_to(region);
-
-       break;
-   }
-   };
+   using Alloc_error = Range_allocator::Alloc_error;
+
+   _map.alloc_addr(attribute.size, guest_phys).with_result(
+
+       [&] (void *) {
+
+           /* store attachment info in meta data */
+           try {
+               _map.construct_metadata((void *)guest_phys,
+                                       guest_phys, attribute.size,
+                                       dsc.writable() && attribute.writeable,
+                                       dsc, attribute.offset, *this,
+                                       attribute.executable);
+           } catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
+               error("failed to store attachment info");
+               throw Invalid_dataspace();
+           }
+
+           Rm_region &region = *_map.metadata((void *)guest_phys);
+
+           /* inform dataspace about attachment */
+           dsc.attached_to(region);
+       },
+
+       [&] (Alloc_error error) {
+
+           switch (error) {
+
+           case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
+           case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
+           case Alloc_error::DENIED:
+               {
+                   /*
+                    * Handle attach after partial detach
+                    */
+                   Rm_region *region_ptr = _map.metadata((void *)guest_phys);
+                   if (!region_ptr)
+                       throw Region_conflict();
+
+                   Rm_region &region = *region_ptr;
+
+                   if (!(cap == region.dataspace().cap()))
+                       throw Region_conflict();
+
+                   if (guest_phys < region.base() ||
+                       guest_phys > region.base() + region.size() - 1)
+                       throw Region_conflict();
+               }
+           };
+       }
+   );
 
    /* kernel specific code to attach memory to guest */
    _attach_vm_memory(dsc, guest_phys, attribute);