Introduce new 'Ram' API types

The new types in base/ram.h model different allocation scenarios and
error cases as plain C++ types, without resorting to exceptions. They
are meant to replace the former 'Ram_allocator' interface. As of now,
the 'Unmapped_allocator' closely captures the former 'Ram_allocator'
semantics. The 'Constrained_allocator' is currently an alias for
'Unmapped_allocator' but is designated to eventually allocate mapped
RAM.
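
For orientation, the essence of the new types, condensed from the
base/ram.h hunk below (the 'Constrained_allocator' alias is not part
of the excerpt and merely mirrors the description above):

  namespace Genode::Ram {

    struct Dataspace : Genode::Dataspace { };
    using  Capability = Genode::Capability<Dataspace>;

    /* allocation failures as plain values instead of exceptions */
    enum class Error { OUT_OF_RAM, OUT_OF_CAPS, DENIED };

    /* RAM inaccessible at allocation time (former 'Ram_allocator') */
    struct Unmapped_allocator;

    /* currently an alias, eventually a distinct type for mapped RAM */
    using Constrained_allocator = Unmapped_allocator;
  }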

In contrast to the 'Ram_allocator' interface, which operated in terms
of dataspace capabilities but left the lifetime management of the
allocated RAM to the caller, the new API represents an allocation
as a guard type 'Allocation', which deallocates on destruction by
default.
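
In practice, the guard makes the deallocation path implicit. A minimal
sketch, assuming a 'Ram::Allocator &ram' in scope ('_cap' is a
hypothetical member used for illustration):

  {
    Ram::Allocator::Result result = ram.try_alloc(4096);

    result.with_result(
      [&] (Ram::Allocation &a) {
        /* to keep the RAM beyond this scope, disarm the guard */
        a.deallocate = false;
        _cap = a.cap;   /* hypothetical member */
      },
      [&] (Ram::Error) { /* nothing allocated, nothing to revert */ });

  } /* with 'deallocate' left true, the RAM would be freed here */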

Allocation errors are captured by a 'Result' type that follows
the 'Attempt' pattern.
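
Given a 'Result' as above, it is inspected via 'with_result' or mapped
to another type via 'convert', as seen throughout this patch, e.g.,
when a plain capability is needed and errors collapse to an invalid
capability:

  Ram::Capability cap = result.convert<Ram::Capability>(
    [&] (Ram::Allocation const &a) { return a.cap; },
    [&] (Ram::Error)               { return Ram::Capability(); });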

As a transitional feature, the patch largely maintains API
compatibility with the original 'Ram_allocator' by providing
the original (exception-based) 'Ram_allocator::alloc' and
'Ram_allocator::free' methods as wrappers around the new
'Ram::Constrained_allocator', so that components can be gradually
updated to the new 'Ram::' interface.
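
Both styles can thereby coexist during the migration (a sketch; the
wrapper bodies appear in the ram_allocator.h hunk below):

  /* legacy style, may throw Out_of_ram, Out_of_caps, or Denied */
  Ram_dataspace_capability cap = ram.alloc(4096);
  ram.free(cap);

  /* new style, errors and lifetime handled as values */
  ram.try_alloc(4096).with_result(
    [&] (Ram::Allocation &a) { a.deallocate = false; /* keep */ },
    [&] (Ram::Error)         { /* handle error */ });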

Issue 
Norman Feske 2025-04-02 11:11:01 +02:00
parent e9558a36f9
commit 689fc1eb93
40 changed files with 593 additions and 502 deletions

@ -31,13 +31,19 @@ using namespace Core;
addr_t Platform_thread::Utcb::_attach(Region_map &core_rm)
{
Region_map::Attr attr { };
attr.writeable = true;
return core_rm.attach(_ds, attr).convert<addr_t>(
[&] (Region_map::Range range) { return range.start; },
[&] (Region_map::Attach_error) {
error("failed to attach UTCB of new thread within core");
return 0ul; });
addr_t start = 0;
ds.with_result(
[&] (Ram::Allocation const &allocation) {
Region_map::Attr attr { };
attr.writeable = true;
core_rm.attach(allocation.cap, attr).with_result(
[&] (Region_map::Range range) { start = range.start; },
[&] (Region_map::Attach_error) {
error("failed to attach UTCB of new thread within core"); });
},
[&] (Ram::Error) { });
return start;
}
@ -66,6 +72,7 @@ static addr_t _alloc_core_local_utcb(addr_t core_addr)
Platform_thread::Utcb::Utcb(addr_t core_addr)
:
ds(Ram::Error::DENIED),
core_addr(core_addr),
phys_addr(_alloc_core_local_utcb(core_addr))
{ }
@ -118,6 +125,8 @@ Platform_thread::Platform_thread(Platform_pd &pd,
_kobj(_kobj.CALLED_FROM_CORE, _location.xpos(),
_priority, _quota, _label.string())
{
_utcb.ds.with_error([] (Ram::Error e) { throw_exception(e); });
_address_space = pd.weak_ptr();
pd.has_any_thread = true;
}
@ -126,7 +135,7 @@ Platform_thread::Platform_thread(Platform_pd &pd,
Platform_thread::~Platform_thread()
{
/* core/kernel threads have no dataspace, but plain memory as UTCB */
if (!_utcb._ds.valid()) {
if (!_utcb.ds_cap().valid()) {
error("UTCB of core/kernel thread gets destructed!");
return;
}
@ -179,7 +188,7 @@ void Platform_thread::start(void * const ip, void * const sp)
utcb.cap_add(Capability_space::capid(_kobj.cap()));
if (_main_thread) {
utcb.cap_add(Capability_space::capid(_pd.parent()));
utcb.cap_add(Capability_space::capid(_utcb._ds));
utcb.cap_add(Capability_space::capid(_utcb.ds_cap()));
}
Kernel::start_thread(*_kobj, _pd.kernel_pd(),

@ -64,28 +64,26 @@ class Core::Platform_thread : Noncopyable
Region_map *_core_rm_ptr = nullptr;
};
Ram_dataspace_capability _ds { }; /* UTCB ds of non-core threads */
Ram_allocator::Result const ds; /* UTCB ds of non-core threads */
addr_t const core_addr; /* UTCB address within core/kernel */
addr_t const phys_addr;
/*
* \throw Out_of_ram
* \throw Out_of_caps
*/
Ram_dataspace_capability _allocate(Ram_allocator &ram)
{
return ram.alloc(sizeof(Native_utcb), CACHED);
}
addr_t _attach(Region_map &);
static addr_t _ds_phys(Rpc_entrypoint &ep, Dataspace_capability ds)
static addr_t _phys(Rpc_entrypoint &ep, Dataspace_capability ds)
{
return ep.apply(ds, [&] (Dataspace_component *dsc) {
return dsc ? dsc->phys_addr() : 0; });
}
static addr_t _ds_phys(Rpc_entrypoint &ep, Ram_allocator::Result const &ram)
{
return ram.convert<addr_t>(
[&] (Ram::Allocation const &ds) { return _phys(ep, ds.cap); },
[&] (Ram::Error) { return 0UL; });
}
/**
* Constructor used for core-local threads
*/
@ -97,18 +95,22 @@ class Core::Platform_thread : Noncopyable
Utcb(Rpc_entrypoint &ep, Ram_allocator &ram, Region_map &core_rm)
:
_core_rm_ptr(&core_rm),
_ds(_allocate(ram)),
ds(ram.try_alloc(sizeof(Native_utcb), CACHED)),
core_addr(_attach(core_rm)),
phys_addr(_ds_phys(ep, _ds))
phys_addr(_ds_phys(ep, ds))
{ }
~Utcb()
{
if (_core_rm_ptr)
_core_rm_ptr->detach(core_addr);
}
if (_ram_ptr && _ds.valid())
_ram_ptr->free(_ds);
Ram_dataspace_capability ds_cap() const
{
return ds.convert<Ram_dataspace_capability>(
[&] (Ram::Allocation const &ds) { return ds.cap; },
[&] (Ram::Error) { return Ram_dataspace_capability { }; });
}
};
@ -299,7 +301,7 @@ class Core::Platform_thread : Noncopyable
Platform_pd &pd() const { return _pd; }
Ram_dataspace_capability utcb() const { return _utcb._ds; }
Ram_dataspace_capability utcb() const { return _utcb.ds_cap(); }
};
#endif /* _CORE__PLATFORM_THREAD_H_ */

@ -135,10 +135,8 @@ Vm_session_component::~Vm_session_component()
continue;
Vcpu & vcpu = *_vcpus[i];
if (vcpu.ds_cap.valid()) {
if (vcpu.state().valid())
_region_map.detach(vcpu.ds_addr);
_ram.free(vcpu.ds_cap);
}
}
/* free guest-to-host page tables */

@ -102,10 +102,8 @@ Vm_session_component::~Vm_session_component()
continue;
Vcpu & vcpu = *_vcpus[i];
if (vcpu.ds_cap.valid()) {
if (vcpu.state().valid())
_region_map.detach(vcpu.ds_addr);
_ram.free(vcpu.ds_cap);
}
}
id_alloc--;

@ -42,7 +42,7 @@ class Core::Vcpu : public Rpc_object<Vm_session::Native_vcpu, Vcpu>
Vcpu_data _vcpu_data { };
Kernel_object<Kernel::Vm> _kobj { };
Accounted_ram_allocator &_ram;
Ram_dataspace_capability _ds_cap { };
Ram_allocator::Result _ds;
Region_map &_region_map;
Affinity::Location _location;
Phys_allocated<Data_pages> _vcpu_data_pages;
@ -64,36 +64,35 @@ class Core::Vcpu : public Rpc_object<Vm_session::Native_vcpu, Vcpu>
_id(id),
_ep(ep),
_ram(ram),
_ds_cap( {_ram.alloc(vcpu_state_size(), Cache::UNCACHED)} ),
_ds( {_ram.try_alloc(vcpu_state_size(), Cache::UNCACHED)} ),
_region_map(region_map),
_location(location),
_vcpu_data_pages(ep, ram, region_map)
{
Region_map::Attr attr { };
attr.writeable = true;
_vcpu_data.vcpu_state = _region_map.attach(_ds_cap, attr).convert<Vcpu_state *>(
[&] (Region_map::Range range) { return (Vcpu_state *)range.start; },
[&] (Region_map::Attach_error) -> Vcpu_state * {
error("failed to attach VCPU data within core");
return nullptr;
});
_ds.with_result([&] (Ram::Allocation &allocation) {
Region_map::Attr attr { };
attr.writeable = true;
_vcpu_data.vcpu_state = _region_map.attach(allocation.cap, attr).convert<Vcpu_state *>(
[&] (Region_map::Range range) { return (Vcpu_state *)range.start; },
[&] (Region_map::Attach_error) -> Vcpu_state * {
error("failed to attach VCPU data within core");
return nullptr;
});
if (!_vcpu_data.vcpu_state) {
_ram.free(_ds_cap);
if (!_vcpu_data.vcpu_state)
throw Attached_dataspace::Region_conflict();
throw Attached_dataspace::Region_conflict();
}
_vcpu_data.virt_area = &_vcpu_data_pages.obj;
_vcpu_data.phys_addr = _vcpu_data_pages.phys_addr();
_vcpu_data.virt_area = &_vcpu_data_pages.obj;
_vcpu_data.phys_addr = _vcpu_data_pages.phys_addr();
ep.manage(this);
ep.manage(this);
},
[&] (Ram::Error e) { throw_exception(e); });
}
~Vcpu()
{
_region_map.detach((addr_t)_vcpu_data.vcpu_state);
_ram.free(_ds_cap);
_ep.dissolve(this);
}
@ -101,7 +100,13 @@ class Core::Vcpu : public Rpc_object<Vm_session::Native_vcpu, Vcpu>
** Native_vcpu RPC interface **
*******************************/
Capability<Dataspace> state() const { return _ds_cap; }
Capability<Dataspace> state() const
{
return _ds.convert<Capability<Dataspace>>(
[&] (Ram::Allocation const &ds) { return ds.cap; },
[&] (Ram::Error) { return Capability<Dataspace>(); });
}
Native_capability native_vcpu() { return _kobj.cap(); }
void exception_handler(Signal_context_capability handler)

@ -23,7 +23,7 @@
using namespace Core;
size_t Vm_session_component::_ds_size() {
size_t Vm_session_component::Vcpu::_ds_size() {
return align_addr(sizeof(Board::Vcpu_state), get_page_size_log2()); }
@ -59,26 +59,22 @@ Capability<Vm_session::Native_vcpu> Vm_session_component::create_vcpu(Thread_cap
if (_vcpus[_vcpu_id_alloc].constructed())
return { };
_vcpus[_vcpu_id_alloc].construct(_id, _ep);
_vcpus[_vcpu_id_alloc].construct(_ram, _id, _ep);
Vcpu & vcpu = *_vcpus[_vcpu_id_alloc];
try {
vcpu.ds_cap = _ram.alloc(_ds_size(), Cache::UNCACHED);
vcpu.ds.with_error([&] (Ram::Error e) { throw_exception(e); });
try {
Region_map::Attr attr { };
attr.writeable = true;
vcpu.ds_addr = _region_map.attach(vcpu.ds_cap, attr).convert<addr_t>(
vcpu.ds_addr = _region_map.attach(vcpu.state(), attr).convert<addr_t>(
[&] (Region_map::Range range) { return _alloc_vcpu_data(range.start); },
[&] (Region_map::Attach_error) -> addr_t {
error("failed to attach VCPU data within core");
if (vcpu.ds_cap.valid())
_ram.free(vcpu.ds_cap);
_vcpus[_vcpu_id_alloc].destruct();
return 0;
});
} catch (...) {
if (vcpu.ds_cap.valid())
_ram.free(vcpu.ds_cap);
_vcpus[_vcpu_id_alloc].destruct();
throw;
}

@ -55,14 +55,18 @@ class Core::Vm_session_component
struct Vcpu : public Rpc_object<Vm_session::Native_vcpu, Vcpu>
{
static size_t _ds_size();
Kernel::Vm::Identity &id;
Rpc_entrypoint &ep;
Ram_dataspace_capability ds_cap { };
Ram_allocator::Result ds;
addr_t ds_addr { };
Kernel_object<Kernel::Vm> kobj { };
Affinity::Location location { };
Vcpu(Kernel::Vm::Identity &id, Rpc_entrypoint &ep) : id(id), ep(ep)
Vcpu(Ram_allocator &ram, Kernel::Vm::Identity &id, Rpc_entrypoint &ep)
:
id(id), ep(ep), ds(ram.try_alloc(_ds_size(), Cache::UNCACHED))
{
ep.manage(this);
}
@ -76,8 +80,14 @@ class Core::Vm_session_component
** Native_vcpu RPC interface **
*******************************/
Capability<Dataspace> state() const { return ds_cap; }
Native_capability native_vcpu() { return kobj.cap(); }
Capability<Dataspace> state() const
{
return ds.convert<Capability<Dataspace>>(
[&] (Ram::Allocation const &ds) { return ds.cap; },
[&] (Ram::Error) { return Capability<Dataspace>(); });
}
Native_capability native_vcpu() { return kobj.cap(); }
void exception_handler(Signal_context_capability);
};
@ -95,7 +105,6 @@ class Core::Vm_session_component
Kernel::Vm::Identity _id;
unsigned _vcpu_id_alloc { 0 };
static size_t _ds_size();
static size_t _alloc_vcpu_data(Genode::addr_t ds_addr);
void *_alloc_table();

@ -76,12 +76,9 @@ class Stack_area_region_map : public Genode::Region_map
struct Stack_area_ram_allocator : Genode::Ram_allocator
{
Alloc_result try_alloc(Genode::size_t, Genode::Cache) override {
return Genode::Ram_dataspace_capability(); }
Result try_alloc(Genode::size_t, Genode::Cache) override { return { *this, { } }; }
void free(Genode::Ram_dataspace_capability) override { }
Genode::size_t dataspace_size(Genode::Ram_dataspace_capability) override { return 0; }
void _free(Genode::Ram::Allocation &) override { }
};

@ -40,10 +40,10 @@ class Core::Vm_session_component
{
private:
Rpc_entrypoint &_ep;
Accounted_ram_allocator &_ram_alloc;
Ram_dataspace_capability const _ds_cap;
Cap_sel _notification { 0 };
Rpc_entrypoint &_ep;
Accounted_ram_allocator &_ram_alloc;
Ram_allocator::Result const _ds;
Cap_sel _notification { 0 };
void _free_up();
@ -60,7 +60,12 @@ class Core::Vm_session_component
** Native_vcpu RPC interface **
*******************************/
Capability<Dataspace> state() const { return _ds_cap; }
Capability<Dataspace> state() const
{
return _ds.convert<Capability<Dataspace>>(
[&] (Ram::Allocation const &ds) { return ds.cap; },
[&] (Ram::Error) { return Capability<Dataspace>(); });
}
};
using Avl_region = Allocator_avl_tpl<Rm_region>;

@ -30,9 +30,6 @@ using namespace Core;
void Vm_session_component::Vcpu::_free_up()
{
if (_ds_cap.valid())
_ram_alloc.free(_ds_cap);
if (_notification.value()) {
int ret = seL4_CNode_Delete(seL4_CapInitThreadCNode,
_notification.value(), 32);
@ -51,9 +48,10 @@ Vm_session_component::Vcpu::Vcpu(Rpc_entrypoint &ep,
:
_ep(ep),
_ram_alloc(ram_alloc),
_ds_cap (_ram_alloc.alloc(align_addr(sizeof(Vcpu_state), 12),
Cache::CACHED))
_ds(_ram_alloc.try_alloc(align_addr(sizeof(Vcpu_state), 12), Cache::CACHED))
{
_ds.with_error([] (Ram::Error e) { throw_exception(e); });
try {
/* notification cap */
Cap_quota_guard::Reservation caps(cap_alloc, Cap_quota{1});
@ -67,7 +65,6 @@ Vm_session_component::Vcpu::Vcpu(Rpc_entrypoint &ep,
caps.acknowledge();
} catch (...) {
_free_up();
throw;
}
}

@ -113,13 +113,10 @@ class Stack_area_region_map : public Region_map
struct Stack_area_ram_allocator : Ram_allocator
{
Alloc_result try_alloc(size_t, Cache) override {
return reinterpret_cap_cast<Ram_dataspace>(Native_capability()); }
Result try_alloc(size_t, Cache) override { return { *this, { } }; }
void free(Ram_dataspace_capability) override {
void _free(Ram::Allocation &) override {
warning(__func__, " not implemented"); }
size_t dataspace_size(Ram_dataspace_capability) override { return 0; }
};

@ -15,6 +15,7 @@
#define _INCLUDE__BASE__ALLOCATOR_H_
#include <util/interface.h>
#include <util/attempt.h>
#include <base/stdint.h>
#include <base/exception.h>
#include <base/quota_guard.h>

@ -51,7 +51,7 @@ class Genode::Attached_ram_dataspace
_rm->detach(_at);
if (_ds.valid())
_ram->free(_ds);
_ram->free(_ds, _size);
}
void _alloc_and_attach()
@ -66,7 +66,7 @@ class Genode::Attached_ram_dataspace
[&] (Region_map::Range range) { _at = range.start; },
[&] (Region_map::Attach_error e) {
/* revert allocation if attaching the dataspace failed */
_ram->free(_ds);
_ram->free(_ds, _size);
if (e == Region_map::Attach_error::OUT_OF_RAM) throw Out_of_ram();
if (e == Region_map::Attach_error::OUT_OF_CAPS) throw Out_of_caps();
throw Attached_dataspace::Region_conflict();

@ -0,0 +1,143 @@
/*
* \brief Interfaces for allocating RAM
* \author Norman Feske
* \date 2025-04-03
*/
/*
* Copyright (C) 2025 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#ifndef _INCLUDE__BASE__RAM_H_
#define _INCLUDE__BASE__RAM_H_
#include <util/allocation.h>
#include <base/capability.h>
#include <base/quota_guard.h>
#include <base/cache.h>
#include <dataspace/dataspace.h>
namespace Genode::Ram {
struct Dataspace : Genode::Dataspace { };
using Capability = Genode::Capability<Dataspace>;
enum class Error { OUT_OF_RAM, OUT_OF_CAPS, DENIED };
struct Unmapped_allocator;
template <typename> struct Accounted_allocator;
}
/**
* Allocator of RAM inaccessible by the component at allocation time
*/
struct Genode::Ram::Unmapped_allocator : Interface, Noncopyable
{
struct Attr
{
Capability cap;
size_t num_bytes;
};
using Error = Ram::Error;
using Allocation = Genode::Allocation<Unmapped_allocator>;
using Result = Allocation::Attempt;
virtual Result try_alloc(size_t size, Cache cache = CACHED) = 0;
/**
* Release allocation
*
* \noapi
*/
virtual void _free(Allocation &) = 0;
};
/**
* Quota-bounds-checking wrapper of a constrained RAM allocator
*/
template <typename ALLOC>
struct Genode::Ram::Accounted_allocator : ALLOC
{
ALLOC &_alloc;
Ram_quota_guard &_ram_guard;
Cap_quota_guard &_cap_guard;
using Allocation = typename ALLOC::Allocation;
using Result = typename ALLOC::Result;
Accounted_allocator(ALLOC &alloc,
Ram_quota_guard &ram_guard,
Cap_quota_guard &cap_guard)
:
_alloc(alloc), _ram_guard(ram_guard), _cap_guard(cap_guard)
{ }
Result try_alloc(size_t size, Cache cache = CACHED) override
{
size_t const page_aligned_size = align_addr(size, 12);
Ram_quota const needed_ram { page_aligned_size };
Cap_quota const needed_caps { 1 };
Capability cap { };
bool ok { };
Error error { };
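/*
 * Reserve the RAM and capability quotas before performing the actual
 * allocation; each level cancels its reservation on the error paths,
 * so quota remains withheld only for successful allocations.
 */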
_ram_guard.with_reservation<void>(needed_ram,
[&] (Reservation &ram_reservation) {
_cap_guard.with_reservation<void>(needed_caps,
[&] (Reservation &cap_reservation) {
_alloc.try_alloc(page_aligned_size, cache).with_result(
[&] (Allocation &allocation) {
cap = allocation.cap;
ok = true;
allocation.deallocate = false;
},
[&] (Error e) {
cap_reservation.cancel();
ram_reservation.cancel();
error = e;
});
},
[&] {
ram_reservation.cancel();
error = Error::OUT_OF_CAPS;
}
);
},
[&] { error = Error::OUT_OF_RAM; }
);
if (!ok)
return error;
return { *this, { cap, page_aligned_size } };
}
void _free(Allocation &allocation) override
{
_alloc._free(allocation);
_ram_guard.replenish(Ram_quota{allocation.num_bytes});
_cap_guard.replenish(Cap_quota{1});
}
};
namespace Genode::Ram {
/* shortcuts for the most commonly used type of allocator */
using Allocator = Unmapped_allocator;
using Allocation = Allocator::Allocation;
}
#endif /* _INCLUDE__BASE__RAM_H_ */

@ -1,11 +1,11 @@
/*
* \brief Interface for allocating RAM dataspaces
* \brief Exception-based interface for allocating RAM dataspaces
* \author Norman Feske
* \date 2017-05-02
*/
/*
* Copyright (C) 2017 Genode Labs GmbH
* Copyright (C) 2017-2025 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
@ -14,161 +14,81 @@
#ifndef _INCLUDE__BASE__RAM_ALLOCATOR_H_
#define _INCLUDE__BASE__RAM_ALLOCATOR_H_
#include <util/attempt.h>
#include <base/capability.h>
#include <base/quota_guard.h>
#include <base/cache.h>
#include <dataspace/dataspace.h>
#include <util/noncopyable.h>
#include <base/ram.h>
namespace Genode {
struct Ram_dataspace : Dataspace { };
using Ram_dataspace_capability = Capability<Ram_dataspace>;
static inline void throw_exception(Ram::Error e) __attribute__((noreturn));
struct Ram_allocator;
class Accounted_ram_allocator;
/* type aliases used for API transition */
using Ram_dataspace = Ram::Dataspace;
using Ram_dataspace_capability = Ram::Capability;
using Accounted_ram_allocator = Ram::Accounted_allocator<Ram_allocator>;
}
struct Genode::Ram_allocator : Interface, Noncopyable
struct Genode::Ram_allocator : Ram::Unmapped_allocator
{
enum class Alloc_error { OUT_OF_RAM, OUT_OF_CAPS, DENIED };
using Alloc_result = Attempt<Ram_dataspace_capability, Alloc_error>;
struct Denied : Exception { };
/**
* Allocate RAM dataspace
* Allocate RAM
*
* \param size size of RAM dataspace
* \param cache selects cacheability attributes of the memory,
* uncached memory, i.e., for DMA buffers
*
* \return capability to RAM dataspace, or error code of type 'Alloc_error'
*/
virtual Alloc_result try_alloc(size_t size, Cache cache = CACHED) = 0;
/**
* Allocate RAM dataspace
*
* \param size size of RAM dataspace
* \param cache selects cacheability attributes of the memory,
* \param cache selects cache attributes of the memory,
* uncached memory, i.e., for DMA buffers
*
* \throw Out_of_ram
* \throw Out_of_caps
* \throw Denied
*
* \return capability to new RAM dataspace
* \return new RAM dataspace capability
*/
Ram_dataspace_capability alloc(size_t size, Cache cache = CACHED)
Ram::Capability alloc(size_t size, Cache cache = CACHED)
{
return try_alloc(size, cache).convert<Ram_dataspace_capability>(
[&] (Ram_dataspace_capability cap) {
return cap; },
[&] (Alloc_error error) -> Ram_dataspace_capability {
switch (error) {
case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
case Alloc_error::DENIED: break;
}
throw Denied();
});
[&] (Allocation &a) { a.deallocate = false; return a.cap; },
[&] (Ram::Error e) -> Ram::Capability { throw_exception(e); });
}
/**
* Free RAM dataspace
*
* \param ds dataspace capability as returned by alloc
*/
virtual void free(Ram_dataspace_capability ds) = 0;
size_t _legacy_dataspace_size(Capability<Dataspace>);
/**
* Return size of dataspace in bytes
*/
virtual size_t dataspace_size(Ram_dataspace_capability) = 0;
void free(Ram::Capability cap)
{
/*
* Deallocate via '~Ram::Allocation'.
*
* The real dataspace is merely needed for the quota tracking by
* 'Accounted_ram_allocator::_free'.
*/
Allocation { *this, { cap, _legacy_dataspace_size(cap) } };
}
void free(Ram::Capability cap, size_t size)
{
/* avoid call of '_legacy_dataspace_size' when size is known */
Allocation { *this, { cap, size } };
}
/* type aliases used for API transition */
using Alloc_result = Ram::Unmapped_allocator::Result;
using Alloc_error = Ram::Error;
};
/**
* Quota-bounds-checking wrapper of the 'Ram_allocator' interface
*/
class Genode::Accounted_ram_allocator : public Ram_allocator
{
private:
namespace Genode {
Ram_allocator &_ram_alloc;
Ram_quota_guard &_ram_guard;
Cap_quota_guard &_cap_guard;
public:
Accounted_ram_allocator(Ram_allocator &ram_alloc,
Ram_quota_guard &ram_guard,
Cap_quota_guard &cap_guard)
:
_ram_alloc(ram_alloc), _ram_guard(ram_guard), _cap_guard(cap_guard)
{ }
Alloc_result try_alloc(size_t size, Cache cache = CACHED) override
{
using Result = Alloc_result;
size_t const page_aligned_size = align_addr(size, 12);
Ram_quota const needed_ram { page_aligned_size };
Cap_quota const needed_caps { 1 };
return _ram_guard.with_reservation<Result>(needed_ram,
[&] (Reservation &ram_reservation) {
return _cap_guard.with_reservation<Result>(needed_caps,
[&] (Reservation &cap_reservation) -> Result {
return _ram_alloc.try_alloc(page_aligned_size, cache)
.convert<Result>(
[&] (Ram_dataspace_capability ds) -> Result {
return ds; },
[&] (Alloc_error error) {
cap_reservation.cancel();
ram_reservation.cancel();
return error; }
);
},
[&] () -> Result {
ram_reservation.cancel();
return Alloc_error::OUT_OF_CAPS;
}
);
},
[&] () -> Result {
return Alloc_error::OUT_OF_RAM; }
);
static inline void throw_exception(Ram::Error e)
{
switch (e) {
case Ram::Error::OUT_OF_RAM: throw Out_of_ram();
case Ram::Error::OUT_OF_CAPS: throw Out_of_caps();
case Ram::Error::DENIED: break;
}
void free(Ram_dataspace_capability ds) override
{
size_t const size = _ram_alloc.dataspace_size(ds);
_ram_alloc.free(ds);
_ram_guard.replenish(Ram_quota{size});
_cap_guard.replenish(Cap_quota{1});
}
size_t dataspace_size(Ram_dataspace_capability ds) override
{
return _ram_alloc.dataspace_size(ds);
}
};
throw Ram_allocator::Denied();
}
}
#endif /* _INCLUDE__BASE__RAM_ALLOCATOR_H_ */

@ -417,30 +417,30 @@ struct Genode::Pd_ram_allocator : Ram_allocator
{
Pd_session &_pd;
Alloc_result try_alloc(size_t size, Cache cache) override
using Pd_error = Pd_session::Alloc_ram_error;
static Ram::Error ram_error(Pd_error e)
{
using Pd_error = Pd_session::Alloc_ram_error;
switch (e) {
case Pd_error::OUT_OF_CAPS: return Ram::Error::OUT_OF_CAPS;
case Pd_error::OUT_OF_RAM: return Ram::Error::OUT_OF_RAM;
case Pd_error::DENIED: break;
}
return Ram::Error::DENIED;
}
Result try_alloc(size_t size, Cache cache) override
{
using Capability = Ram::Capability;
return _pd.alloc_ram(size, cache).convert<Alloc_result>(
[&] (Ram_dataspace_capability cap) { return cap; },
[&] (Pd_error e) {
switch (e) {
case Pd_error::OUT_OF_CAPS: return Alloc_error::OUT_OF_CAPS;
case Pd_error::OUT_OF_RAM: return Alloc_error::OUT_OF_RAM;
case Pd_error::DENIED:
break;
}
return Alloc_error::DENIED;
});
[&] (Capability ds) -> Result { return { *this, { ds, size } }; },
[&] (Pd_error e) -> Result { return ram_error(e); });
}
void free(Ram_dataspace_capability ds) override
void _free(Ram::Allocation &allocation) override
{
_pd.free_ram(ds);
}
size_t dataspace_size(Ram_dataspace_capability ds) override
{
return _pd.ram_size(ds);
_pd.free_ram(allocation.cap);
}
Pd_ram_allocator(Pd_session &pd) : _pd(pd) { }

@ -22,13 +22,17 @@ namespace Genode { template <typename> class Allocation; }
/**
* Representation of an allocation
*
* The 'Allocation' base class provides a guard mechanism for reverting the
* allocation at destruction time of an 'Allocation' object, unless the
* 'deallocate' member is manually set to false. The 'ALLOCATOR' is expected
* to implement a '_free' method performing the deallocation.
* An 'Allocation' object holds allocator-type-specific attributes ('Attr'),
* which are directly accessible in the scope of the 'Allocation' object.
* It provides a guard mechanism for reverting the allocation at destruction
* time of an 'Allocation' object. The automatic deallocation can be manually
* discharged by setting the 'deallocate' member to 'false'.
*
* An 'Allocation' object holds allocator-type-specific attributes ('Attr')
* that are directly accessible in the scope of the 'Allocation' object.
* The 'ALLOCATOR' type is expected to implement a '_free' method performing
* the deallocation. This method is prefixed with '_' to indicate that it is
* not meant to be explicitly called. It is supposed to be called only at the
* destruction time of an 'Allocation' or by allocator wrappers such as
* 'Ram::Accounted_allocator'.
*/
template <typename ALLOC>
class Genode::Allocation : Noncopyable, public ALLOC::Attr

@ -78,6 +78,7 @@ _ZN6Genode13Avl_node_base6insertEPS0_RNS0_6PolicyE T
_ZN6Genode13Avl_node_base6removeERNS0_6PolicyE T
_ZN6Genode13Avl_node_baseC1Ev T
_ZN6Genode13Avl_node_baseC2Ev T
_ZN6Genode13Ram_allocator22_legacy_dataspace_sizeENS_10CapabilityINS_9DataspaceEEE T
_ZN6Genode13Registry_base7ElementC1ERS0_Pv T
_ZN6Genode13Registry_base7ElementC2ERS0_Pv T
_ZN6Genode13Registry_base7ElementD1Ev T

@ -27,30 +27,27 @@ struct Core::Core_ram_allocator : Ram_allocator
{
Ram_dataspace_factory &_factory;
Alloc_result try_alloc(size_t size, Cache cache) override
Result try_alloc(size_t size, Cache cache) override
{
using Pd_error = Pd_session::Alloc_ram_error;
return _factory.alloc_ram(size, cache).convert<Alloc_result>(
[&] (Ram_dataspace_capability cap) { return cap; },
[&] (Pd_error e) {
return _factory.alloc_ram(size, cache).convert<Result>(
[&] (Ram_dataspace_capability cap) -> Result {
return Result { *this, { cap, size } };
},
[&] (Pd_error e) -> Result {
switch (e) {
case Pd_error::OUT_OF_CAPS: return Alloc_error::OUT_OF_CAPS;
case Pd_error::OUT_OF_RAM: return Alloc_error::OUT_OF_RAM;
case Pd_error::OUT_OF_CAPS: return Ram::Error::OUT_OF_CAPS;
case Pd_error::OUT_OF_RAM: return Ram::Error::OUT_OF_RAM;
case Pd_error::DENIED:
break;
}
return Alloc_error::DENIED;
return Ram::Error::DENIED;
});
}
void free(Ram_dataspace_capability ds) override
void _free(Ram::Allocation &allocation) override
{
_factory.free_ram(ds);
}
size_t dataspace_size(Ram_dataspace_capability ds) override
{
return _factory.ram_size(ds);
_factory.free_ram(allocation.cap);
}
Core_ram_allocator(Ram_dataspace_factory &factory) : _factory(factory) { }

@ -31,42 +31,36 @@ namespace Core { namespace Trace {
class Core::Trace::Policy_owner : Interface { };
class Core::Trace::Policy : public List<Policy>::Element
struct Core::Trace::Policy : List<Policy>::Element
{
friend class Policy_registry;
Policy_owner const &_owner;
Policy_id const _id;
Allocator &md_alloc;
private:
Ram::Allocator::Result const ds;
Policy_owner const &_owner;
Allocator &_md_alloc;
Policy_id const _id;
Dataspace_capability _ds;
Policy_size const _size;
Policy(Policy_owner const &owner, Policy_id id, Allocator &md_alloc,
Ram::Allocator &ram, Policy_size size)
:
_owner(owner), _id(id), md_alloc(md_alloc), ds(ram.try_alloc(size.num_bytes))
{ }
/**
* Constructor
*
* \param md_alloc allocator that holds the 'Policy' object
*/
Policy(Policy_owner const &owner, Policy_id const id,
Allocator &md_alloc, Dataspace_capability ds, Policy_size size)
:
_owner(owner), _md_alloc(md_alloc), _id(id), _ds(ds), _size(size)
{ }
bool has_id (Policy_id const id) const { return _id == id; }
bool owned_by(Policy_owner const &owner) const { return &_owner == &owner; }
Allocator &md_alloc() { return _md_alloc; }
Ram::Capability dataspace() const
{
return ds.convert<Ram::Capability>(
[&] (Ram::Allocation const &a) { return a.cap; },
[&] (Ram::Error) { return Ram::Capability(); });
}
bool owned_by(Policy_owner const &owner) const
{
return &_owner == &owner;
}
bool has_id(Policy_id id) const { return id == _id; }
public:
Dataspace_capability dataspace() const { return _ds; }
Policy_size size() const { return _size; }
Policy_size size() const
{
return ds.convert<Policy_size>(
[&] (Ram::Allocation const &a) -> Policy_size { return { a.num_bytes }; },
[&] (Ram::Error) -> Policy_size { return { }; });
}
};
@ -108,13 +102,24 @@ class Core::Trace::Policy_registry
_policies.remove(p);
}
void insert(Policy_owner const &owner, Policy_id const id,
Allocator &md_alloc, Dataspace_capability ds, Policy_size size)
struct Insert_ok { };
using Insert_result = Attempt<Insert_ok, Ram::Error>;
Insert_result insert(Policy_owner const &owner, Policy_id const id,
Allocator &md_alloc, Ram::Allocator &ram,
Policy_size size)
{
Mutex::Guard guard(_mutex);
Policy &policy = *new (&md_alloc) Policy(owner, id, md_alloc, ds, size);
_policies.insert(&policy);
try {
Policy &policy = *new (&md_alloc) Policy(owner, id, md_alloc, ram, size);
_policies.insert(&policy);
return policy.ds.convert<Insert_result>(
[&] (Ram::Allocation const &) { return Insert_ok(); },
[&] (Ram::Error e) { return e; });
}
catch (Out_of_ram) { return Ram::Error::OUT_OF_RAM; }
catch (Out_of_caps) { return Ram::Error::OUT_OF_CAPS; }
}
void remove(Policy_owner &owner, Policy_id id)
@ -127,7 +132,7 @@ class Core::Trace::Policy_registry
if (tmp->owned_by(owner) && tmp->has_id(id)) {
_policies.remove(tmp);
destroy(&tmp->md_alloc(), tmp);
destroy(&tmp->md_alloc, tmp);
}
}
}
@ -138,7 +143,7 @@ class Core::Trace::Policy_registry
while (Policy *p = _any_policy_owned_by(owner)) {
_policies.remove(p);
destroy(&p->md_alloc(), p);
destroy(&p->md_alloc, p);
}
}

@ -53,82 +53,76 @@ class Core::Trace::Subject
{
private:
class Ram_dataspace
struct Ram_dataspace : Noncopyable
{
private:
enum class Setup_result { OK, OUT_OF_RAM, OUT_OF_CAPS, DENIED };
Ram_allocator *_ram_ptr { nullptr };
size_t _size { 0 };
Ram_dataspace_capability _ds { };
Ram_allocator::Result ds { { } };
void _reset()
{
_ram_ptr = nullptr;
_size = 0;
_ds = Ram_dataspace_capability();
Ram_dataspace() { }
static Setup_result _result(Ram::Error e)
{
switch (e) {
case Ram::Error::OUT_OF_RAM: return Setup_result::OUT_OF_RAM;
case Ram::Error::OUT_OF_CAPS: return Setup_result::OUT_OF_CAPS;
case Ram::Error::DENIED: break;
}
return Setup_result::DENIED;
}
/*
* Noncopyable
*/
Ram_dataspace(Ram_dataspace const &);
Ram_dataspace &operator = (Ram_dataspace const &);
public:
Ram_dataspace() { _reset(); }
~Ram_dataspace() { flush(); }
/**
* Allocate new dataspace
*/
void setup(Ram_allocator &ram, size_t size)
{
if (_size && _size == size)
return;
if (_size)
_ram_ptr->free(_ds);
_ds = ram.alloc(size); /* may throw */
_ram_ptr = &ram;
_size = size;
}
/**
* Clone dataspace into newly allocated dataspace
*/
void setup(Ram_allocator &ram, Region_map &local_rm,
Dataspace_capability &from_ds, size_t size)
{
if (_size)
flush();
_ds = ram.alloc(size); /* may throw */
_ram_ptr = &ram;
_size = size;
/* copy content */
Setup_result _copy_content(Region_map &local_rm, size_t num_bytes,
Dataspace_capability from_ds,
Dataspace_capability to_ds)
{
try {
Attached_dataspace from { local_rm, from_ds },
to { local_rm, _ds };
to { local_rm, to_ds };
Genode::memcpy(to.local_addr<char>(), from.local_addr<char const>(), _size);
using Genode::memcpy;
memcpy(to.local_addr<char>(), from.local_addr<char const>(),
num_bytes);
}
catch (Out_of_caps) { return Setup_result::OUT_OF_CAPS; }
catch (Out_of_ram) { return Setup_result::OUT_OF_RAM; }
catch (...) { return Setup_result::DENIED; }
return Setup_result::OK;
}
/**
* Release dataspace
*/
size_t flush()
{
if (_ram_ptr)
_ram_ptr->free(_ds);
/**
* Allocate new dataspace
*/
[[nodiscard]] Setup_result setup(Ram_allocator &ram, size_t size)
{
ds = ram.try_alloc(size);
return ds.convert<Setup_result>(
[] (Ram::Allocation &) { return Setup_result::OK; },
[] (Ram::Error e) { return _result(e); });
}
_reset();
return 0;
}
/**
* Clone dataspace into newly allocated dataspace
*/
[[nodiscard]] Setup_result setup(Ram_allocator &ram,
Region_map &local_rm,
Dataspace_capability &from_ds,
size_t size)
{
ds = ram.try_alloc(size);
return ds.convert<Setup_result>(
[&] (Ram::Allocation &allocation) {
_copy_content(local_rm, size, from_ds, allocation.cap);
return Setup_result::OK;
},
[] (Ram::Error e) { return _result(e); });
}
Dataspace_capability dataspace() const { return _ds; }
Ram::Capability dataspace() const
{
return ds.convert<Ram::Capability>(
[&] (Ram::Allocation const &a) { return a.cap; },
[&] (Ram::Error) { return Ram::Capability(); });
}
};
friend class Subject_registry;
@ -213,24 +207,28 @@ class Core::Trace::Subject
case Subject_info::TRACED: break;
}
try {
_buffer.setup(ram, size.num_bytes);
}
catch (Out_of_ram) { return Trace_result::OUT_OF_RAM; }
catch (Out_of_caps) { return Trace_result::OUT_OF_CAPS; }
using Setup_result = Ram_dataspace::Setup_result;
try {
_policy.setup(ram, local_rm, policy_ds, policy_size.num_bytes);
switch (_buffer.setup(ram, size.num_bytes)) {
case Setup_result::OUT_OF_RAM: return Trace_result::OUT_OF_RAM;
case Setup_result::OUT_OF_CAPS: return Trace_result::OUT_OF_CAPS;
case Setup_result::DENIED: return Trace_result::INVALID_SUBJECT;
case Setup_result::OK: break;
}
switch (_policy.setup(ram, local_rm, policy_ds, policy_size.num_bytes)) {
case Setup_result::OUT_OF_RAM: return Trace_result::OUT_OF_RAM;
case Setup_result::OUT_OF_CAPS: return Trace_result::OUT_OF_CAPS;
case Setup_result::DENIED: return Trace_result::INVALID_SUBJECT;
case Setup_result::OK: break;
}
catch (Out_of_ram) { _buffer.flush(); return Trace_result::OUT_OF_RAM; }
catch (Out_of_caps) { _buffer.flush(); return Trace_result::OUT_OF_CAPS; }
/* inform trace source about the new buffer */
Locked_ptr<Source> source(_source);
if (!source->try_acquire(*this)) {
_policy.flush();
_buffer.flush();
_policy.ds = { };
_buffer.ds = { };
return Trace_result::FOREIGN;
}
@ -291,8 +289,8 @@ class Core::Trace::Subject
source->disable();
source->release_ownership(*this);
_buffer.flush();
_policy.flush();
_buffer.ds = { };
_policy.ds = { };
}
};

@ -129,12 +129,9 @@ class Stack_area_region_map : public Region_map
struct Stack_area_ram_allocator : Ram_allocator
{
Alloc_result try_alloc(size_t, Cache) override {
return reinterpret_cap_cast<Ram_dataspace>(Native_capability()); }
Result try_alloc(size_t, Cache) override { return { *this, { } }; }
void free(Ram_dataspace_capability) override { }
size_t dataspace_size(Ram_dataspace_capability) override { return 0; }
void _free(Ram::Allocation &) override { }
};

@ -65,24 +65,25 @@ Session_component::Alloc_policy_rpc_result Session_component::alloc_policy(Polic
Policy_id const id { ++_policy_cnt };
return _ram.try_alloc(size.num_bytes).convert<Alloc_policy_rpc_result>(
auto policy_error = [&] (Ram::Error e)
{
switch (e) {
case Ram::Error::OUT_OF_RAM: return Alloc_policy_rpc_error::OUT_OF_RAM;
case Ram::Error::OUT_OF_CAPS: return Alloc_policy_rpc_error::OUT_OF_CAPS;
case Ram::Error::DENIED: break;
}
return Alloc_policy_rpc_error::INVALID;
};
[&] (Ram_dataspace_capability const ds_cap) -> Alloc_policy_rpc_result {
try {
_policies.insert(*this, id, _policies_slab, ds_cap, size);
}
catch (Out_of_ram) { _ram.free(ds_cap); return Alloc_policy_rpc_error::OUT_OF_RAM; }
catch (Out_of_caps) { _ram.free(ds_cap); return Alloc_policy_rpc_error::OUT_OF_CAPS; }
return id;
},
[&] (Ram_allocator::Alloc_error const e) -> Alloc_policy_rpc_result {
switch (e) {
case Ram_allocator::Alloc_error::OUT_OF_RAM: return Alloc_policy_rpc_error::OUT_OF_RAM;
case Ram_allocator::Alloc_error::OUT_OF_CAPS: return Alloc_policy_rpc_error::OUT_OF_CAPS;
case Ram_allocator::Alloc_error::DENIED: break;
}
return Alloc_policy_rpc_error::INVALID;
});
try {
Policy_registry::Insert_result r =
_policies.insert(*this, id, _policies_slab, _ram, size);
return r.convert<Alloc_policy_rpc_result>(
[&] (auto) /* ok */ { return id; },
[&] (Ram::Error e) { return policy_error(e); });
}
catch (Out_of_ram) { return Alloc_policy_rpc_error::OUT_OF_RAM; }
catch (Out_of_caps) { return Alloc_policy_rpc_error::OUT_OF_CAPS; }
}
@ -99,7 +100,9 @@ void Session_component::unload_policy(Policy_id const id)
{
_policies.with_dataspace(*this, id, [&] (Dataspace_capability ds) {
_policies.remove(*this, id);
_ram.free(static_cap_cast<Ram_dataspace>(ds)); });
/* deallocate via '~Allocation' */
Ram::Allocation { _ram, { static_cap_cast<Ram_dataspace>(ds), 0 } };
});
}

@ -84,7 +84,7 @@ Child::Load_result Child::_load_static_elf(Dataspace_capability elf_ds,
*/
/* alloc dataspace */
Ram_allocator::Alloc_result const alloc_result = ram.try_alloc(size);
Ram_allocator::Result alloc_result = ram.try_alloc(size);
if (alloc_result.failed())
error("allocation of read-write segment failed");
@ -96,7 +96,10 @@ Child::Load_result Child::_load_static_elf(Dataspace_capability elf_ds,
if (alloc_result.failed()) return Load_error::INVALID;
Dataspace_capability ds_cap = alloc_result.convert<Dataspace_capability>(
[&] (Ram_dataspace_capability cap) { return cap; },
[&] (Ram::Allocation &allocation) {
allocation.deallocate = false;
return allocation.cap;
},
[&] (Alloc_error) { /* handled above */ return Dataspace_capability(); });
/* attach dataspace */

@ -289,3 +289,9 @@ void Genode::bootstrap_component(Platform &platform)
/* never reached */
}
size_t Genode::Ram_allocator::_legacy_dataspace_size(Dataspace_capability ds)
{
return Dataspace_client(ds).size();
}

@ -45,6 +45,7 @@ void Heap::Dataspace_pool::remove_and_free(Dataspace &ds)
Ram_dataspace_capability ds_cap = ds.cap;
addr_t const at = addr_t(ds.local_addr);
size_t const size = ds.size;
remove(&ds);
@ -57,7 +58,11 @@ void Heap::Dataspace_pool::remove_and_free(Dataspace &ds)
ds.~Dataspace();
region_map->detach(at);
ram_alloc->free(ds_cap);
{
/* deallocate via '~Allocation' */
Ram::Allocation { *ram_alloc, { ds_cap, size } };
}
}
@ -84,20 +89,7 @@ Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
return _ds_pool.ram_alloc->try_alloc(size).convert<Result>(
[&] (Ram_dataspace_capability ds_cap) -> Result {
struct Alloc_guard
{
Ram_allocator &ram;
Ram_dataspace_capability ds;
bool keep = false;
Alloc_guard(Ram_allocator &ram, Ram_dataspace_capability ds)
: ram(ram), ds(ds) { }
~Alloc_guard() { if (!keep) ram.free(ds); }
} alloc_guard(*_ds_pool.ram_alloc, ds_cap);
[&] (Ram::Allocation &allocation) -> Result {
struct Attach_guard
{
@ -113,7 +105,7 @@ Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
Region_map::Attr attr { };
attr.writeable = true;
Region_map::Attach_result const result = _ds_pool.region_map->attach(ds_cap, attr);
Region_map::Attach_result const result = _ds_pool.region_map->attach(allocation.cap, attr);
if (result.failed()) {
using Error = Region_map::Attach_error;
return result.convert<Alloc_error>(
@ -129,6 +121,9 @@ Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
});
}
allocation.deallocate = false;
attach_guard.keep = true;
result.with_result(
[&] (Region_map::Range range) { attach_guard.range = range; },
[&] (auto) { /* handled above */ });
@ -151,10 +146,9 @@ Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
return metadata.convert<Result>(
[&] (void *md_ptr) -> Result {
Dataspace &ds = *construct_at<Dataspace>(md_ptr, ds_cap,
Dataspace &ds = *construct_at<Dataspace>(md_ptr, allocation.cap,
(void *)attach_guard.range.start, size);
_ds_pool.insert(&ds);
alloc_guard.keep = attach_guard.keep = true;
return &ds;
},
[&] (Alloc_error error) {
@ -312,7 +306,7 @@ void Heap::free(void *addr, size_t)
_quota_used -= ds->size;
_ds_pool.remove_and_free(*ds);
_alloc->free(ds);
_alloc->free(ds, ds->size);
}

@ -45,20 +45,7 @@ Allocator::Alloc_result Sliced_heap::try_alloc(size_t size)
return _ram_alloc.try_alloc(size).convert<Alloc_result>(
[&] (Ram_dataspace_capability ds_cap) -> Alloc_result {
struct Alloc_guard
{
Ram_allocator &ram;
Ram_dataspace_capability ds;
bool keep = false;
Alloc_guard(Ram_allocator &ram, Ram_dataspace_capability ds)
: ram(ram), ds(ds) { }
~Alloc_guard() { if (!keep) ram.free(ds); }
} alloc_guard(_ram_alloc, ds_cap);
[&] (Ram::Allocation &allocation) -> Alloc_result {
struct Attach_guard
{
@ -74,7 +61,7 @@ Allocator::Alloc_result Sliced_heap::try_alloc(size_t size)
Region_map::Attr attr { };
attr.writeable = true;
Region_map::Attach_result const result = _region_map.attach(ds_cap, attr);
Region_map::Attach_result const result = _region_map.attach(allocation.cap, attr);
if (result.failed()) {
using Error = Region_map::Attach_error;
return result.convert<Alloc_error>(
@ -98,18 +85,18 @@ Allocator::Alloc_result Sliced_heap::try_alloc(size_t size)
Mutex::Guard guard(_mutex);
Block * const block = construct_at<Block>((void *)attach_guard.range.start,
ds_cap, size);
allocation.cap, size);
_consumed += size;
_blocks.insert(block);
alloc_guard.keep = attach_guard.keep = true;
allocation.deallocate = false;
attach_guard.keep = true;
/* skip meta data prepended to the payload portion of the block */
void *ptr = block + 1;
return ptr;
},
[&] (Alloc_error error) {
return error; });
[&] (Ram::Error e) { return e; });
}
@ -117,6 +104,7 @@ void Sliced_heap::free(void *addr, size_t)
{
Ram_dataspace_capability ds_cap;
void *local_addr = nullptr;
size_t num_bytes = 0;
{
/* serialize access to block list */
Mutex::Guard guard(_mutex);
@ -130,8 +118,10 @@ void Sliced_heap::free(void *addr, size_t)
_blocks.remove(block);
_consumed -= block->size;
ds_cap = block->ds;
ds_cap = block->ds;
local_addr = block;
num_bytes = block->size;
/*
* Call destructor to properly destruct the dataspace capability
@ -141,7 +131,11 @@ void Sliced_heap::free(void *addr, size_t)
}
_region_map.detach(addr_t(local_addr));
_ram_alloc.free(ds_cap);
{
/* deallocate via '~Allocation' */
Ram::Allocation { _ram_alloc, { ds_cap, num_bytes } };
}
}

@ -55,9 +55,9 @@ Stack::Size_result Stack::size(size_t const size)
Region_map &rm = *env_stack_area_region_map;
return ram.try_alloc(ds_size).convert<Size_result>(
[&] (Ram_dataspace_capability ds_cap) {
[&] (Ram::Allocation &allocation) {
return rm.attach(ds_cap, Region_map::Attr {
return rm.attach(allocation.cap, Region_map::Attr {
.size = ds_size,
.offset = 0,
.use_at = true,
@ -73,6 +73,8 @@ Stack::Size_result Stack::size(size_t const size)
/* update stack information */
_base -= ds_size;
allocation.deallocate = false;
return (addr_t)_stack - _base;
},
[&] (Region_map::Attach_error) {
@ -118,11 +120,11 @@ Thread::_alloc_stack(size_t stack_size, Name const &name, bool main_thread)
/* allocate and attach backing store for the stack */
return ram.try_alloc(ds_size).convert<Alloc_stack_result>(
[&] (Ram_dataspace_capability const ds_cap)
{
[&] (Ram::Allocation &allocation)
{
addr_t const attach_addr = ds_addr - stack_area_virtual_base();
return env_stack_area_region_map->attach(ds_cap, Region_map::Attr {
return env_stack_area_region_map->attach(allocation.cap, Region_map::Attr {
.size = ds_size,
.offset = { },
.use_at = true,
@ -132,10 +134,8 @@ Thread::_alloc_stack(size_t stack_size, Name const &name, bool main_thread)
}).convert<Alloc_stack_result>(
[&] (Region_map::Range const range) -> Alloc_stack_result {
if (range.start != attach_addr) {
ram.free(ds_cap);
if (range.start != attach_addr)
return Stack_error::STACK_TOO_LARGE;
}
/*
* Now the stack is backed by memory, it is safe to access
@ -146,15 +146,14 @@ Thread::_alloc_stack(size_t stack_size, Name const &name, bool main_thread)
* cause trouble when the assignment operator of
* Native_capability is used.
*/
construct_at<Stack>(stack, name, *this, ds_addr, ds_cap);
construct_at<Stack>(stack, name, *this, ds_addr, allocation.cap);
Abi::init_stack(stack->top());
allocation.deallocate = false;
return stack;
},
[&] (Region_map::Attach_error) -> Alloc_stack_result {
ram.free(ds_cap);
return Stack_error::STACK_AREA_EXHAUSTED;
}
return Stack_error::STACK_AREA_EXHAUSTED; }
);
},
[&] (Ram_allocator::Alloc_error) -> Alloc_stack_result {
@ -172,7 +171,11 @@ void Thread::_free_stack(Stack &stack)
stack.~Stack();
Genode::env_stack_area_region_map->detach(ds_addr);
Genode::env_stack_area_ram_allocator->free(ds_cap);
/* deallocate RAM block */
{
Ram::Allocation { *env_stack_area_ram_allocator, { ds_cap, 0 } };
}
/* stack ready for reuse */
Stack_allocator::stack_allocator().free(stack);

@ -363,7 +363,14 @@ struct Linker::Elf_file : File
addr_t const dst = p.p_vaddr + reloc_base;
ram_cap[nr] = env.ram().alloc(p.p_memsz);
env.pd().alloc_ram(p.p_memsz).with_result(
[&] (Ram_dataspace_capability cap) { ram_cap[nr] = cap; },
[&] (Pd_session::Alloc_ram_error) { });
if (!ram_cap[nr].valid()) {
error("dynamic linker failed to allocate RAM for RW segment ", nr);
return;
}
Region_map::r()->attach(ram_cap[nr], Region_map::Attr {
.size = { },
@ -404,9 +411,8 @@ struct Linker::Elf_file : File
/* free ram of RW segments */
for (unsigned i = 0; i < Phdr::MAX_PHDR; i++)
if (ram_cap[i].valid()) {
env.ram().free(ram_cap[i]);
}
if (ram_cap[i].valid())
env.pd().free_ram(ram_cap[i]);
}
};

@ -409,9 +409,11 @@ struct Slab_backend_alloc : public Genode::Allocator,
return _ram.try_alloc(BLOCK_SIZE).convert<Extend_result>(
[&] (Ram_dataspace_capability ds) -> Extend_result {
[&] (Ram::Allocation &allocation) -> Extend_result {
_ds_cap[_index] = ds;
allocation.deallocate = false;
_ds_cap[_index] = allocation.cap;
return Region_map_client::attach(_ds_cap[_index], {
.size = BLOCK_SIZE,
@ -435,7 +437,6 @@ struct Slab_backend_alloc : public Genode::Allocator,
[&] (Region_map::Attach_error e) {
Genode::error("Slab_backend_alloc: local attach_at failed");
_ram.free(ds);
_ds_cap[_index] = { };
using Error = Region_map::Attach_error;

@ -102,8 +102,8 @@ namespace Allocator {
_ds_cap[_index] = Rump::env().env().ram().try_alloc(BLOCK_SIZE, _cache)
.template convert<Ram_dataspace_capability>(
[&] (Ram_dataspace_capability cap) { return cap; },
[&] (Allocator::Alloc_error) { return Ram_dataspace_capability(); }
[&] (Ram::Allocation &a) { a.deallocate = false; return a.cap; },
[&] (Allocator::Alloc_error) { return Ram_dataspace_capability(); }
);
if (!_ds_cap[_index].valid()) {

@ -486,8 +486,8 @@ class Vfs::Rump_file_system : public File_system
};
return _env.env().ram().try_alloc(s.st_size).convert<Dataspace_capability>(
[&] (Ram_dataspace_capability const ds_cap) {
return _env.env().rm().attach(ds_cap, {
[&] (Genode::Ram::Allocation &a) {
return _env.env().rm().attach(a.cap, {
.size = { }, .offset = { }, .use_at = { },
.at = { }, .executable = { }, .writeable = true
}).convert<Dataspace_capability>(
@ -496,15 +496,15 @@ class Vfs::Rump_file_system : public File_system
bool const complete = read_file_content(range);
_env.env().rm().detach(range.start);
if (complete)
return ds_cap;
if (complete) {
a.deallocate = false;
return a.cap;
}
Genode::error("rump failed to read content into VFS dataspace");
_env.env().ram().free(ds_cap);
return Dataspace_capability();
},
[&] (Region_map::Attach_error) {
_env.env().ram().free(ds_cap);
return Dataspace_capability();
}
);

@ -60,25 +60,22 @@ struct Libc::Malloc_ram_allocator : Ram_allocator
{
return _ram.try_alloc(size, cache).convert<Alloc_result>(
[&] (Ram_dataspace_capability cap) {
new (_md_alloc) Registered<Dataspace>(_dataspaces, cap);
return cap; },
[&] (Ram::Allocation &a) -> Result {
new (_md_alloc) Registered<Dataspace>(_dataspaces, a.cap);
a.deallocate = false;
return { *this, { a.cap, size } };
},
[&] (Alloc_error error) {
[&] (Alloc_error error) -> Result {
return error; });
}
void free(Ram_dataspace_capability ds_cap) override
void _free(Ram::Allocation &a) override
{
_dataspaces.for_each([&] (Registered<Dataspace> &ds) {
if (ds_cap == ds.cap)
if (a.cap == ds.cap)
_release(ds); });
}
size_t dataspace_size(Ram_dataspace_capability ds_cap) override
{
return _ram.dataspace_size(ds_cap);
}
};
#endif /* _LIBC__INTERNAL__MALLOC_RAM_ALLOCATOR_H_ */

@ -53,13 +53,14 @@ int Libc::Mem_alloc_impl::Dataspace_pool::expand(size_t size, Range_allocator *a
Ram_dataspace_capability new_ds_cap { };
int result = 0;
_ram->try_alloc(size).with_result(
[&] (Ram_dataspace_capability cap) { new_ds_cap = cap; },
[&] (Ram_allocator::Alloc_error e) {
[&] (Ram::Allocation &a) { a.deallocate = false; new_ds_cap = a.cap; },
[&] (Ram::Error e) {
switch (e) {
case Ram_allocator::Alloc_error::OUT_OF_RAM: result = -2; break;
case Ram_allocator::Alloc_error::OUT_OF_CAPS: result = -4; break;
case Ram_allocator::Alloc_error::DENIED: break;
case Ram::Error::OUT_OF_RAM: result = -2; break;
case Ram::Error::OUT_OF_CAPS: result = -4; break;
case Ram::Error::DENIED: break;
}
result = -5;
});

@ -131,6 +131,8 @@ struct Igd::Device
_pci.free_dma_buffer(cap);
}
void _free(Genode::Ram::Allocation &a) override { free(a.cap); }
addr_t dma_addr(Ram_dataspace_capability ds_cap) override
{
return _pci.dma_addr(ds_cap);
@ -140,11 +142,9 @@ struct Igd::Device
* RAM allocator interface
*/
size_t dataspace_size(Ram_dataspace_capability) override { return 0; }
Alloc_result try_alloc(size_t size, Cache) override
{
return alloc(size);
return { *this, { alloc(size), size } };
}
} _pci_backend_alloc;

@ -391,7 +391,7 @@ Session_component::alloc_dma_buffer(size_t const size, Cache cache)
try {
Dma_buffer & buf = _dma_allocator.alloc_buffer(guard.ram_cap,
_env.pd().dma_addr(guard.ram_cap),
_env_ram.dataspace_size(guard.ram_cap));
_env.pd().ram_size(guard.ram_cap));
guard.buf = &buf;
_domain_registry.for_each_domain([&] (Io_mmu::Domain & domain) {

@ -909,18 +909,18 @@ class Vfs::Ram_file_system : public Vfs::File_system
size_t const len = file->length();
return _env.env().ram().try_alloc(len).convert<Dataspace_capability>(
[&] (Ram_dataspace_capability ds_cap) {
return _env.env().rm().attach(ds_cap, {
[&] (Ram::Allocation &allocation) {
return _env.env().rm().attach(allocation.cap, {
.size = { }, .offset = { }, .use_at = { },
.at = { }, .executable = { }, .writeable = true
}).convert<Dataspace_capability>(
[&] (Region_map::Range const range) {
file->read(Byte_range_ptr((char *)range.start, len), Seek{0});
_env.env().rm().detach(range.start);
return ds_cap;
allocation.deallocate = false;
return allocation.cap;
},
[&] (Region_map::Attach_error) {
_env.env().ram().free(ds_cap);
return Dataspace_capability();
}
);

@ -568,18 +568,18 @@ class Vfs::Tar_file_system : public File_system
using Region_map = Genode::Region_map;
return _env.ram().try_alloc(len).convert<Dataspace_capability>(
[&] (Ram_dataspace_capability ds_cap) {
return _env.rm().attach(ds_cap, {
[&] (Genode::Ram::Allocation &allocation) {
return _env.rm().attach(allocation.cap, {
.size = { }, .offset = { }, .use_at = { },
.at = { }, .executable = { }, .writeable = true
}).convert<Dataspace_capability>(
[&] (Region_map::Range const range) {
memcpy((void *)range.start, record->data(), len);
_env.rm().detach(range.start);
return ds_cap;
allocation.deallocate = false;
return allocation.cap;
},
[&] (Region_map::Attach_error) {
_env.ram().free(ds_cap);
return Dataspace_capability();
}
);

@ -129,38 +129,39 @@ class Genode::Session_env : public Ram_allocator,
enum { MAX_SHARED_RAM = 4096 };
enum { DS_SIZE_GRANULARITY_LOG2 = 12 };
Alloc_result result = Alloc_error::DENIED;
size_t const ds_size = align_addr(size, DS_SIZE_GRANULARITY_LOG2);
Alloc_result result = Alloc_error::DENIED;
try {
_consume(ds_size, MAX_SHARED_RAM, 1, MAX_SHARED_CAP, [&] ()
_consume(ds_size, MAX_SHARED_RAM, 1, MAX_SHARED_CAP, [&]
{
result = _env.ram().try_alloc(ds_size, cache);
});
}
catch (Out_of_ram) { result = Alloc_error::OUT_OF_RAM; }
catch (Out_of_caps) { result = Alloc_error::OUT_OF_CAPS; }
catch (Out_of_ram) { return Alloc_error::OUT_OF_RAM; }
catch (Out_of_caps) { return Alloc_error::OUT_OF_CAPS; }
return result;
return result.convert<Alloc_result>(
[&] (Allocation &a) -> Alloc_result {
a.deallocate = false;
return { *this, a };
},
[&] (Alloc_error e) { return e; });
}
void free(Ram_dataspace_capability ds) override
void _free(Allocation &ds) override
{
_replenish(_env.ram().dataspace_size(ds), 1, [&] () {
_env.ram().free(ds);
});
_replenish(_env.pd().ram_size(ds.cap), 1, [&] {
_env.ram().free(ds.cap); });
}
size_t dataspace_size(Ram_dataspace_capability ds) override { return _env.ram().dataspace_size(ds); }
/****************
** Region_map **
****************/
Attach_result attach(Dataspace_capability ds, Attr const &attr) override
Attach_result attach(Dataspace_capability ds, Region_map::Attr const &attr) override
{
enum { MAX_SHARED_CAP = 2 };
enum { MAX_SHARED_RAM = 4 * 4096 };

@ -17,7 +17,10 @@
#include <base/stdint.h>
#include <exception.h>
class Ram {
namespace Vmm { class Ram; }
class Vmm::Ram {
private: