core: improve coding-style consistency

This commit is contained in:
Norman Feske 2023-02-08 16:06:57 +01:00 committed by Christian Helmuth
parent 5bdc88bf57
commit c10904967b
38 changed files with 1517 additions and 1480 deletions

View File

@ -17,6 +17,7 @@
namespace Genode { class Irq_object; class Irq_args; }
class Genode::Irq_object
{
private:

View File

@ -29,379 +29,381 @@
#include <ipc_pager.h>
#include <rpc_cap_factory.h>
namespace Genode {
typedef Cpu_session::Thread_creation_failed Invalid_thread;
class Pager_entrypoint;
class Pager_object;
class Exception_handlers
{
private:
template <uint8_t EV>
__attribute__((regparm(1))) static void _handler(Pager_object &);
public:
Exception_handlers(Pager_object &);
template <uint8_t EV>
void register_handler(Pager_object &, Nova::Mtd,
void (__attribute__((regparm(1)))*)(Pager_object &) = nullptr);
};
class Pager_object : public Object_pool<Pager_object>::Entry
{
private:
unsigned long _badge; /* used for debugging */
/**
* User-level signal handler registered for this pager object via
* 'Cpu_session::exception_handler()'.
*/
Signal_context_capability _exception_sigh { };
/**
* selectors for
* - cleanup portal
* - semaphore used by caller used to notify paused state
* - semaphore used to block during page fault handling or pausing
*/
addr_t _selectors;
addr_t _initial_esp = 0;
addr_t _initial_eip = 0;
addr_t _client_exc_pt_sel;
Mutex _state_lock { };
struct
{
struct Thread_state thread;
addr_t sel_client_ec;
enum {
BLOCKED = 0x1U,
DEAD = 0x2U,
SINGLESTEP = 0x4U,
SIGNAL_SM = 0x8U,
DISSOLVED = 0x10U,
SUBMIT_SIGNAL = 0x20U,
BLOCKED_PAUSE_SM = 0x40U,
MIGRATE = 0x80U
};
uint8_t _status;
bool modified;
/* convenience function to access pause/recall state */
inline bool blocked() { return _status & BLOCKED;}
inline void block() { _status |= BLOCKED; }
inline void unblock() { _status &= (uint8_t)(~BLOCKED); }
inline bool blocked_pause_sm() { return _status & BLOCKED_PAUSE_SM;}
inline void block_pause_sm() { _status |= (uint8_t)BLOCKED_PAUSE_SM; }
inline void unblock_pause_sm() { _status &= (uint8_t)(~BLOCKED_PAUSE_SM); }
inline void mark_dead() { _status |= DEAD; }
inline bool is_dead() { return _status & DEAD; }
inline bool singlestep() { return _status & SINGLESTEP; }
inline void mark_signal_sm() { _status |= SIGNAL_SM; }
inline bool has_signal_sm() { return _status & SIGNAL_SM; }
inline void mark_dissolved() { _status |= DISSOLVED; }
inline bool dissolved() { return _status & DISSOLVED; }
inline bool to_submit() { return _status & SUBMIT_SIGNAL; }
inline void submit_signal() { _status |= SUBMIT_SIGNAL; }
inline void reset_submit() { _status &= (uint8_t)(~SUBMIT_SIGNAL); }
bool migrate() const { return _status & MIGRATE; }
void reset_migrate() { _status &= (uint8_t)(~MIGRATE); }
void request_migrate() { _status |= MIGRATE; }
} _state { };
Cpu_session_capability _cpu_session_cap;
Thread_capability _thread_cap;
Affinity::Location _location;
Affinity::Location _next_location { };
Exception_handlers _exceptions;
addr_t _pd_target;
void _copy_state_from_utcb(Nova::Utcb const &utcb);
void _copy_state_to_utcb(Nova::Utcb &utcb) const;
uint8_t _unsynchronized_client_recall(bool get_state_and_block);
addr_t sel_pt_cleanup() const { return _selectors; }
addr_t sel_sm_block_pause() const { return _selectors + 1; }
addr_t sel_sm_block_oom() const { return _selectors + 2; }
addr_t sel_oom_portal() const { return _selectors + 3; }
__attribute__((regparm(1)))
static void _page_fault_handler(Pager_object &);
__attribute__((regparm(1)))
static void _startup_handler(Pager_object &);
__attribute__((regparm(1)))
static void _invoke_handler(Pager_object &);
__attribute__((regparm(1)))
static void _recall_handler(Pager_object &);
__attribute__((regparm(3)))
static void _oom_handler(addr_t, addr_t, addr_t);
void _construct_pager();
bool _migrate_thread();
public:
Pager_object(Cpu_session_capability cpu_session_cap,
Thread_capability thread_cap,
unsigned long badge, Affinity::Location location,
Genode::Session_label const &,
Cpu_session::Name const &);
virtual ~Pager_object();
unsigned long badge() const { return _badge; }
void reset_badge()
{
Genode::Mutex::Guard guard(_state_lock);
_badge = 0;
}
const char * client_thread() const;
const char * client_pd() const;
virtual int pager(Ipc_pager &ps) = 0;
/**
* Assign user-level exception handler for the pager object
*/
void exception_handler(Signal_context_capability sigh)
{
_exception_sigh = sigh;
}
Affinity::Location location() const { return _location; }
void migrate(Affinity::Location);
/**
* Assign PD selector to PD
*/
void assign_pd(addr_t pd_sel) { _pd_target = pd_sel; }
addr_t pd_sel() const { return _pd_target; }
void exception(uint8_t exit_id);
/**
* Return base of initial portal window
*/
addr_t exc_pt_sel_client() { return _client_exc_pt_sel; }
/**
* Set initial stack pointer used by the startup handler
*/
addr_t initial_esp() { return _initial_esp; }
void initial_esp(addr_t esp) { _initial_esp = esp; }
/**
* Set initial instruction pointer used by the startup handler
*/
void initial_eip(addr_t eip) { _initial_eip = eip; }
/**
* Continue execution of pager object
*/
void wake_up();
/**
* Notify exception handler about the occurrence of an exception
*/
bool submit_exception_signal()
{
if (!_exception_sigh.valid()) return false;
_state.reset_submit();
Signal_transmitter transmitter(_exception_sigh);
transmitter.submit();
return true;
}
/**
* Copy thread state of recalled thread.
*/
bool copy_thread_state(Thread_state * state_dst)
{
Mutex::Guard _state_lock_guard(_state_lock);
if (!state_dst || !_state.blocked())
return false;
*state_dst = _state.thread;
return true;
}
/*
* Copy thread state to recalled thread.
*/
bool copy_thread_state(Thread_state state_src)
{
Mutex::Guard _state_lock_guard(_state_lock);
if (!_state.blocked())
return false;
_state.thread = state_src;
_state.modified = true;
return true;
}
uint8_t client_recall(bool get_state_and_block);
void client_set_ec(addr_t ec) { _state.sel_client_ec = ec; }
inline void single_step(bool on)
{
_state_lock.acquire();
if (_state.is_dead() || !_state.blocked() ||
(on && (_state._status & _state.SINGLESTEP)) ||
(!on && !(_state._status & _state.SINGLESTEP))) {
_state_lock.release();
return;
}
if (on)
_state._status |= _state.SINGLESTEP;
else
_state._status &= (uint8_t)(~_state.SINGLESTEP);
_state_lock.release();
/* force client in exit and thereby apply single_step change */
client_recall(false);
}
/**
* Return CPU session that was used to created the thread
*/
Cpu_session_capability cpu_session_cap() const { return _cpu_session_cap; }
/**
* Return thread capability
*
* This function enables the destructor of the thread's
* address-space region map to kill the thread.
*/
Thread_capability thread_cap() const { return _thread_cap; }
/**
* Note in the thread state that an unresolved page
* fault occurred.
*/
void unresolved_page_fault_occurred()
{
_state.thread.unresolved_page_fault = true;
}
/**
* Make sure nobody is in the handler anymore by doing an IPC to a
* local cap pointing to same serving thread (if not running in the
* context of the serving thread). When the call returns
* we know that nobody is handled by this object anymore, because
* all remotely available portals had been revoked beforehand.
*/
void cleanup_call();
/**
* Portal called by thread that causes a out of memory in kernel.
*/
addr_t create_oom_portal();
enum Policy {
STOP = 1,
UPGRADE_CORE_TO_DST = 2,
UPGRADE_PREFER_SRC_TO_DST = 3,
};
enum Oom {
SEND = 1, REPLY = 2, SELF = 4,
SRC_CORE_PD = ~0UL, SRC_PD_UNKNOWN = 0,
NO_NOTIFICATION = 0
};
/**
* Implements policy on how to react on out of memory in kernel.
*
* Used solely inside core. On Genode core creates all the out
* of memory portals per EC. If the PD of a EC runs out of kernel
* memory it causes a OOM portal traversal, which is handled
* by the pager object of the causing thread.
*
* /param pd_sel PD selector from where to transfer kernel memory
* resources. The PD of this pager_object is the
* target PD.
* /param pd debug feature - string of PD (transfer_from)
* /param thread debug feature - string of EC (transfer_from)
*/
uint8_t handle_oom(addr_t pd_sel = SRC_CORE_PD,
const char * pd = "core",
const char * thread = "unknown",
Policy = Policy::UPGRADE_CORE_TO_DST);
static uint8_t handle_oom(addr_t pd_from, addr_t pd_to,
char const * src_pd,
char const * src_thread,
Policy policy,
addr_t sm_notify = NO_NOTIFICATION,
char const * dst_pd = "unknown",
char const * dst_thread = "unknown");
void print(Output &out) const;
};
/**
* Paging entry point
*
* For a paging entry point can hold only one activation. So, paging is
* strictly serialized for one entry point.
*/
class Pager_entrypoint : public Object_pool<Pager_object>
{
public:
/**
* Constructor
*
* \param cap_factory factory for creating capabilities
* for the pager objects managed by this
* entry point
*/
Pager_entrypoint(Rpc_cap_factory &cap_factory);
/**
* Associate Pager_object with the entry point
*/
Pager_capability manage(Pager_object &) {
return Pager_capability(); }
/**
* Dissolve Pager_object from entry point
*/
void dissolve(Pager_object &obj);
};
class Exception_handlers;
}
/**
 * Registry of per-exception-vector handler portals for a 'Pager_object'
 */
class Genode::Exception_handlers
{
private:

/* generic handler template, one instantiation per exception vector EV */
template <uint8_t EV>
__attribute__((regparm(1))) static void _handler(Pager_object &);

public:

Exception_handlers(Pager_object &);

/*
 * Register handler portal for exception vector EV
 *
 * If no handler function is given, presumably the generic
 * '_handler<EV>' template above is used — confirm in implementation.
 */
template <uint8_t EV>
void register_handler(Pager_object &, Nova::Mtd,
void (__attribute__((regparm(1)))*)(Pager_object &) = nullptr);
};
/**
 * Pager for one client thread
 *
 * A pager object bundles the capability selectors and the pause/recall
 * bookkeeping that core uses to handle page faults, exceptions, pausing,
 * single-stepping, migration, and kernel out-of-memory conditions of a
 * single client thread.
 */
class Genode::Pager_object : public Object_pool<Pager_object>::Entry
{
private:

unsigned long _badge; /* used for debugging */

/**
 * User-level signal handler registered for this pager object via
 * 'Cpu_session::exception_handler()'.
 */
Signal_context_capability _exception_sigh { };

/**
 * Base of the selector window used for
 * - cleanup portal
 * - semaphore used by the caller to get notified about the paused state
 * - semaphore used to block during page fault handling or pausing
 * - out-of-memory portal
 * (see the 'sel_*' accessors below)
 */
addr_t _selectors;

addr_t _initial_esp = 0;   /* initial stack pointer, see 'initial_esp' */
addr_t _initial_eip = 0;   /* initial instruction pointer, see 'initial_eip' */
addr_t _client_exc_pt_sel; /* base of the client's initial portal window */

/* protects '_state' below */
Mutex _state_lock { };

/*
 * Pause/recall state of the client thread, accessed under '_state_lock'
 */
struct
{
struct Thread_state thread; /* register state captured on recall */
addr_t sel_client_ec;       /* EC selector of the client thread */
enum {
BLOCKED = 0x1U,
DEAD = 0x2U,
SINGLESTEP = 0x4U,
SIGNAL_SM = 0x8U,
DISSOLVED = 0x10U,
SUBMIT_SIGNAL = 0x20U,
BLOCKED_PAUSE_SM = 0x40U,
MIGRATE = 0x80U
};
uint8_t _status; /* bitwise or of the flags above */

/*
 * Set when 'thread' was overwritten via 'copy_thread_state' —
 * presumably consumed when writing the state back to the UTCB
 * (see '_copy_state_to_utcb'), confirm in implementation.
 */
bool modified;

/* convenience function to access pause/recall state */
inline bool blocked() { return _status & BLOCKED;}
inline void block() { _status |= BLOCKED; }
inline void unblock() { _status &= (uint8_t)(~BLOCKED); }
inline bool blocked_pause_sm() { return _status & BLOCKED_PAUSE_SM;}
inline void block_pause_sm() { _status |= (uint8_t)BLOCKED_PAUSE_SM; }
inline void unblock_pause_sm() { _status &= (uint8_t)(~BLOCKED_PAUSE_SM); }
inline void mark_dead() { _status |= DEAD; }
inline bool is_dead() { return _status & DEAD; }
inline bool singlestep() { return _status & SINGLESTEP; }
inline void mark_signal_sm() { _status |= SIGNAL_SM; }
inline bool has_signal_sm() { return _status & SIGNAL_SM; }
inline void mark_dissolved() { _status |= DISSOLVED; }
inline bool dissolved() { return _status & DISSOLVED; }
inline bool to_submit() { return _status & SUBMIT_SIGNAL; }
inline void submit_signal() { _status |= SUBMIT_SIGNAL; }
inline void reset_submit() { _status &= (uint8_t)(~SUBMIT_SIGNAL); }
bool migrate() const { return _status & MIGRATE; }
void reset_migrate() { _status &= (uint8_t)(~MIGRATE); }
void request_migrate() { _status |= MIGRATE; }
} _state { };

Cpu_session_capability _cpu_session_cap; /* CPU session the thread was created with */
Thread_capability _thread_cap;
Affinity::Location _location;            /* currently assigned CPU, see 'location' */
Affinity::Location _next_location { };   /* migration target, see 'migrate' */
Exception_handlers _exceptions;
addr_t _pd_target;                       /* PD selector assigned via 'assign_pd' */

/* transfer thread state between '_state.thread' and the NOVA UTCB */
void _copy_state_from_utcb(Nova::Utcb const &utcb);
void _copy_state_to_utcb(Nova::Utcb &utcb) const;

/*
 * Recall variant without locking — presumably called with
 * '_state_lock' held (see 'client_recall'), confirm in implementation.
 */
uint8_t _unsynchronized_client_recall(bool get_state_and_block);

/* selectors within the window starting at '_selectors' */
addr_t sel_pt_cleanup() const { return _selectors; }
addr_t sel_sm_block_pause() const { return _selectors + 1; }
addr_t sel_sm_block_oom() const { return _selectors + 2; }
addr_t sel_oom_portal() const { return _selectors + 3; }

/* portal entry points using the regparm calling convention */
__attribute__((regparm(1)))
static void _page_fault_handler(Pager_object &);
__attribute__((regparm(1)))
static void _startup_handler(Pager_object &);
__attribute__((regparm(1)))
static void _invoke_handler(Pager_object &);
__attribute__((regparm(1)))
static void _recall_handler(Pager_object &);
__attribute__((regparm(3)))
static void _oom_handler(addr_t, addr_t, addr_t);

void _construct_pager();
bool _migrate_thread();

public:

Pager_object(Cpu_session_capability cpu_session_cap,
Thread_capability thread_cap,
unsigned long badge, Affinity::Location location,
Genode::Session_label const &,
Cpu_session::Name const &);

virtual ~Pager_object();

unsigned long badge() const { return _badge; }

void reset_badge()
{
Genode::Mutex::Guard guard(_state_lock);
_badge = 0;
}

/* name strings of the paged client thread and its PD */
const char * client_thread() const;
const char * client_pd() const;

/* abstract interface for handling a fault reported via 'ps' */
virtual int pager(Ipc_pager &ps) = 0;

/**
 * Assign user-level exception handler for the pager object
 */
void exception_handler(Signal_context_capability sigh)
{
_exception_sigh = sigh;
}

Affinity::Location location() const { return _location; }

void migrate(Affinity::Location);

/**
 * Assign PD selector to PD
 */
void assign_pd(addr_t pd_sel) { _pd_target = pd_sel; }
addr_t pd_sel() const { return _pd_target; }

void exception(uint8_t exit_id);

/**
 * Return base of initial portal window
 */
addr_t exc_pt_sel_client() { return _client_exc_pt_sel; }

/**
 * Accessors for the initial stack pointer used by the startup handler
 */
addr_t initial_esp() { return _initial_esp; }
void initial_esp(addr_t esp) { _initial_esp = esp; }

/**
 * Set initial instruction pointer used by the startup handler
 */
void initial_eip(addr_t eip) { _initial_eip = eip; }

/**
 * Continue execution of pager object
 */
void wake_up();

/**
 * Notify exception handler about the occurrence of an exception
 *
 * \return true if a handler was registered and got notified
 */
bool submit_exception_signal()
{
if (!_exception_sigh.valid()) return false;
_state.reset_submit();
Signal_transmitter transmitter(_exception_sigh);
transmitter.submit();
return true;
}

/**
 * Copy thread state of recalled thread.
 *
 * \return true on success, false if 'state_dst' is null or the
 *         thread is not in blocked state
 */
bool copy_thread_state(Thread_state * state_dst)
{
Mutex::Guard _state_lock_guard(_state_lock);
if (!state_dst || !_state.blocked())
return false;
*state_dst = _state.thread;
return true;
}

/*
 * Copy thread state to recalled thread.
 *
 * \return true on success, false if the thread is not blocked
 */
bool copy_thread_state(Thread_state state_src)
{
Mutex::Guard _state_lock_guard(_state_lock);
if (!_state.blocked())
return false;
_state.thread = state_src;
_state.modified = true;
return true;
}

uint8_t client_recall(bool get_state_and_block);
void client_set_ec(addr_t ec) { _state.sel_client_ec = ec; }

/**
 * Enable or disable single-stepping of the blocked client thread
 *
 * The request is ignored if the thread is dead, not blocked, or
 * already in the requested mode. Otherwise, the client is recalled
 * afterwards to apply the changed setting.
 */
inline void single_step(bool on)
{
_state_lock.acquire();
if (_state.is_dead() || !_state.blocked() ||
(on && (_state._status & _state.SINGLESTEP)) ||
(!on && !(_state._status & _state.SINGLESTEP))) {
_state_lock.release();
return;
}
if (on)
_state._status |= _state.SINGLESTEP;
else
_state._status &= (uint8_t)(~_state.SINGLESTEP);
_state_lock.release();
/* force client in exit and thereby apply single_step change */
client_recall(false);
}

/**
 * Return CPU session that was used to create the thread
 */
Cpu_session_capability cpu_session_cap() const { return _cpu_session_cap; }

/**
 * Return thread capability
 *
 * This function enables the destructor of the thread's
 * address-space region map to kill the thread.
 */
Thread_capability thread_cap() const { return _thread_cap; }

/**
 * Note in the thread state that an unresolved page
 * fault occurred.
 */
void unresolved_page_fault_occurred()
{
_state.thread.unresolved_page_fault = true;
}

/**
 * Make sure nobody is in the handler anymore by doing an IPC to a
 * local cap pointing to same serving thread (if not running in the
 * context of the serving thread). When the call returns
 * we know that nobody is handled by this object anymore, because
 * all remotely available portals had been revoked beforehand.
 */
void cleanup_call();

/**
 * Portal called by a thread that caused an out-of-memory condition
 * in the kernel.
 */
addr_t create_oom_portal();

/* policy on how to re-balance kernel memory on an OOM condition */
enum Policy {
STOP = 1,
UPGRADE_CORE_TO_DST = 2,
UPGRADE_PREFER_SRC_TO_DST = 3,
};

/* special argument values used by the 'handle_oom' interfaces below */
enum Oom {
SEND = 1, REPLY = 2, SELF = 4,
SRC_CORE_PD = ~0UL, SRC_PD_UNKNOWN = 0,
NO_NOTIFICATION = 0
};

/**
 * Implements policy on how to react on out of memory in kernel.
 *
 * Used solely inside core. On Genode, core creates all the
 * out-of-memory portals per EC. If the PD of an EC runs out of
 * kernel memory, it causes an OOM portal traversal, which is
 * handled by the pager object of the causing thread.
 *
 * \param pd_sel  PD selector from where to transfer kernel memory
 *                resources. The PD of this pager_object is the
 *                target PD.
 * \param pd      debug feature - string of PD (transfer_from)
 * \param thread  debug feature - string of EC (transfer_from)
 */
uint8_t handle_oom(addr_t pd_sel = SRC_CORE_PD,
const char * pd = "core",
const char * thread = "unknown",
Policy = Policy::UPGRADE_CORE_TO_DST);
static uint8_t handle_oom(addr_t pd_from, addr_t pd_to,
char const * src_pd,
char const * src_thread,
Policy policy,
addr_t sm_notify = NO_NOTIFICATION,
char const * dst_pd = "unknown",
char const * dst_thread = "unknown");

void print(Output &out) const;
};
/**
 * Paging entry point
 *
 * A paging entry point can hold only one activation. So, paging is
 * strictly serialized for one entry point.
 */
class Genode::Pager_entrypoint : public Object_pool<Pager_object>
{
public:

/**
 * Constructor
 *
 * \param cap_factory  factory for creating capabilities
 *                     for the pager objects managed by this
 *                     entry point
 */
Pager_entrypoint(Rpc_cap_factory &cap_factory);

/**
 * Associate Pager_object with the entry point
 *
 * Returns a default-constructed (invalid) 'Pager_capability' —
 * no dedicated pager capability is handed out here.
 */
Pager_capability manage(Pager_object &) {
return Pager_capability(); }

/**
 * Dissolve Pager_object from entry point
 */
void dissolve(Pager_object &obj);
};
#endif /* _CORE__INCLUDE__PAGER_H_ */

View File

@ -20,127 +20,127 @@
#include <core_mem_alloc.h>
#include <address_space.h>
namespace Genode {
class Platform : public Platform_generic
{
public:
enum { MAX_SUPPORTED_CPUS = 64};
private:
Core_mem_allocator _core_mem_alloc { }; /* core-accessible memory */
Phys_allocator _io_mem_alloc; /* MMIO allocator */
Phys_allocator _io_port_alloc; /* I/O port allocator */
Phys_allocator _irq_alloc; /* IRQ allocator */
Rom_fs _rom_fs { }; /* ROM file system */
unsigned _gsi_base_sel { 0 }; /* cap selector of 1st IRQ */
unsigned _core_pd_sel { 0 }; /* cap selector of root PD */
addr_t _core_phys_start { 0 };
/**
* Virtual address range usable by non-core processes
*/
const addr_t _vm_base;
size_t _vm_size;
/* available CPUs */
Affinity::Space _cpus;
/* map of virtual cpu ids in Genode to kernel cpu ids */
uint16_t map_cpu_ids[MAX_SUPPORTED_CPUS];
addr_t _map_pages(addr_t phys_page, addr_t pages,
bool guard_page = false);
size_t _max_caps = 0;
void _init_rom_modules();
addr_t _rom_module_phys(addr_t virt);
public:
/**
* Constructor
*/
Platform();
namespace Genode { class Platform; }
/********************************
** Generic platform interface **
********************************/
class Genode::Platform : public Platform_generic
{
public:
Range_allocator &ram_alloc() override { return _core_mem_alloc.phys_alloc(); }
Range_allocator &io_mem_alloc() override { return _io_mem_alloc; }
Range_allocator &io_port_alloc() override { return _io_port_alloc; }
Range_allocator &irq_alloc() override { return _irq_alloc; }
Range_allocator &region_alloc() override { return _core_mem_alloc.virt_alloc(); }
Range_allocator &core_mem_alloc() override { return _core_mem_alloc; }
addr_t vm_start() const override { return _vm_base; }
size_t vm_size() const override { return _vm_size; }
Rom_fs &rom_fs() override { return _rom_fs; }
size_t max_caps() const override { return _max_caps; }
void wait_for_exit() override;
enum { MAX_SUPPORTED_CPUS = 64};
bool supports_direct_unmap() const override { return true; }
private:
Address_space &core_pd() { ASSERT_NEVER_CALLED; }
Core_mem_allocator _core_mem_alloc { }; /* core-accessible memory */
Phys_allocator _io_mem_alloc; /* MMIO allocator */
Phys_allocator _io_port_alloc; /* I/O port allocator */
Phys_allocator _irq_alloc; /* IRQ allocator */
Rom_fs _rom_fs { }; /* ROM file system */
unsigned _gsi_base_sel { 0 }; /* cap selector of 1st IRQ */
unsigned _core_pd_sel { 0 }; /* cap selector of root PD */
addr_t _core_phys_start { 0 };
Affinity::Space affinity_space() const override { return _cpus; }
/**
* Virtual address range usable by non-core processes
*/
const addr_t _vm_base;
size_t _vm_size;
/* available CPUs */
Affinity::Space _cpus;
/* map of virtual cpu ids in Genode to kernel cpu ids */
uint16_t map_cpu_ids[MAX_SUPPORTED_CPUS];
addr_t _map_pages(addr_t phys_page, addr_t pages,
bool guard_page = false);
size_t _max_caps = 0;
void _init_rom_modules();
addr_t _rom_module_phys(addr_t virt);
public:
/**
* Constructor
*/
Platform();
/*******************
** NOVA specific **
*******************/
/********************************
** Generic platform interface **
********************************/
/**
* Return capability selector of first global system interrupt
*/
int gsi_base_sel() const { return _gsi_base_sel; }
Range_allocator &ram_alloc() override { return _core_mem_alloc.phys_alloc(); }
Range_allocator &io_mem_alloc() override { return _io_mem_alloc; }
Range_allocator &io_port_alloc() override { return _io_port_alloc; }
Range_allocator &irq_alloc() override { return _irq_alloc; }
Range_allocator &region_alloc() override { return _core_mem_alloc.virt_alloc(); }
Range_allocator &core_mem_alloc() override { return _core_mem_alloc; }
addr_t vm_start() const override { return _vm_base; }
size_t vm_size() const override { return _vm_size; }
Rom_fs &rom_fs() override { return _rom_fs; }
size_t max_caps() const override { return _max_caps; }
void wait_for_exit() override;
/**
* Determine size of a core local mapping required for a
* core_rm_session detach().
*/
size_t region_alloc_size_at(void * addr)
{
using Size_at_error = Allocator_avl::Size_at_error;
bool supports_direct_unmap() const override { return true; }
return (_core_mem_alloc.virt_alloc())()->size_at(addr).convert<size_t>(
[ ] (size_t s) { return s; },
[ ] (Size_at_error) { return 0U; });
}
Address_space &core_pd() { ASSERT_NEVER_CALLED; }
/**
* Return kernel CPU ID for given Genode CPU
*/
unsigned pager_index(Affinity::Location location) const;
unsigned kernel_cpu_id(Affinity::Location location) const;
Affinity::Space affinity_space() const override { return _cpus; }
Affinity::Location sanitize(Affinity::Location location) {
return Affinity::Location(location.xpos() % _cpus.width(),
location.ypos() % _cpus.height(),
location.width(), location.height());
}
/**
* PD kernel capability selector of core
*/
unsigned core_pd_sel() const { return _core_pd_sel; }
/*******************
** NOVA specific **
*******************/
template <typename FUNC>
void for_each_location(FUNC const &fn)
{
for (unsigned x = 0; x < _cpus.width(); x++) {
for (unsigned y = 0; y < _cpus.height(); y++) {
Affinity::Location location(x, y, 1, 1);
fn(location);
}
/**
* Return capability selector of first global system interrupt
*/
int gsi_base_sel() const { return _gsi_base_sel; }
/**
* Determine size of a core local mapping required for a
* core_rm_session detach().
*/
size_t region_alloc_size_at(void * addr)
{
using Size_at_error = Allocator_avl::Size_at_error;
return (_core_mem_alloc.virt_alloc())()->size_at(addr).convert<size_t>(
[ ] (size_t s) { return s; },
[ ] (Size_at_error) { return 0U; });
}
/**
* Return kernel CPU ID for given Genode CPU
*/
unsigned pager_index(Affinity::Location location) const;
unsigned kernel_cpu_id(Affinity::Location location) const;
Affinity::Location sanitize(Affinity::Location location) {
return Affinity::Location(location.xpos() % _cpus.width(),
location.ypos() % _cpus.height(),
location.width(), location.height());
}
/**
* PD kernel capability selector of core
*/
unsigned core_pd_sel() const { return _core_pd_sel; }
template <typename FUNC>
void for_each_location(FUNC const &fn)
{
for (unsigned x = 0; x < _cpus.width(); x++) {
for (unsigned y = 0; y < _cpus.height(); y++) {
Affinity::Location location(x, y, 1, 1);
fn(location);
}
}
};
}
}
};
#endif /* _CORE__INCLUDE__PLATFORM_H_ */

View File

@ -18,80 +18,82 @@
#include <platform_thread.h>
#include <address_space.h>
namespace Genode {
class Platform_thread;
class Platform_pd : public Address_space
{
private:
Native_capability _parent { };
int _thread_cnt;
addr_t const _pd_sel;
const char * _label;
/*
* Noncopyable
*/
Platform_pd(Platform_pd const &);
Platform_pd &operator = (Platform_pd const &);
public:
/**
* Constructors
*/
Platform_pd(Allocator &md_alloc, char const *,
signed pd_id = -1, bool create = true);
/**
* Destructor
*/
~Platform_pd();
/**
* Bind thread to protection domain
*/
bool bind_thread(Platform_thread &thread);
/**
* Unbind thread from protection domain
*
* Free the thread's slot and update thread object.
*/
void unbind_thread(Platform_thread &thread);
/**
* Assign parent interface to protection domain
*/
void assign_parent(Native_capability parent);
/**
* Return portal capability selector for parent interface
*/
addr_t parent_pt_sel() { return _parent.local_name(); }
/**
* Capability selector of this task.
*
* \return PD selector
*/
addr_t pd_sel() const { return _pd_sel; }
/**
* Label of this protection domain
*
* \return name of this protection domain
*/
const char * name() const { return _label; }
/*****************************
** Address-space interface **
*****************************/
void flush(addr_t, size_t, Core_local_addr) override;
};
class Platform_pd;
}
/**
 * Protection-domain (PD) representation within core
 */
class Genode::Platform_pd : public Address_space
{
private:

Native_capability _parent { }; /* parent interface, see 'assign_parent' */
int _thread_cnt;               /* thread bookkeeping, see 'bind_thread'/'unbind_thread' */
addr_t const _pd_sel;          /* kernel capability selector of this PD */
const char * _label;           /* label reported via 'name()' */

/*
 * Noncopyable
 */
Platform_pd(Platform_pd const &);
Platform_pd &operator = (Platform_pd const &);

public:

/**
 * Constructors
 */
Platform_pd(Allocator &md_alloc, char const *,
signed pd_id = -1, bool create = true);

/**
 * Destructor
 */
~Platform_pd();

/**
 * Bind thread to protection domain
 */
bool bind_thread(Platform_thread &thread);

/**
 * Unbind thread from protection domain
 *
 * Free the thread's slot and update thread object.
 */
void unbind_thread(Platform_thread &thread);

/**
 * Assign parent interface to protection domain
 */
void assign_parent(Native_capability parent);

/**
 * Return portal capability selector for parent interface
 */
addr_t parent_pt_sel() { return _parent.local_name(); }

/**
 * Capability selector of this task.
 *
 * \return PD selector
 */
addr_t pd_sel() const { return _pd_sel; }

/**
 * Label of this protection domain
 *
 * \return name of this protection domain
 */
const char * name() const { return _label; }

/*****************************
 ** Address-space interface **
 *****************************/

void flush(addr_t, size_t, Core_local_addr) override;
};
#endif /* _CORE__INCLUDE__PLATFORM_PD_H_ */

View File

@ -31,195 +31,198 @@
namespace Genode {
class Platform_pd;
class Platform_thread
{
private:
Platform_pd *_pd;
Pager_object *_pager;
addr_t _id_base;
addr_t _sel_exc_base;
Affinity::Location _location;
enum {
MAIN_THREAD = 0x1U,
VCPU = 0x2U,
WORKER = 0x4U,
SC_CREATED = 0x8U,
REMOTE_PD = 0x10U,
};
uint8_t _features;
uint8_t _priority;
Stack::Name _name;
addr_t _sel_ec() const { return _id_base; }
addr_t _sel_pt_oom() const { return _id_base + 1; }
addr_t _sel_sc() const { return _id_base + 2; }
/* convenience function to access _feature variable */
inline bool main_thread() const { return _features & MAIN_THREAD; }
inline bool vcpu() const { return _features & VCPU; }
inline bool worker() const { return _features & WORKER; }
inline bool sc_created() const { return _features & SC_CREATED; }
inline bool remote_pd() const { return _features & REMOTE_PD; }
/*
* Noncopyable
*/
Platform_thread(Platform_thread const &);
Platform_thread &operator = (Platform_thread const &);
/**
* Create OOM portal and delegate it
*/
bool _create_and_map_oom_portal(Nova::Utcb &);
public:
/* mark as vcpu in remote pd if it is a vcpu */
addr_t remote_vcpu() {
if (!vcpu())
return Native_thread::INVALID_INDEX;
_features |= Platform_thread::REMOTE_PD;
return _sel_exc_base;
}
/**
* Constructor
*/
Platform_thread(size_t quota, char const *name,
unsigned priority,
Affinity::Location affinity,
addr_t utcb);
/**
* Destructor
*/
~Platform_thread();
/**
* Start thread
*
* \param ip instruction pointer to start at
* \param sp stack pointer to use
*
* \retval 0 successful
* \retval -1 thread/vCPU could not be started
*/
int start(void *ip, void *sp);
/**
* Pause this thread
*/
void pause();
/**
* Enable/disable single stepping
*/
void single_step(bool);
/**
* Resume this thread
*/
void resume();
/**
* Override thread state with 's'
*
* \throw Cpu_session::State_access_failed
*/
void state(Thread_state s);
/**
* Read thread state
*
* \throw Cpu_session::State_access_failed
*/
Thread_state state();
/************************
** Accessor functions **
************************/
/**
* Set thread type and exception portal base
*/
void thread_type(Cpu_session::Native_cpu::Thread_type thread_type,
Cpu_session::Native_cpu::Exception_base exception_base);
/**
* Set pager
*/
void pager(Pager_object &pager);
/**
* Return pager object
*/
Pager_object &pager()
{
if (_pager)
return *_pager;
ASSERT_NEVER_CALLED;
}
/**
* Return identification of thread when faulting
*/
unsigned long pager_object_badge() { return (unsigned long)this; }
/**
* Set the executing CPU for this thread
*/
void affinity(Affinity::Location location);
/**
* Pager_object starts migration preparation and calls for
* finalization of the migration.
* The method delegates the new exception portals to
* the protection domain and set the new acknowledged location.
*/
void prepare_migration();
void finalize_migration(Affinity::Location const location) {
_location = location; }
/**
* Get the executing CPU for this thread
*/
Affinity::Location affinity() const { return _location; }
/**
* Get thread name
*/
const char *name() const { return _name.string(); }
/**
* Get pd name
*/
const char *pd_name() const;
/**
* Associate thread with protection domain
*/
void bind_to_pd(Platform_pd *pd, bool main_thread)
{
_pd = pd;
if (main_thread) _features |= MAIN_THREAD;
}
/**
* Set CPU quota of the thread to 'quota'
*/
void quota(size_t const) { /* not supported*/ }
/**
* Return execution time consumed by the thread
*/
Trace::Execution_time execution_time() const;
};
class Platform_thread;
}
/**
 * Thread representation within core
 *
 * Bundles the kernel selectors, feature flags, and affinity state of one
 * thread (or vCPU) and offers the start/pause/resume/migration interface
 * used by the CPU service.
 */
class Genode::Platform_thread
{
private:

Platform_pd *_pd;               /* PD the thread is bound to, see 'bind_to_pd' */
Pager_object *_pager;           /* pager serving this thread, see 'pager' */
addr_t _id_base;                /* base of selector window, see '_sel_*' below */
addr_t _sel_exc_base;           /* exception-portal base, see 'remote_vcpu' */
Affinity::Location _location;   /* CPU the thread is assigned to */
enum {
MAIN_THREAD = 0x1U,
VCPU = 0x2U,
WORKER = 0x4U,
SC_CREATED = 0x8U,
REMOTE_PD = 0x10U,
};
uint8_t _features;              /* bitwise or of the flags above */
uint8_t _priority;
Stack::Name _name;

/* selectors within the window starting at '_id_base' */
addr_t _sel_ec() const { return _id_base; }
addr_t _sel_pt_oom() const { return _id_base + 1; }
addr_t _sel_sc() const { return _id_base + 2; }

/* convenience function to access _feature variable */
inline bool main_thread() const { return _features & MAIN_THREAD; }
inline bool vcpu() const { return _features & VCPU; }
inline bool worker() const { return _features & WORKER; }
inline bool sc_created() const { return _features & SC_CREATED; }
inline bool remote_pd() const { return _features & REMOTE_PD; }

/*
 * Noncopyable
 */
Platform_thread(Platform_thread const &);
Platform_thread &operator = (Platform_thread const &);

/**
 * Create OOM portal and delegate it
 */
bool _create_and_map_oom_portal(Nova::Utcb &);

public:

/*
 * Mark as vcpu in remote pd if it is a vcpu
 *
 * \return exception-portal base for vCPUs,
 *         Native_thread::INVALID_INDEX otherwise
 */
addr_t remote_vcpu() {
if (!vcpu())
return Native_thread::INVALID_INDEX;
_features |= Platform_thread::REMOTE_PD;
return _sel_exc_base;
}

/**
 * Constructor
 */
Platform_thread(size_t quota, char const *name,
unsigned priority,
Affinity::Location affinity,
addr_t utcb);

/**
 * Destructor
 */
~Platform_thread();

/**
 * Start thread
 *
 * \param ip  instruction pointer to start at
 * \param sp  stack pointer to use
 *
 * \retval  0  successful
 * \retval -1  thread/vCPU could not be started
 */
int start(void *ip, void *sp);

/**
 * Pause this thread
 */
void pause();

/**
 * Enable/disable single stepping
 */
void single_step(bool);

/**
 * Resume this thread
 */
void resume();

/**
 * Override thread state with 's'
 *
 * \throw Cpu_session::State_access_failed
 */
void state(Thread_state s);

/**
 * Read thread state
 *
 * \throw Cpu_session::State_access_failed
 */
Thread_state state();


/************************
 ** Accessor functions **
 ************************/

/**
 * Set thread type and exception portal base
 */
void thread_type(Cpu_session::Native_cpu::Thread_type thread_type,
Cpu_session::Native_cpu::Exception_base exception_base);

/**
 * Set pager
 */
void pager(Pager_object &pager);

/**
 * Return pager object
 */
Pager_object &pager()
{
if (_pager)
return *_pager;
ASSERT_NEVER_CALLED;
}

/**
 * Return identification of thread when faulting
 */
unsigned long pager_object_badge() { return (unsigned long)this; }

/**
 * Set the executing CPU for this thread
 */
void affinity(Affinity::Location location);

/**
 * Pager_object starts migration preparation and calls for
 * finalization of the migration.
 * The method delegates the new exception portals to
 * the protection domain and set the new acknowledged location.
 */
void prepare_migration();
void finalize_migration(Affinity::Location const location) {
_location = location; }

/**
 * Get the executing CPU for this thread
 */
Affinity::Location affinity() const { return _location; }

/**
 * Get thread name
 */
const char *name() const { return _name.string(); }

/**
 * Get pd name
 */
const char *pd_name() const;

/**
 * Associate thread with protection domain
 */
void bind_to_pd(Platform_pd *pd, bool main_thread)
{
_pd = pd;
if (main_thread) _features |= MAIN_THREAD;
}

/**
 * Set CPU quota of the thread to 'quota'
 */
void quota(size_t const) { /* not supported */ }

/**
 * Return execution time consumed by the thread
 */
Trace::Execution_time execution_time() const;
};
#endif /* _CORE__INCLUDE__PLATFORM_THREAD_H_ */

View File

@ -24,6 +24,7 @@
namespace Genode { class Vm_session_component; }
class Genode::Vm_session_component
:
private Ram_quota_guard,

View File

@ -20,6 +20,7 @@
namespace Genode { class Irq_object; }
class Genode::Irq_object : public Thread {
private:

View File

@ -27,6 +27,7 @@
namespace Genode { class Page_table_registry; }
class Genode::Page_table_registry
{
public:

View File

@ -19,6 +19,7 @@
namespace Genode { class Rpc_cap_factory; }
class Genode::Rpc_cap_factory
{
private:

View File

@ -27,27 +27,7 @@
namespace Genode {
struct Thread_info
{
Cap_sel tcb_sel { 0 };
Cap_sel ep_sel { 0 };
Cap_sel lock_sel { 0 };
Cap_sel vcpu_sel { 0 };
addr_t ipc_buffer_phys { 0 };
addr_t vcpu_state_phys { 0 };
inline void write_thread_info_to_ipc_buffer(Cap_sel pd_ep_sel);
Thread_info() { }
inline void init_tcb(Platform &, Range_allocator &,
unsigned const prio, unsigned const cpu);
inline void init(addr_t const utcb_virt_addr, unsigned const prio);
inline void destruct();
bool init_vcpu(Platform &, Cap_sel ept);
};
struct Thread_info;
/**
* Set register values for the instruction pointer and stack pointer and
@ -55,8 +35,32 @@ namespace Genode {
*/
void start_sel4_thread(Cap_sel tcb_sel, addr_t ip, addr_t sp, unsigned cpu);
void affinity_sel4_thread(Cap_sel const &tcb_sel, unsigned cpu);
}
struct Genode::Thread_info
{
Cap_sel tcb_sel { 0 };
Cap_sel ep_sel { 0 };
Cap_sel lock_sel { 0 };
Cap_sel vcpu_sel { 0 };
addr_t ipc_buffer_phys { 0 };
addr_t vcpu_state_phys { 0 };
inline void write_thread_info_to_ipc_buffer(Cap_sel pd_ep_sel);
Thread_info() { }
inline void init_tcb(Platform &, Range_allocator &,
unsigned const prio, unsigned const cpu);
inline void init(addr_t const utcb_virt_addr, unsigned const prio);
inline void destruct();
bool init_vcpu(Platform &, Cap_sel ept);
};
void Genode::Thread_info::init_tcb(Platform &platform,
Range_allocator &phys_alloc,
unsigned const prio, unsigned const cpu)
@ -76,6 +80,7 @@ void Genode::Thread_info::init_tcb(Platform &platform,
affinity_sel4_thread(tcb_sel, cpu);
}
void Genode::Thread_info::init(addr_t const utcb_virt_addr, unsigned const prio)
{
Platform &platform = platform_specific();

View File

@ -26,6 +26,7 @@
namespace Genode { struct Untyped_memory; }
struct Genode::Untyped_memory
{
class Phys_alloc_failed : Exception { };

View File

@ -23,6 +23,7 @@
namespace Genode { class Vm_session_component; }
class Genode::Vm_session_component
:
private Ram_quota_guard,

View File

@ -19,6 +19,7 @@
namespace Genode { struct Address_space; }
struct Genode::Address_space : private Weak_object<Address_space>,
public Interface
{

View File

@ -16,6 +16,7 @@
namespace Genode { struct Boot_modules_header; }
struct Genode::Boot_modules_header
{
long name; /* physical address of null-terminated string */

View File

@ -19,6 +19,7 @@
namespace Genode { class Constrained_core_ram; }
class Genode::Constrained_core_ram : public Allocator
{
private:

View File

@ -18,6 +18,7 @@
#include <util/string.h>
namespace Genode {
struct Core_log;
struct Core_log_range {

View File

@ -20,71 +20,71 @@
/* Core includes */
#include <cpu_session_component.h>
namespace Genode {
namespace Genode { class Cpu_root; }
class Cpu_root : public Root_component<Cpu_session_component>
{
private:
Ram_allocator &_ram_alloc;
Region_map &_local_rm;
Rpc_entrypoint &_thread_ep;
Pager_entrypoint &_pager_ep;
Trace::Source_registry &_trace_sources;
class Genode::Cpu_root : public Root_component<Cpu_session_component>
{
private:
protected:
Ram_allocator &_ram_alloc;
Region_map &_local_rm;
Rpc_entrypoint &_thread_ep;
Pager_entrypoint &_pager_ep;
Trace::Source_registry &_trace_sources;
Cpu_session_component *_create_session(char const *args,
Affinity const &affinity) override {
protected:
size_t ram_quota =
Arg_string::find_arg(args, "ram_quota").ulong_value(0);
Cpu_session_component *_create_session(char const *args,
Affinity const &affinity) override {
if (ram_quota < Trace::Control_area::SIZE)
throw Insufficient_ram_quota();
size_t ram_quota =
Arg_string::find_arg(args, "ram_quota").ulong_value(0);
if (!affinity.valid())
throw Service_denied();
if (ram_quota < Trace::Control_area::SIZE)
throw Insufficient_ram_quota();
return new (md_alloc())
Cpu_session_component(*this->ep(),
session_resources_from_args(args),
session_label_from_args(args),
session_diag_from_args(args),
_ram_alloc, _local_rm,
_thread_ep, _pager_ep, _trace_sources,
args, affinity, 0);
}
if (!affinity.valid())
throw Service_denied();
void _upgrade_session(Cpu_session_component *cpu, const char *args) override
{
cpu->upgrade(ram_quota_from_args(args));
cpu->upgrade(cap_quota_from_args(args));
}
return new (md_alloc())
Cpu_session_component(*this->ep(),
session_resources_from_args(args),
session_label_from_args(args),
session_diag_from_args(args),
_ram_alloc, _local_rm,
_thread_ep, _pager_ep, _trace_sources,
args, affinity, 0);
}
public:
void _upgrade_session(Cpu_session_component *cpu, const char *args) override
{
cpu->upgrade(ram_quota_from_args(args));
cpu->upgrade(cap_quota_from_args(args));
}
/**
* Constructor
*
* \param session_ep entry point for managing cpu session objects
* \param thread_ep entry point for managing threads
* \param md_alloc meta data allocator to be used by root component
*/
Cpu_root(Ram_allocator &ram_alloc,
Region_map &local_rm,
Rpc_entrypoint &session_ep,
Rpc_entrypoint &thread_ep,
Pager_entrypoint &pager_ep,
Allocator &md_alloc,
Trace::Source_registry &trace_sources)
:
Root_component<Cpu_session_component>(&session_ep, &md_alloc),
_ram_alloc(ram_alloc), _local_rm(local_rm),
_thread_ep(thread_ep), _pager_ep(pager_ep),
_trace_sources(trace_sources)
{ }
};
}
public:
/**
* Constructor
*
* \param session_ep entry point for managing cpu session objects
* \param thread_ep entry point for managing threads
* \param md_alloc meta data allocator to be used by root component
*/
Cpu_root(Ram_allocator &ram_alloc,
Region_map &local_rm,
Rpc_entrypoint &session_ep,
Rpc_entrypoint &thread_ep,
Pager_entrypoint &pager_ep,
Allocator &md_alloc,
Trace::Source_registry &trace_sources)
:
Root_component<Cpu_session_component>(&session_ep, &md_alloc),
_ram_alloc(ram_alloc), _local_rm(local_rm),
_thread_ep(thread_ep), _pager_ep(pager_ep),
_trace_sources(trace_sources)
{ }
};
#endif /* _CORE__INCLUDE__CPU_ROOT_H_ */

View File

@ -21,8 +21,8 @@
/* base-internal includes */
#include <base/internal/page_size.h>
namespace Genode
{
namespace Genode {
class Cpu_thread_component;
/**

View File

@ -26,139 +26,141 @@
namespace Genode {
class Rm_region;
class Dataspace_component;
/**
* Deriving classes can own a dataspace to implement conditional behavior
*/
class Dataspace_owner : Interface { };
class Dataspace_component : public Rpc_object<Dataspace>
{
private:
addr_t const _phys_addr = 0; /* address of dataspace in physical memory */
addr_t _core_local_addr = 0; /* address of core-local mapping */
size_t const _size = 0; /* size of dataspace in bytes */
bool const _io_mem = false; /* dataspace is I/O mem, not to be touched */
bool const _writeable = false; /* false if dataspace is read-only */
/*
* Access memory cached, write-combined, or uncached respectively
*/
Cache const _cache { CACHED };
List<Rm_region> _regions { }; /* regions this is attached to */
Mutex _mutex { };
/*
* Holds the dataspace owner if a distinction between owner and
* others is necessary on the dataspace, otherwise it is 0.
*/
Dataspace_owner const * _owner = nullptr;
/*
* Noncopyable
*/
Dataspace_component(Dataspace_component const &);
Dataspace_component &operator = (Dataspace_component const &);
protected:
bool _managed = false; /* true if this is a managed dataspace */
public:
/**
* Default constructor returning an invalid dataspace
*/
Dataspace_component() { }
/**
* Constructor for non-I/O dataspaces
*
* This constructor is used by RAM and ROM dataspaces.
*/
Dataspace_component(size_t size, addr_t core_local_addr,
Cache cache, bool writeable,
Dataspace_owner *owner)
:
_phys_addr(core_local_addr), _core_local_addr(core_local_addr),
_size(round_page(size)), _io_mem(false),
_writeable(writeable), _cache(cache),
_owner(owner), _managed(false) { }
/**
* Constructor for dataspaces with different core-local and
* physical addresses
*
* This constructor is used by IO_MEM. Because I/O-memory areas may
* be located at addresses that are populated by data or text in
* Core's virtual address space, we need to map these areas to
* another core-local address. The local mapping in core's address
* space is needed to send a mapping to another address space.
*/
Dataspace_component(size_t size, addr_t core_local_addr,
addr_t phys_addr, Cache cache,
bool writeable, Dataspace_owner *owner)
:
_phys_addr(phys_addr), _core_local_addr(core_local_addr),
_size(size), _io_mem(true), _writeable(writeable),
_cache(cache), _owner(owner), _managed(false) { }
/**
* Destructor
*/
~Dataspace_component();
/**
* Return region map corresponding to nested dataspace
*
* \retval invalid capability if dataspace is not a nested one
*/
virtual Native_capability sub_rm() { return Dataspace_capability(); }
addr_t core_local_addr() const { return _core_local_addr; }
bool io_mem() const { return _io_mem; }
Cache cacheability() const { return _cache; }
addr_t phys_addr() const { return _phys_addr; }
bool managed() const { return _managed; }
/**
* Return dataspace base address to be used for map operations
*
* Depending on the used kernel, this may be a core-local address
* or a physical address.
*/
addr_t map_src_addr() const
{
return Genode::map_src_addr(_core_local_addr, _phys_addr);
}
void assign_core_local_addr(void *addr) { _core_local_addr = (addr_t)addr; }
void attached_to(Rm_region &region);
void detached_from(Rm_region &region);
/**
* Detach dataspace from all rm sessions.
*/
void detach_from_rm_sessions();
/**
* Check if dataspace is owned by a specific owner
*/
bool owner(Dataspace_owner const &o) const { return _owner == &o; }
List<Rm_region> &regions() { return _regions; }
/*************************
** Dataspace interface **
*************************/
size_t size() override { return _size; }
bool writeable() override { return _writeable; }
};
}
class Genode::Dataspace_component : public Rpc_object<Dataspace>
{
private:
addr_t const _phys_addr = 0; /* address of dataspace in physical memory */
addr_t _core_local_addr = 0; /* address of core-local mapping */
size_t const _size = 0; /* size of dataspace in bytes */
bool const _io_mem = false; /* dataspace is I/O mem, not to be touched */
bool const _writeable = false; /* false if dataspace is read-only */
/*
* Access memory cached, write-combined, or uncached respectively
*/
Cache const _cache { CACHED };
List<Rm_region> _regions { }; /* regions this is attached to */
Mutex _mutex { };
/*
* Holds the dataspace owner if a distinction between owner and
* others is necessary on the dataspace, otherwise it is 0.
*/
Dataspace_owner const * _owner = nullptr;
/*
* Noncopyable
*/
Dataspace_component(Dataspace_component const &);
Dataspace_component &operator = (Dataspace_component const &);
protected:
bool _managed = false; /* true if this is a managed dataspace */
public:
/**
* Default constructor returning an invalid dataspace
*/
Dataspace_component() { }
/**
* Constructor for non-I/O dataspaces
*
* This constructor is used by RAM and ROM dataspaces.
*/
Dataspace_component(size_t size, addr_t core_local_addr,
Cache cache, bool writeable,
Dataspace_owner *owner)
:
_phys_addr(core_local_addr), _core_local_addr(core_local_addr),
_size(round_page(size)), _io_mem(false),
_writeable(writeable), _cache(cache),
_owner(owner), _managed(false) { }
/**
* Constructor for dataspaces with different core-local and
* physical addresses
*
* This constructor is used by IO_MEM. Because I/O-memory areas may
* be located at addresses that are populated by data or text in
* Core's virtual address space, we need to map these areas to
* another core-local address. The local mapping in core's address
* space is needed to send a mapping to another address space.
*/
Dataspace_component(size_t size, addr_t core_local_addr,
addr_t phys_addr, Cache cache,
bool writeable, Dataspace_owner *owner)
:
_phys_addr(phys_addr), _core_local_addr(core_local_addr),
_size(size), _io_mem(true), _writeable(writeable),
_cache(cache), _owner(owner), _managed(false) { }
/**
* Destructor
*/
~Dataspace_component();
/**
* Return region map corresponding to nested dataspace
*
* \retval invalid capability if dataspace is not a nested one
*/
virtual Native_capability sub_rm() { return Dataspace_capability(); }
addr_t core_local_addr() const { return _core_local_addr; }
bool io_mem() const { return _io_mem; }
Cache cacheability() const { return _cache; }
addr_t phys_addr() const { return _phys_addr; }
bool managed() const { return _managed; }
/**
* Return dataspace base address to be used for map operations
*
* Depending on the used kernel, this may be a core-local address
* or a physical address.
*/
addr_t map_src_addr() const
{
return Genode::map_src_addr(_core_local_addr, _phys_addr);
}
void assign_core_local_addr(void *addr) { _core_local_addr = (addr_t)addr; }
void attached_to(Rm_region &region);
void detached_from(Rm_region &region);
/**
* Detach dataspace from all rm sessions.
*/
void detach_from_rm_sessions();
/**
* Check if dataspace is owned by a specific owner
*/
bool owner(Dataspace_owner const &o) const { return _owner == &o; }
List<Rm_region> &regions() { return _regions; }
/*************************
** Dataspace interface **
*************************/
size_t size() override { return _size; }
bool writeable() override { return _writeable; }
};
#endif /* _CORE__INCLUDE__DATASPACE_COMPONENT_H_ */

View File

@ -18,46 +18,46 @@
#include "io_mem_session_component.h"
namespace Genode {
namespace Genode { class Io_mem_root; }
class Io_mem_root : public Root_component<Io_mem_session_component>
{
private:
class Genode::Io_mem_root : public Root_component<Io_mem_session_component>
{
Range_allocator &_io_mem_alloc; /* MMIO region allocator */
Range_allocator &_ram_alloc; /* RAM allocator */
Rpc_entrypoint &_ds_ep; /* entry point for managing io_mem dataspaces */
private:
protected:
Range_allocator &_io_mem_alloc; /* MMIO region allocator */
Range_allocator &_ram_alloc; /* RAM allocator */
Rpc_entrypoint &_ds_ep; /* entry point for managing io_mem dataspaces */
Io_mem_session_component *_create_session(const char *args) override
{
return new (md_alloc())
Io_mem_session_component(_io_mem_alloc, _ram_alloc,
_ds_ep, args);
}
protected:
public:
Io_mem_session_component *_create_session(const char *args) override
{
return new (md_alloc())
Io_mem_session_component(_io_mem_alloc, _ram_alloc,
_ds_ep, args);
}
/**
* Constructor
*
* \param session_ep entry point for managing io_mem session objects
* \param ds_ep entry point for managing dataspaces
* \param io_mem_alloc platform IO_MEM allocator
* \param ram_alloc platform RAM allocator
* \param md_alloc meta-data allocator to be used by root component
*/
Io_mem_root(Rpc_entrypoint &session_ep,
Rpc_entrypoint &ds_ep,
Range_allocator &io_mem_alloc,
Range_allocator &ram_alloc,
Allocator &md_alloc)
:
Root_component<Io_mem_session_component>(&session_ep, &md_alloc),
_io_mem_alloc(io_mem_alloc), _ram_alloc(ram_alloc), _ds_ep(ds_ep) { }
};
}
public:
/**
* Constructor
*
* \param session_ep entry point for managing io_mem session objects
* \param ds_ep entry point for managing dataspaces
* \param io_mem_alloc platform IO_MEM allocator
* \param ram_alloc platform RAM allocator
* \param md_alloc meta-data allocator to be used by root component
*/
Io_mem_root(Rpc_entrypoint &session_ep,
Rpc_entrypoint &ds_ep,
Range_allocator &io_mem_alloc,
Range_allocator &ram_alloc,
Allocator &md_alloc)
:
Root_component<Io_mem_session_component>(&session_ep, &md_alloc),
_io_mem_alloc(io_mem_alloc), _ram_alloc(ram_alloc), _ds_ep(ds_ep) { }
};
#endif /* _CORE__INCLUDE__IO_MEM_ROOT_H_ */

View File

@ -22,128 +22,128 @@
/* core includes */
#include <dataspace_component.h>
namespace Genode {
class Io_mem_session_component : public Rpc_object<Io_mem_session>
{
private:
/*
* Helper class used to pass the dataspace attributes as
* parameters from the _prepare_io_mem function to the
* constructor of Dataspace_component.
*/
struct Dataspace_attr
{
size_t size { 0 };
addr_t core_local_addr { 0 };
addr_t phys_addr { 0 };
Cache cacheable { UNCACHED };
/**
* Base address of request used for freeing mem-ranges
*/
addr_t req_base { 0 };
/**
* Default constructor
*
* This constructor enables Dataspace_attr objects to be
* returned from the '_prepare_io_mem' function.
*/
Dataspace_attr() { }
/**
* Constructor
*
* An invalid dataspace is represented by setting all
* arguments to zero.
*/
Dataspace_attr(size_t s, addr_t cla, addr_t pa, Cache c,
addr_t req_base)
:
size(s), core_local_addr(cla), phys_addr(pa),
cacheable(c), req_base(req_base) { }
};
struct Io_dataspace_component : Dataspace_component
{
addr_t req_base;
/**
* Constructor
*/
Io_dataspace_component(Dataspace_attr da)
:
Dataspace_component(da.size, da.core_local_addr,
da.phys_addr, da.cacheable,
true, 0),
req_base(da.req_base) { }
namespace Genode { class Io_mem_session_component; }
bool valid() { return size() != 0; }
};
class Genode::Io_mem_session_component : public Rpc_object<Io_mem_session>
{
private:
Range_allocator &_io_mem_alloc;
Io_dataspace_component _ds;
Rpc_entrypoint &_ds_ep;
Io_mem_dataspace_capability _ds_cap { };
Cache _cacheable { UNCACHED };
Dataspace_attr _prepare_io_mem(const char *args, Range_allocator &ram_alloc);
/********************************************
** Platform-implemented support functions **
********************************************/
/* FIXME Could this be merged with Dataspace::unmap() and friends? */
/*
* Helper class used to pass the dataspace attributes as
* parameters from the _prepare_io_mem function to the
* constructor of Dataspace_component.
*/
struct Dataspace_attr
{
size_t size { 0 };
addr_t core_local_addr { 0 };
addr_t phys_addr { 0 };
Cache cacheable { UNCACHED };
/**
* Map region locally and return local base address
*
* Both parameters - base and size - must be page-aligned.
* Base address of request used for freeing mem-ranges
*/
addr_t _map_local(addr_t base, size_t size);
addr_t req_base { 0 };
/**
* Unmap Core-local mapping of region
* Default constructor
*
* Both parameters - base and size - must be page-aligned.
* This constructor enables Dataspace_attr objects to be
* returned from the '_prepare_io_mem' function.
*/
void _unmap_local(addr_t base, size_t size);
public:
Dataspace_attr() { }
/**
* Constructor
*
* \param io_mem_alloc MMIO region allocator
* \param ram_alloc RAM allocator that will be checked for
* region collisions
* \param ds_ep entry point to manage the dataspace
* corresponding the io_mem session
* \param args session construction arguments, in
* particular MMIO region base, size and
* caching demands
* An invalid dataspace is represented by setting all
* arguments to zero.
*/
Io_mem_session_component(Range_allocator &io_mem_alloc,
Range_allocator &ram_alloc,
Rpc_entrypoint &ds_ep,
const char *args);
Dataspace_attr(size_t s, addr_t cla, addr_t pa, Cache c,
addr_t req_base)
:
size(s), core_local_addr(cla), phys_addr(pa),
cacheable(c), req_base(req_base) { }
};
struct Io_dataspace_component : Dataspace_component
{
addr_t req_base;
/**
* Destructor
* Constructor
*/
~Io_mem_session_component();
Io_dataspace_component(Dataspace_attr da)
:
Dataspace_component(da.size, da.core_local_addr,
da.phys_addr, da.cacheable,
true, 0),
req_base(da.req_base) { }
/******************************
** Io-mem session interface **
******************************/
bool valid() { return size() != 0; }
};
Io_mem_dataspace_capability dataspace() override { return _ds_cap; }
};
}
Range_allocator &_io_mem_alloc;
Io_dataspace_component _ds;
Rpc_entrypoint &_ds_ep;
Io_mem_dataspace_capability _ds_cap { };
Cache _cacheable { UNCACHED };
Dataspace_attr _prepare_io_mem(const char *args, Range_allocator &ram_alloc);
/********************************************
** Platform-implemented support functions **
********************************************/
/* FIXME Could this be merged with Dataspace::unmap() and friends? */
/**
* Map region locally and return local base address
*
* Both parameters - base and size - must be page-aligned.
*/
addr_t _map_local(addr_t base, size_t size);
/**
* Unmap Core-local mapping of region
*
* Both parameters - base and size - must be page-aligned.
*/
void _unmap_local(addr_t base, size_t size);
public:
/**
* Constructor
*
* \param io_mem_alloc MMIO region allocator
* \param ram_alloc RAM allocator that will be checked for
* region collisions
* \param ds_ep entry point to manage the dataspace
* corresponding the io_mem session
* \param args session construction arguments, in
* particular MMIO region base, size and
* caching demands
*/
Io_mem_session_component(Range_allocator &io_mem_alloc,
Range_allocator &ram_alloc,
Rpc_entrypoint &ds_ep,
const char *args);
/**
* Destructor
*/
~Io_mem_session_component();
/******************************
** Io-mem session interface **
******************************/
Io_mem_dataspace_capability dataspace() override { return _ds_cap; }
};
#endif /* _CORE__INCLUDE__IO_MEM_SESSION_COMPONENT_H_ */

View File

@ -19,53 +19,57 @@
#include "io_port_session_component.h"
namespace Genode {
struct Io_port_handler
{
private:
enum { STACK_SIZE = 4096 };
Rpc_entrypoint _ep;
public:
Io_port_handler(Pd_session &pd_session) :
_ep(&pd_session, STACK_SIZE, "ioport", Affinity::Location())
{ }
Rpc_entrypoint &entrypoint() { return _ep; }
};
class Io_port_root : private Io_port_handler,
public Root_component<Io_port_session_component>
{
private:
Range_allocator &_io_port_alloc; /* I/O port allocator */
protected:
Io_port_session_component *_create_session(const char *args) override {
return new (md_alloc()) Io_port_session_component(_io_port_alloc, args); }
public:
/**
* Constructor
*
* \param cap_session capability allocator
* \param io_port_alloc platform IO_PORT allocator
* \param md_alloc meta-data allocator to be used by root component
*/
Io_port_root(Pd_session &pd_session,
Range_allocator &io_port_alloc,
Allocator &md_alloc)
:
Io_port_handler(pd_session),
Root_component<Io_port_session_component>(&entrypoint(), &md_alloc),
_io_port_alloc(io_port_alloc) { }
};
class Io_port_handler;
class Io_port_root;
}
class Genode::Io_port_handler
{
private:
enum { STACK_SIZE = 4096 };
Rpc_entrypoint _ep;
public:
Io_port_handler(Pd_session &pd_session) :
_ep(&pd_session, STACK_SIZE, "ioport", Affinity::Location())
{ }
Rpc_entrypoint &entrypoint() { return _ep; }
};
class Genode::Io_port_root : private Io_port_handler,
public Root_component<Io_port_session_component>
{
private:
Range_allocator &_io_port_alloc; /* I/O port allocator */
protected:
Io_port_session_component *_create_session(const char *args) override {
return new (md_alloc()) Io_port_session_component(_io_port_alloc, args); }
public:
/**
* Constructor
*
* \param cap_session capability allocator
* \param io_port_alloc platform IO_PORT allocator
* \param md_alloc meta-data allocator to be used by root component
*/
Io_port_root(Pd_session &pd_session,
Range_allocator &io_port_alloc,
Allocator &md_alloc)
:
Io_port_handler(pd_session),
Root_component<Io_port_session_component>(&entrypoint(), &md_alloc),
_io_port_alloc(io_port_alloc) { }
};
#endif /* _CORE__INCLUDE__IO_PORT_ROOT_H_ */

View File

@ -3,7 +3,7 @@
* \author Christian Helmuth
* \date 2007-04-17
*
* We assume Core is running on IOPL3.
* We assume core is running on IOPL3.
*/
/*
@ -24,53 +24,53 @@
/* core includes */
#include <dataspace_component.h>
namespace Genode {
class Io_port_session_component : public Rpc_object<Io_port_session>
{
private:
Range_allocator &_io_port_alloc;
unsigned short _base = 0;
unsigned short _size = 0;
/**
* Check if access exceeds range
*/
bool _in_bounds(unsigned short address, unsigned width) {
return (address >= _base) && (address + width <= _base + _size); }
public:
/**
* Constructor
*
* \param io_port_alloc IO_PORT region allocator
* \param args session construction arguments, in
* particular port base and size
* \throw Service_denied
*/
Io_port_session_component(Range_allocator &io_port_alloc,
const char *args);
/**
* Destructor
*/
~Io_port_session_component();
namespace Genode { class Io_port_session_component; }
/*******************************
** Io-port session interface **
*******************************/
class Genode::Io_port_session_component : public Rpc_object<Io_port_session>
{
private:
unsigned char inb(unsigned short) override;
unsigned short inw(unsigned short) override;
unsigned inl(unsigned short) override;
Range_allocator &_io_port_alloc;
unsigned short _base = 0;
unsigned short _size = 0;
void outb(unsigned short, unsigned char) override;
void outw(unsigned short, unsigned short) override;
void outl(unsigned short, unsigned) override;
};
}
/**
* Check if access exceeds range
*/
bool _in_bounds(unsigned short address, unsigned width) {
return (address >= _base) && (address + width <= _base + _size); }
public:
/**
* Constructor
*
* \param io_port_alloc IO_PORT region allocator
* \param args session construction arguments, in
* particular port base and size
* \throw Service_denied
*/
Io_port_session_component(Range_allocator &io_port_alloc,
const char *args);
/**
* Destructor
*/
~Io_port_session_component();
/*******************************
** Io-port session interface **
*******************************/
unsigned char inb(unsigned short) override;
unsigned short inw(unsigned short) override;
unsigned inl(unsigned short) override;
void outb(unsigned short, unsigned char) override;
void outw(unsigned short, unsigned short) override;
void outl(unsigned short, unsigned) override;
};
#endif /* _CORE__INCLUDE__IO_PORT_SESSION_COMPONENT_H_ */

View File

@ -20,6 +20,7 @@
namespace Genode { class Irq_args; }
class Genode::Irq_args
{
private:

View File

@ -18,8 +18,9 @@
namespace Genode { class Irq_object; }
class Genode::Irq_object : public Thread {
class Genode::Irq_object : public Thread
{
private:
Signal_context_capability _sig_cap { };

View File

@ -20,6 +20,7 @@
namespace Genode { class Irq_root; }
class Genode::Irq_root : public Root_component<Irq_session_component>
{

View File

@ -24,6 +24,7 @@
namespace Genode { class Irq_session_component; }
class Genode::Irq_session_component : public Rpc_object<Irq_session>,
private List<Irq_session_component>::Element
{

View File

@ -19,33 +19,33 @@
#include "log_session_component.h"
namespace Genode {
namespace Genode { class Log_root; }
class Log_root : public Root_component<Log_session_component>
{
protected:
/**
* Root component interface
*/
Log_session_component *_create_session(const char *args) override
{
return new (md_alloc()) Log_session_component(label_from_args(args));
}
class Genode::Log_root : public Root_component<Log_session_component>
{
protected:
public:
/**
* Root component interface
*/
Log_session_component *_create_session(const char *args) override
{
return new (md_alloc()) Log_session_component(label_from_args(args));
}
/**
* Constructor
*
* \param session_ep entry point for managing cpu session objects
* \param md_alloc meta-data allocator to be used by root component
*/
Log_root(Rpc_entrypoint &session_ep, Allocator &md_alloc)
:
Root_component<Log_session_component>(&session_ep, &md_alloc)
{ }
};
}
public:
/**
* Constructor
*
* \param session_ep entry point for managing cpu session objects
* \param md_alloc meta-data allocator to be used by root component
*/
Log_root(Rpc_entrypoint &session_ep, Allocator &md_alloc)
:
Root_component<Log_session_component>(&session_ep, &md_alloc)
{ }
};
#endif /* _CORE__INCLUDE__LOG_ROOT_H_ */

View File

@ -20,58 +20,58 @@
#include <base/session_label.h>
#include <log_session/log_session.h>
namespace Genode {
namespace Genode { class Log_session_component; }
class Log_session_component : public Rpc_object<Log_session>
{
private:
Session_label const _label;
class Genode::Log_session_component : public Rpc_object<Log_session>
{
private:
static Session_label _expand_label(Session_label const &label)
{
if (label == "init -> unlabeled")
return "";
else
return Session_label("[", label, "] ");
Session_label const _label;
static Session_label _expand_label(Session_label const &label)
{
if (label == "init -> unlabeled")
return "";
else
return Session_label("[", label, "] ");
}
public:
/**
* Constructor
*/
Log_session_component(Session_label const &label)
: _label(_expand_label(label)) { }
/*****************
** Log session **
*****************/
void write(String const &string_buf) override
{
if (!(string_buf.valid_string())) {
error("corrupted string");
return;
}
public:
char const * const string = string_buf.string();
size_t const len = strlen(string);
/**
* Constructor
*/
Log_session_component(Session_label const &label)
: _label(_expand_label(label)) { }
/*****************
** Log session **
*****************/
void write(String const &string_buf) override
{
if (!(string_buf.valid_string())) {
error("corrupted string");
return;
unsigned from_i = 0;
for (unsigned i = 0; i < len; i++) {
if (string[i] == '\n') {
log(_label, Cstring(string + from_i, i - from_i));
from_i = i + 1;
}
char const * const string = string_buf.string();
size_t const len = strlen(string);
unsigned from_i = 0;
for (unsigned i = 0; i < len; i++) {
if (string[i] == '\n') {
log(_label, Cstring(string + from_i, i - from_i));
from_i = i + 1;
}
}
/* if last character of string was not a line break, add one */
if (from_i < len)
log(_label, Cstring(string + from_i));
}
};
}
/* if last character of string was not a line break, add one */
if (from_i < len)
log(_label, Cstring(string + from_i));
}
};
#endif /* _CORE__INCLUDE__LOG_SESSION_COMPONENT_H_ */

View File

@ -20,9 +20,7 @@
/* Core */
#include <pd_session_component.h>
namespace Genode {
class Pd_root;
}
namespace Genode { class Pd_root; }
class Genode::Pd_root : public Genode::Root_component<Genode::Pd_session_component>

View File

@ -24,87 +24,7 @@
namespace Genode {
/**
* Generic platform interface
*/
class Platform_generic
{
public:
virtual ~Platform_generic() { }
/**
* Allocator of core-local mapped virtual memory
*/
virtual Range_allocator &core_mem_alloc() = 0;
/**
* Allocator of physical memory
*/
virtual Range_allocator &ram_alloc() = 0;
/**
* Allocator of free address ranges within core
*/
virtual Range_allocator &region_alloc() = 0;
/**
* I/O memory allocator
*/
virtual Range_allocator &io_mem_alloc() = 0;
/**
* I/O port allocator
*/
virtual Range_allocator &io_port_alloc() = 0;
/**
* IRQ allocator
*/
virtual Range_allocator &irq_alloc() = 0;
/**
* Virtual memory configuration accessors
*/
virtual addr_t vm_start() const = 0;
virtual size_t vm_size() const = 0;
/**
* ROM modules
*/
virtual Rom_fs &rom_fs() = 0;
/**
* Wait for exit condition
*/
virtual void wait_for_exit() = 0;
/**
* Return true if platform supports direct unmap (no mapping db)
*/
virtual bool supports_direct_unmap() const { return false; }
/**
* Return number of physical CPUs present in the platform
*
* The default implementation returns a single CPU.
*/
virtual Affinity::Space affinity_space() const
{
return Affinity::Space(1);
}
/**
* Return system-wide maximum number of capabilities
*/
virtual size_t max_caps() const = 0;
/**
* Return true if the core component relies on a 'Platform_pd' object
*/
virtual bool core_needs_platform_pd() const { return true; }
};
class Platform_generic;
/**
* Request pointer to static generic platform interface of core
@ -121,4 +41,84 @@ namespace Genode {
extern Platform &platform_specific();
}
class Genode::Platform_generic
{
	public:

		virtual ~Platform_generic() { }

		/**
		 * Allocator of core-local mapped virtual memory
		 */
		virtual Range_allocator &core_mem_alloc() = 0;

		/**
		 * Allocator of physical memory
		 */
		virtual Range_allocator &ram_alloc() = 0;

		/**
		 * Allocator of free address ranges within core
		 */
		virtual Range_allocator &region_alloc() = 0;

		/**
		 * I/O memory allocator
		 */
		virtual Range_allocator &io_mem_alloc() = 0;

		/**
		 * I/O port allocator
		 */
		virtual Range_allocator &io_port_alloc() = 0;

		/**
		 * IRQ allocator
		 */
		virtual Range_allocator &irq_alloc() = 0;

		/**
		 * Virtual memory configuration accessors
		 */
		virtual addr_t vm_start() const = 0;
		virtual size_t vm_size()  const = 0;

		/**
		 * ROM modules
		 */
		virtual Rom_fs &rom_fs() = 0;

		/**
		 * Wait for exit condition
		 */
		virtual void wait_for_exit() = 0;

		/**
		 * Return true if platform supports direct unmap (no mapping db)
		 */
		virtual bool supports_direct_unmap() const { return false; }

		/**
		 * Return number of physical CPUs present in the platform
		 *
		 * The default implementation returns a single CPU.
		 */
		virtual Affinity::Space affinity_space() const
		{
			return Affinity::Space(1);
		}

		/**
		 * Return system-wide maximum number of capabilities
		 */
		virtual size_t max_caps() const = 0;

		/**
		 * Return true if the core component relies on a 'Platform_pd' object
		 */
		virtual bool core_needs_platform_pd() const { return true; }
};
#endif /* _CORE__INCLUDE__PLATFORM_GENERIC_H_ */

View File

@ -40,7 +40,6 @@
#include <base/internal/stack_area.h>
namespace Genode {
class Cpu_thread_component;
class Dataspace_component;
class Region_map_component;
@ -51,6 +50,7 @@ namespace Genode {
class Rm_session_component;
}
class Genode::Region_map_detach : Genode::Interface
{
public:

View File

@ -17,39 +17,38 @@
#include <root/component.h>
#include "rom_session_component.h"
namespace Genode { class Rom_root; }


/**
 * Root component that hands out ROM sessions backed by core's ROM file system
 */
class Genode::Rom_root : public Root_component<Rom_session_component>
{
	private:

		Rom_fs         &_rom_fs;  /* rom file system */
		Rpc_entrypoint &_ds_ep;   /* entry point for managing rom dataspaces */

	protected:

		/* create ROM session for the module selected via the 'args' label */
		Rom_session_component *_create_session(const char *args) override {
			return new (md_alloc()) Rom_session_component(_rom_fs, _ds_ep, args); }

	public:

		/**
		 * Constructor
		 *
		 * \param session_ep  entry point for managing ROM session objects
		 * \param ds_ep       entry point for managing dataspaces
		 * \param rom_fs      platform ROM file system
		 * \param md_alloc    meta-data allocator to be used by root component
		 */
		Rom_root(Rpc_entrypoint &session_ep,
		         Rpc_entrypoint &ds_ep,
		         Rom_fs         &rom_fs,
		         Allocator      &md_alloc)
		:
			Root_component<Rom_session_component>(&session_ep, &md_alloc),
			_rom_fs(rom_fs), _ds_ep(ds_ep) { }
};
#endif /* _CORE__INCLUDE__ROM_ROOT_H_ */

View File

@ -20,63 +20,63 @@
#include <rom_session/rom_session.h>
#include <base/session_label.h>
namespace Genode { class Rom_session_component; }


/**
 * Session component exposing one ROM module as a dataspace
 */
class Genode::Rom_session_component : public Rpc_object<Rom_session>
{
	private:

		Rom_module const * const _rom_module = nullptr;
		Dataspace_component       _ds;
		Rpc_entrypoint           &_ds_ep;
		Rom_dataspace_capability  _ds_cap;

		/*
		 * Look up the ROM module denoted by the session label, or deny the
		 * session if no matching module exists.
		 */
		Rom_module const &_find_rom(Rom_fs &rom_fs, const char *args)
		{
			/* extract label */
			Session_label const label = label_from_args(args);

			/* find ROM module for trailing label element */
			Rom_module const * rom = rom_fs.find(label.last_element().string());
			if (rom)
				return *rom;

			throw Service_denied();
		}

		/*
		 * Noncopyable
		 */
		Rom_session_component(Rom_session_component const &);
		Rom_session_component &operator = (Rom_session_component const &);

	public:

		/**
		 * Constructor
		 *
		 * \param rom_fs  ROM filesystem
		 * \param ds_ep   entry point to manage the dataspace
		 *                corresponding to the ROM session
		 * \param args    session-construction arguments
		 */
		Rom_session_component(Rom_fs         &rom_fs,
		                      Rpc_entrypoint &ds_ep,
		                      const char     *args);

		/**
		 * Destructor
		 */
		~Rom_session_component();


		/***************************
		 ** Rom session interface **
		 ***************************/

		Rom_dataspace_capability dataspace() override { return _ds_cap; }
		void sigh(Signal_context_capability) override { }
};
#endif /* _CORE__INCLUDE__ROM_SESSION_COMPONENT_H_ */

View File

@ -20,6 +20,7 @@
namespace Genode { class Rpc_cap_factory; }
class Genode::Rpc_cap_factory
{
private:

View File

@ -21,6 +21,7 @@
namespace Genode { class Signal_broker; }
class Genode::Signal_broker
{
private:

View File

@ -15,83 +15,87 @@
#define _CORE__INCLUDE__SIGNAL_DELIVERY_PROXY_H_
/* forward declarations; the definitions follow outside the namespace braces */
namespace Genode {

	struct Signal_delivery_proxy;
	struct Signal_delivery_proxy_component;
}
/*
 * RPC interface used to marshal signal-delivery and signal-release requests
 * to a dedicated entrypoint, implemented by 'Signal_delivery_proxy_component'
 * below
 */
struct Genode::Signal_delivery_proxy : Interface
{
GENODE_RPC(Rpc_deliver, void, _deliver_from_ep, Signal_context_capability, unsigned);
GENODE_RPC(Rpc_release, void, _release_from_ep, Genode::addr_t);
GENODE_RPC_INTERFACE(Rpc_deliver, Rpc_release);
};
struct Genode::Signal_delivery_proxy_component
:
Rpc_object<Signal_delivery_proxy, Signal_delivery_proxy_component>
{
Rpc_entrypoint &_ep;
Capability<Signal_delivery_proxy> _proxy_cap;
/**
* Constructor
*
* \param ep entrypoint to be used as a proxy for delivering signals
* as IPC-reply messages.
*/
Signal_delivery_proxy_component(Rpc_entrypoint &ep)
: _ep(ep), _proxy_cap(_ep.manage(this)) { }
~Signal_delivery_proxy_component()
{
if (_proxy_cap.valid())
_ep.dissolve(this);
}
/**
* Signal_delivery_proxy RPC interface
*
* This method is executed in the context of the 'ep'. Hence, it
* can produce legitimate IPC reply messages to 'Signal_source'
* clients.
*/
void _deliver_from_ep(Signal_context_capability cap, unsigned cnt)
{
_ep.apply(cap, [&] (Signal_context_component *context) {
if (context)
context->source().submit(*context, cnt);
else
warning("invalid signal-context capability");
});
}
/* counterpart of '_deliver_from_ep', also executed in the context of 'ep' */
void _release_from_ep(addr_t const context_addr)
{
Signal_context_component * context = reinterpret_cast<Signal_context_component *>(context_addr);
if (context)
context->source().release(*context);
}
/**
* Deliver signal via the proxy mechanism
*
* Since this method performs an RPC call to the 'ep' specified at
* construction time, it must never be called from this ep.
*
* Called from threads other than 'ep'.
*/
void submit(Signal_context_capability cap, unsigned cnt) {
_proxy_cap.call<Rpc_deliver>(cap, cnt); }
/**
* Release signal context via the proxy mechanism
*
* Since this method performs an RPC call to the 'ep' specified at
* construction time, it must never be called from this ep.
*
* Called from threads other than 'ep'.
*/
void release(Signal_context_component &context) {
_proxy_cap.call<Rpc_release>(reinterpret_cast<addr_t>(&context)); }
};
#endif /* _CORE__INCLUDE__SIGNLA_DELIVERY_PROXY_H_ */

View File

@ -23,6 +23,7 @@
namespace Genode { class Vm_root; }
class Genode::Vm_root : public Root_component<Vm_session_component>
{
private: