base: redesign object pool using lambda interface

Instead of returning pointers to locked objects via a lookup function, the
new object-pool implementation restricts object access to functors resp.
lambda expressions that are applied to the objects within the pool itself.

Fix #884
Fix #1658

commit 458b4d6fc4 (parent 555835c95b)
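The pool-side counterpart of this interface (base/include/base/object_pool.h) is not part of the excerpt below; only the member names `apply` and `remove_all` are visible in the diff. The following is a minimal, self-contained sketch of how such a functor-based pool can be structured, using standard-library primitives as stand-ins for Genode's `Lock` and AVL tree. Everything apart from the two method names is an assumption made for illustration, not the actual Genode implementation.

```cpp
#include <map>
#include <mutex>

/* illustrative sketch only -- not Genode's Object_pool */
template <typename OBJ>
class Object_pool
{
	private:

		std::mutex            _mutex;    /* stands in for Genode::Lock */
		std::map<long, OBJ *> _objects;  /* badge -> object, stands in for the AVL tree */

	public:

		/* apply 'func' to the object matching 'badge', or to nullptr if unknown */
		template <typename FUNC>
		auto apply(long badge, FUNC func) -> decltype(func(nullptr))
		{
			std::lock_guard<std::mutex> guard(_mutex);

			auto it = _objects.find(badge);
			OBJ *obj = (it != _objects.end()) ? it->second : nullptr;

			/* the object is accessible only within the scope of 'func' */
			return func(obj);
		}

		/* apply 'func' to every object while draining the pool */
		template <typename FUNC>
		void remove_all(FUNC func)
		{
			std::lock_guard<std::mutex> guard(_mutex);
			for (auto &entry : _objects)
				func(entry.second);
			_objects.clear();
		}

		void insert(long badge, OBJ *obj)
		{
			std::lock_guard<std::mutex> guard(_mutex);
			_objects[badge] = obj;
		}

		void remove(OBJ *) { /* omitted in this sketch */ }
};
```

Compared with handing out a locked pointer (`lookup_and_lock`), scoping the access to a functor makes it impossible for a caller to forget the release step, which is the point of the redesign described above.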
@ -30,39 +30,41 @@ Core_rm_session::attach(Dataspace_capability ds_cap, size_t size,
|
|||||||
{
|
{
|
||||||
using namespace Codezero;
|
using namespace Codezero;
|
||||||
|
|
||||||
Object_pool<Dataspace_component>::Guard ds(_ds_ep->lookup_and_lock(ds_cap));
|
auto lambda = [&] (Dataspace_component *ds) -> Local_addr {
|
||||||
if (!ds)
|
if (!ds)
|
||||||
throw Invalid_dataspace();
|
throw Invalid_dataspace();
|
||||||
|
|
||||||
if (size == 0)
|
if (size == 0)
|
||||||
size = ds->size();
|
size = ds->size();
|
||||||
|
|
||||||
size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
|
size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
|
||||||
size_t num_pages = page_rounded_size >> get_page_size_log2();
|
size_t num_pages = page_rounded_size >> get_page_size_log2();
|
||||||
|
|
||||||
if (use_local_addr) {
|
if (use_local_addr) {
|
||||||
PERR("Parameter 'use_local_addr' not supported within core");
|
PERR("Parameter 'use_local_addr' not supported within core");
|
||||||
return 0;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (offset) {
|
if (offset) {
|
||||||
PERR("Parameter 'offset' not supported within core");
|
PERR("Parameter 'offset' not supported within core");
|
||||||
return 0;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* allocate range in core's virtual address space */
|
/* allocate range in core's virtual address space */
|
||||||
void *virt_addr;
|
void *virt_addr;
|
||||||
if (!platform()->region_alloc()->alloc(page_rounded_size, &virt_addr)) {
|
if (!platform()->region_alloc()->alloc(page_rounded_size, &virt_addr)) {
|
||||||
PERR("Could not allocate virtual address range in core of size %zd\n",
|
PERR("Could not allocate virtual address range in core of size %zd\n",
|
||||||
page_rounded_size);
|
page_rounded_size);
|
||||||
return false;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages)) {
|
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages)) {
|
||||||
PERR("core-local memory mapping failed virt=%lx, phys=%lx\n",
|
PERR("core-local memory mapping failed virt=%lx, phys=%lx\n",
|
||||||
(addr_t)virt_addr, ds->phys_addr());
|
(addr_t)virt_addr, ds->phys_addr());
|
||||||
return 0;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
return virt_addr;
|
return virt_addr;
|
||||||
|
};
|
||||||
|
return _ds_ep->apply(ds_cap, lambda);
|
||||||
}
|
}
|
||||||
|
@ -176,7 +176,7 @@ void Ipc_pager::acknowledge_wakeup()
|
|||||||
** Pager entrypoint **
|
** Pager entrypoint **
|
||||||
**********************/
|
**********************/
|
||||||
|
|
||||||
Untyped_capability Pager_entrypoint::_manage(Pager_object *obj)
|
Untyped_capability Pager_entrypoint::_pager_object_cap(unsigned long badge)
|
||||||
{
|
{
|
||||||
return Untyped_capability(_tid.l4id, obj->badge());
|
return Untyped_capability(_tid.l4id, badge);
|
||||||
}
|
}
|
||||||
|
@ -78,7 +78,7 @@ void Ipc_pager::acknowledge_wakeup()
|
|||||||
** Pager Entrypoint **
|
** Pager Entrypoint **
|
||||||
**********************/
|
**********************/
|
||||||
|
|
||||||
Untyped_capability Pager_entrypoint::_manage(Pager_object *obj)
|
Untyped_capability Pager_entrypoint::_pager_object_cap(unsigned long badge)
|
||||||
{
|
{
|
||||||
return Untyped_capability(_tid.l4id, obj->badge());
|
return Untyped_capability(_tid.l4id, badge);
|
||||||
}
|
}
|
||||||
|
@ -70,24 +70,13 @@ void Rpc_entrypoint::entry()
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* atomically lookup and lock referenced object */
|
apply(srv.badge(), [&] (Rpc_object_base *obj) {
|
||||||
Object_pool<Rpc_object_base>::Guard curr_obj(lookup_and_lock(srv.badge()));
|
if (!obj) return;
|
||||||
if (!curr_obj)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
{
|
try {
|
||||||
Lock::Guard lock_guard(_curr_obj_lock);
|
srv.ret(obj->dispatch(opcode, srv, srv));
|
||||||
_curr_obj = curr_obj;
|
} catch(Blocking_canceled&) { }
|
||||||
}
|
});
|
||||||
|
|
||||||
/* dispatch request */
|
|
||||||
try { srv.ret(_curr_obj->dispatch(opcode, srv, srv)); }
|
|
||||||
catch (Blocking_canceled) { }
|
|
||||||
|
|
||||||
{
|
|
||||||
Lock::Guard lock_guard(_curr_obj_lock);
|
|
||||||
_curr_obj = 0;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* answer exit call, thereby wake up '~Rpc_entrypoint' */
|
/* answer exit call, thereby wake up '~Rpc_entrypoint' */
|
||||||
@ -95,6 +84,4 @@ void Rpc_entrypoint::entry()
|
|||||||
|
|
||||||
/* defer the destruction of 'Ipc_server' until '~Rpc_entrypoint' is ready */
|
/* defer the destruction of 'Ipc_server' until '~Rpc_entrypoint' is ready */
|
||||||
_delay_exit.lock();
|
_delay_exit.lock();
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -40,14 +40,16 @@ void Genode::Cpu_session_component::enable_vcpu(Genode::Thread_capability thread
|
|||||||
using namespace Genode;
|
using namespace Genode;
|
||||||
using namespace Fiasco;
|
using namespace Fiasco;
|
||||||
|
|
||||||
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
|
auto lambda = [&] (Cpu_thread_component *thread) {
|
||||||
if (!thread) return;
|
if (!thread) return;
|
||||||
|
|
||||||
Native_thread tid = thread->platform_thread()->thread().local.dst();
|
Native_thread tid = thread->platform_thread()->thread().local.dst();
|
||||||
|
|
||||||
l4_msgtag_t tag = l4_thread_vcpu_control(tid, vcpu_state);
|
l4_msgtag_t tag = l4_thread_vcpu_control(tid, vcpu_state);
|
||||||
if (l4_msgtag_has_error(tag))
|
if (l4_msgtag_has_error(tag))
|
||||||
PWRN("l4_thread_vcpu_control failed");
|
PWRN("l4_thread_vcpu_control failed");
|
||||||
|
};
|
||||||
|
_thread_ep->apply(thread_cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -56,10 +58,11 @@ Genode::Cpu_session_component::native_cap(Genode::Thread_capability cap)
|
|||||||
{
|
{
|
||||||
using namespace Genode;
|
using namespace Genode;
|
||||||
|
|
||||||
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(cap));
|
auto lambda = [&] (Cpu_thread_component *thread) {
|
||||||
if (!thread) return Native_capability();
|
return (!thread) ? Native_capability()
|
||||||
|
: thread->platform_thread()->thread().local;
|
||||||
return thread->platform_thread()->thread().local;
|
};
|
||||||
|
return _thread_ep->apply(cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -97,15 +100,17 @@ void Genode::Cpu_session_component::single_step(Genode::Thread_capability thread
|
|||||||
{
|
{
|
||||||
using namespace Genode;
|
using namespace Genode;
|
||||||
|
|
||||||
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
|
auto lambda = [&] (Cpu_thread_component *thread) {
|
||||||
if (!thread) return;
|
if (!thread) return;
|
||||||
|
|
||||||
Native_thread tid = thread->platform_thread()->thread().local.dst();
|
Native_thread tid = thread->platform_thread()->thread().local.dst();
|
||||||
|
|
||||||
enum { THREAD_SINGLE_STEP = 0x40000 };
|
enum { THREAD_SINGLE_STEP = 0x40000 };
|
||||||
int flags = enable ? THREAD_SINGLE_STEP : 0;
|
int flags = enable ? THREAD_SINGLE_STEP : 0;
|
||||||
|
|
||||||
Fiasco::l4_thread_ex_regs(tid, ~0UL, ~0UL, flags);
|
Fiasco::l4_thread_ex_regs(tid, ~0UL, ~0UL, flags);
|
||||||
|
};
|
||||||
|
_thread_ep->apply(thread_cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -42,96 +42,95 @@ void Pager_entrypoint::entry()
|
|||||||
|
|
||||||
reply_pending = false;
|
reply_pending = false;
|
||||||
|
|
||||||
/* lookup referenced object */
|
apply(_pager.badge(), [&] (Pager_object *obj) {
|
||||||
Object_pool<Pager_object>::Guard obj(lookup_and_lock(_pager.badge()));
|
/* the pager_object might be destroyed, while we got the message */
|
||||||
|
if (!obj) {
|
||||||
|
PWRN("No pager object found!");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
/* the pager_object might be destroyed, while we got the message */
|
switch (_pager.msg_type()) {
|
||||||
if (!obj) {
|
|
||||||
PWRN("No pager object found!");
|
case Ipc_pager::PAGEFAULT:
|
||||||
continue;
|
case Ipc_pager::EXCEPTION:
|
||||||
}
|
{
|
||||||
|
if (_pager.is_exception()) {
|
||||||
|
Lock::Guard guard(obj->state.lock);
|
||||||
|
_pager.get_regs(&obj->state);
|
||||||
|
obj->state.exceptions++;
|
||||||
|
obj->state.in_exception = true;
|
||||||
|
obj->submit_exception_signal();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
switch (_pager.msg_type()) {
|
/* handle request */
|
||||||
|
if (obj->pager(_pager)) {
|
||||||
|
/* could not resolv - leave thread in pagefault */
|
||||||
|
PDBG("Could not resolve pf=%p ip=%p",
|
||||||
|
(void*)_pager.fault_addr(), (void*)_pager.fault_ip());
|
||||||
|
} else {
|
||||||
|
_pager.set_reply_dst(obj->badge());
|
||||||
|
reply_pending = true;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
case Ipc_pager::PAGEFAULT:
|
case Ipc_pager::WAKE_UP:
|
||||||
case Ipc_pager::EXCEPTION:
|
{
|
||||||
{
|
/*
|
||||||
if (_pager.is_exception()) {
|
* We got a request from one of cores region-manager sessions
|
||||||
|
* to answer the pending page fault of a resolved region-manager
|
||||||
|
* client, or to resume a previously paused thread. Hence, we
|
||||||
|
* have to send a reply to the specified thread and answer the
|
||||||
|
* call.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* send reply to the caller */
|
||||||
|
_pager.set_reply_dst(Native_thread());
|
||||||
|
_pager.acknowledge_wakeup();
|
||||||
|
|
||||||
|
{
|
||||||
|
Lock::Guard guard(obj->state.lock);
|
||||||
|
/* revert exception flag */
|
||||||
|
obj->state.in_exception = false;
|
||||||
|
/* set new register contents */
|
||||||
|
_pager.set_regs(obj->state);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* send wake up message to requested thread */
|
||||||
|
_pager.set_reply_dst(obj->badge());
|
||||||
|
_pager.acknowledge_exception();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Handle exceptions that are artificially generated by the pause
|
||||||
|
* function of the CPU service.
|
||||||
|
*/
|
||||||
|
case Ipc_pager::PAUSE:
|
||||||
|
{
|
||||||
Lock::Guard guard(obj->state.lock);
|
Lock::Guard guard(obj->state.lock);
|
||||||
_pager.get_regs(&obj->state);
|
_pager.get_regs(&obj->state);
|
||||||
obj->state.exceptions++;
|
obj->state.exceptions++;
|
||||||
obj->state.in_exception = true;
|
obj->state.in_exception = true;
|
||||||
obj->submit_exception_signal();
|
|
||||||
continue;
|
/*
|
||||||
|
* It might occur that the thread raises an exception,
|
||||||
|
* after it already got resumed by the cpu_session, in
|
||||||
|
* that case we unblock it immediately.
|
||||||
|
*/
|
||||||
|
if (!obj->state.paused) {
|
||||||
|
_pager.set_reply_dst(obj->badge());
|
||||||
|
reply_pending = true;
|
||||||
|
}
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* handle request */
|
default:
|
||||||
if (obj->pager(_pager)) {
|
PERR("Got unknown message type %x!", _pager.msg_type());
|
||||||
/* could not resolv - leave thread in pagefault */
|
|
||||||
PDBG("Could not resolve pf=%p ip=%p",
|
|
||||||
(void*)_pager.fault_addr(), (void*)_pager.fault_ip());
|
|
||||||
} else {
|
|
||||||
_pager.set_reply_dst(obj->badge());
|
|
||||||
reply_pending = true;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
|
});
|
||||||
case Ipc_pager::WAKE_UP:
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* We got a request from one of cores region-manager sessions
|
|
||||||
* to answer the pending page fault of a resolved region-manager
|
|
||||||
* client, or to resume a previously paused thread. Hence, we
|
|
||||||
* have to send a reply to the specified thread and answer the
|
|
||||||
* call.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* send reply to the caller */
|
|
||||||
_pager.set_reply_dst(Native_thread());
|
|
||||||
_pager.acknowledge_wakeup();
|
|
||||||
|
|
||||||
{
|
|
||||||
Lock::Guard guard(obj->state.lock);
|
|
||||||
/* revert exception flag */
|
|
||||||
obj->state.in_exception = false;
|
|
||||||
/* set new register contents */
|
|
||||||
_pager.set_regs(obj->state);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* send wake up message to requested thread */
|
|
||||||
_pager.set_reply_dst(obj->badge());
|
|
||||||
_pager.acknowledge_exception();
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Handle exceptions that are artificially generated by the pause
|
|
||||||
* function of the CPU service.
|
|
||||||
*/
|
|
||||||
case Ipc_pager::PAUSE:
|
|
||||||
{
|
|
||||||
Lock::Guard guard(obj->state.lock);
|
|
||||||
_pager.get_regs(&obj->state);
|
|
||||||
obj->state.exceptions++;
|
|
||||||
obj->state.in_exception = true;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* It might occur that the thread raises an exception,
|
|
||||||
* after it already got resumed by the cpu_session, in
|
|
||||||
* that case we unblock it immediately.
|
|
||||||
*/
|
|
||||||
if (!obj->state.paused) {
|
|
||||||
_pager.set_reply_dst(obj->badge());
|
|
||||||
reply_pending = true;
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
default:
|
|
||||||
PERR("Got unknown message type %x!", _pager.msg_type());
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -141,7 +140,7 @@ void Pager_entrypoint::dissolve(Pager_object *obj)
|
|||||||
/* cleanup at cap session */
|
/* cleanup at cap session */
|
||||||
_cap_session->free(obj->Object_pool<Pager_object>::Entry::cap());
|
_cap_session->free(obj->Object_pool<Pager_object>::Entry::cap());
|
||||||
|
|
||||||
remove_locked(obj);
|
remove(obj);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -72,23 +72,13 @@ void Rpc_entrypoint::entry()
|
|||||||
srv.ret(Ipc_client::ERR_INVALID_OBJECT);
|
srv.ret(Ipc_client::ERR_INVALID_OBJECT);
|
||||||
|
|
||||||
/* atomically lookup and lock referenced object */
|
/* atomically lookup and lock referenced object */
|
||||||
Object_pool<Rpc_object_base>::Guard curr_obj(lookup_and_lock(srv.badge()));
|
apply(srv.badge(), [&] (Rpc_object_base *curr_obj) {
|
||||||
if (!curr_obj)
|
if (!curr_obj) return;
|
||||||
continue;
|
|
||||||
|
|
||||||
{
|
/* dispatch request */
|
||||||
Lock::Guard lock_guard(_curr_obj_lock);
|
try { srv.ret(curr_obj->dispatch(opcode, srv, srv)); }
|
||||||
_curr_obj = curr_obj;
|
catch (Blocking_canceled) { }
|
||||||
}
|
});
|
||||||
|
|
||||||
/* dispatch request */
|
|
||||||
try { srv.ret(_curr_obj->dispatch(opcode, srv, srv)); }
|
|
||||||
catch (Blocking_canceled) { }
|
|
||||||
|
|
||||||
{
|
|
||||||
Lock::Guard lock_guard(_curr_obj_lock);
|
|
||||||
_curr_obj = 0;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* answer exit call, thereby wake up '~Rpc_entrypoint' */
|
/* answer exit call, thereby wake up '~Rpc_entrypoint' */
|
||||||
|
@ -28,42 +28,44 @@ Core_rm_session::attach(Dataspace_capability ds_cap, size_t size,
|
|||||||
off_t offset, bool use_local_addr,
|
off_t offset, bool use_local_addr,
|
||||||
Rm_session::Local_addr, bool executable)
|
Rm_session::Local_addr, bool executable)
|
||||||
{
|
{
|
||||||
Object_pool<Dataspace_component>::Guard ds(_ds_ep->lookup_and_lock(ds_cap));
|
auto lambda = [&] (Dataspace_component *ds) -> Local_addr {
|
||||||
if (!ds)
|
if (!ds)
|
||||||
throw Invalid_dataspace();
|
throw Invalid_dataspace();
|
||||||
|
|
||||||
if (size == 0)
|
if (size == 0)
|
||||||
size = ds->size();
|
size = ds->size();
|
||||||
|
|
||||||
size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
|
size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
|
||||||
|
|
||||||
if (use_local_addr) {
|
if (use_local_addr) {
|
||||||
PERR("Parameter 'use_local_addr' not supported within core");
|
PERR("Parameter 'use_local_addr' not supported within core");
|
||||||
return 0UL;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (offset) {
|
if (offset) {
|
||||||
PERR("Parameter 'offset' not supported within core");
|
PERR("Parameter 'offset' not supported within core");
|
||||||
return 0UL;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* allocate range in core's virtual address space */
|
/* allocate range in core's virtual address space */
|
||||||
void *virt_addr;
|
void *virt_addr;
|
||||||
if (!platform()->region_alloc()->alloc_aligned(page_rounded_size,
|
if (!platform()->region_alloc()->alloc_aligned(page_rounded_size,
|
||||||
&virt_addr,
|
&virt_addr,
|
||||||
get_page_size_log2()).is_ok()) {
|
get_page_size_log2()).is_ok()) {
|
||||||
PERR("Could not allocate virtual address range in core of size %zd\n",
|
PERR("Could not allocate virtual address range in core of size %zd\n",
|
||||||
page_rounded_size);
|
page_rounded_size);
|
||||||
return false;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* map the dataspace's physical pages to corresponding virtual addresses */
|
/* map the dataspace's physical pages to corresponding virtual addresses */
|
||||||
unsigned num_pages = page_rounded_size >> get_page_size_log2();
|
unsigned num_pages = page_rounded_size >> get_page_size_log2();
|
||||||
Page_flags const flags = Page_flags::apply_mapping(ds.object()->writable(),
|
Page_flags const flags = Page_flags::apply_mapping(ds->writable(),
|
||||||
ds.object()->cacheability(),
|
ds->cacheability(),
|
||||||
ds.object()->is_io_mem());
|
ds->is_io_mem());
|
||||||
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages, flags))
|
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages, flags))
|
||||||
return 0UL;
|
return nullptr;
|
||||||
|
|
||||||
return virt_addr;
|
return virt_addr;
|
||||||
|
};
|
||||||
|
return _ds_ep->apply(ds_cap, lambda);
|
||||||
}
|
}
|
||||||
|
@ -25,10 +25,11 @@ Ram_dataspace_capability
|
|||||||
Cpu_session_component::utcb(Thread_capability thread_cap)
|
Cpu_session_component::utcb(Thread_capability thread_cap)
|
||||||
{
|
{
|
||||||
/* look up requested UTCB dataspace */
|
/* look up requested UTCB dataspace */
|
||||||
Object_pool<Cpu_thread_component>::Guard
|
auto lambda = [] (Cpu_thread_component *t) {
|
||||||
t(_thread_ep->lookup_and_lock(thread_cap));
|
if (!t) return Ram_dataspace_capability();
|
||||||
if (!t) return Ram_dataspace_capability();
|
return t->platform_thread()->utcb();
|
||||||
return t->platform_thread()->utcb();
|
};
|
||||||
|
return _thread_ep->apply(thread_cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -104,7 +104,7 @@ Pager_object::Pager_object(unsigned const badge, Affinity::Location)
|
|||||||
|
|
||||||
void Pager_entrypoint::dissolve(Pager_object * const o)
|
void Pager_entrypoint::dissolve(Pager_object * const o)
|
||||||
{
|
{
|
||||||
remove_locked(o);
|
remove(o);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -129,24 +129,26 @@ int Platform_thread::start(void * const ip, void * const sp)
|
|||||||
if (_main_thread) {
|
if (_main_thread) {
|
||||||
|
|
||||||
/* lookup dataspace component for physical address */
|
/* lookup dataspace component for physical address */
|
||||||
Rpc_entrypoint * ep = core_env()->entrypoint();
|
auto lambda = [&] (Dataspace_component *dsc) {
|
||||||
Object_pool<Dataspace_component>::Guard dsc(ep->lookup_and_lock(_utcb));
|
if (!dsc) return -1;
|
||||||
if (!dsc) return -1;
|
|
||||||
|
|
||||||
/* lock the address space */
|
/* lock the address space */
|
||||||
Locked_ptr<Address_space> locked_ptr(_address_space);
|
Locked_ptr<Address_space> locked_ptr(_address_space);
|
||||||
if (!locked_ptr.is_valid()) {
|
if (!locked_ptr.is_valid()) {
|
||||||
PERR("invalid RM client");
|
PERR("invalid RM client");
|
||||||
return -1;
|
return -1;
|
||||||
|
};
|
||||||
|
Page_flags const flags = Page_flags::apply_mapping(true, CACHED, false);
|
||||||
|
_utcb_pd_addr = utcb_main_thread();
|
||||||
|
Hw::Address_space * as = static_cast<Hw::Address_space*>(&*locked_ptr);
|
||||||
|
if (!as->insert_translation((addr_t)_utcb_pd_addr, dsc->phys_addr(),
|
||||||
|
sizeof(Native_utcb), flags)) {
|
||||||
|
PERR("failed to attach UTCB");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
};
|
};
|
||||||
Page_flags const flags = Page_flags::apply_mapping(true, CACHED, false);
|
if (core_env()->entrypoint()->apply(_utcb, lambda)) return -1;
|
||||||
_utcb_pd_addr = utcb_main_thread();
|
|
||||||
Hw::Address_space * as = static_cast<Hw::Address_space*>(&*locked_ptr);
|
|
||||||
if (!as->insert_translation((addr_t)_utcb_pd_addr, dsc->phys_addr(),
|
|
||||||
sizeof(Native_utcb), flags)) {
|
|
||||||
PERR("failed to attach UTCB");
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* initialize thread registers */
|
/* initialize thread registers */
|
||||||
|
@ -60,40 +60,42 @@ void Pager_entrypoint::entry()
|
|||||||
{
|
{
|
||||||
/* receive fault */
|
/* receive fault */
|
||||||
if (Kernel::await_signal(_cap.dst())) continue;
|
if (Kernel::await_signal(_cap.dst())) continue;
|
||||||
Pager_object * po =
|
|
||||||
*(Pager_object**)Thread_base::myself()->utcb()->base();
|
Untyped_capability cap =
|
||||||
|
(*(Pager_object**)Thread_base::myself()->utcb()->base())->cap();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Synchronize access and ensure that the object is still managed
|
* Synchronize access and ensure that the object is still managed
|
||||||
*
|
*
|
||||||
* FIXME: The implicit lookup of the oject isn't needed.
|
* FIXME: The implicit lookup of the oject isn't needed.
|
||||||
*/
|
*/
|
||||||
unsigned const pon = po->cap().local_name();
|
auto lambda = [&] (Pager_object *po) {
|
||||||
Object_pool<Pager_object>::Guard pog(lookup_and_lock(pon));
|
if (!po) return;
|
||||||
if (!pog) continue;
|
|
||||||
|
|
||||||
/* fetch fault data */
|
/* fetch fault data */
|
||||||
Platform_thread * const pt = (Platform_thread *)pog->badge();
|
Platform_thread * const pt = (Platform_thread *)po->badge();
|
||||||
if (!pt) {
|
if (!pt) {
|
||||||
PWRN("failed to get platform thread of faulter");
|
PWRN("failed to get platform thread of faulter");
|
||||||
continue;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
_fault.pd = pt->kernel_object()->fault_pd();
|
_fault.pd = pt->kernel_object()->fault_pd();
|
||||||
_fault.ip = pt->kernel_object()->ip;
|
_fault.ip = pt->kernel_object()->ip;
|
||||||
_fault.addr = pt->kernel_object()->fault_addr();
|
_fault.addr = pt->kernel_object()->fault_addr();
|
||||||
_fault.writes = pt->kernel_object()->fault_writes();
|
_fault.writes = pt->kernel_object()->fault_writes();
|
||||||
_fault.signal = pt->kernel_object()->fault_signal();
|
_fault.signal = pt->kernel_object()->fault_signal();
|
||||||
|
|
||||||
/* try to resolve fault directly via local region managers */
|
/* try to resolve fault directly via local region managers */
|
||||||
if (pog->pager(*this)) { continue; }
|
if (po->pager(*this)) return;
|
||||||
|
|
||||||
/* apply mapping that was determined by the local region managers */
|
/* apply mapping that was determined by the local region managers */
|
||||||
if (apply_mapping()) {
|
if (apply_mapping()) {
|
||||||
PWRN("failed to apply mapping");
|
PWRN("failed to apply mapping");
|
||||||
continue;
|
return;
|
||||||
}
|
}
|
||||||
/* let pager object go back to no-fault state */
|
/* let pager object go back to no-fault state */
|
||||||
pog->wake_up();
|
po->wake_up();
|
||||||
|
};
|
||||||
|
apply(cap, lambda);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -52,14 +52,16 @@ Signal_receiver_capability Signal_session_component::alloc_receiver()
|
|||||||
void Signal_session_component::free_receiver(Signal_receiver_capability cap)
|
void Signal_session_component::free_receiver(Signal_receiver_capability cap)
|
||||||
{
|
{
|
||||||
/* look up ressource info */
|
/* look up ressource info */
|
||||||
Receiver::Pool::Guard r(_receivers.lookup_and_lock(cap));
|
auto lambda = [&] (Receiver *r) {
|
||||||
if (!r) {
|
if (!r) {
|
||||||
PERR("unknown signal receiver");
|
PERR("unknown signal receiver");
|
||||||
throw Kill_receiver_failed();
|
throw Kill_receiver_failed();
|
||||||
}
|
}
|
||||||
/* release resources */
|
/* release resources */
|
||||||
_receivers.remove_locked(r);
|
_receivers.remove(r);
|
||||||
destroy(&_receivers_slab, r.object());
|
destroy(&_receivers_slab, r);
|
||||||
|
};
|
||||||
|
_receivers.apply(cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -68,35 +70,39 @@ Signal_session_component::alloc_context(Signal_receiver_capability src,
|
|||||||
unsigned const imprint)
|
unsigned const imprint)
|
||||||
{
|
{
|
||||||
/* look up ressource info */
|
/* look up ressource info */
|
||||||
Receiver::Pool::Guard r(_receivers.lookup_and_lock(src));
|
auto lambda = [&] (Receiver *r) {
|
||||||
if (!r) {
|
if (!r) {
|
||||||
PERR("unknown signal receiver");
|
PERR("unknown signal receiver");
|
||||||
throw Create_context_failed();
|
throw Create_context_failed();
|
||||||
}
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
Context * c = new (_contexts_slab) Context(*r.object(), imprint);
|
Context * c = new (_contexts_slab) Context(*r, imprint);
|
||||||
_contexts.insert(c);
|
_contexts.insert(c);
|
||||||
return reinterpret_cap_cast<Signal_context>(c->cap());
|
return reinterpret_cap_cast<Signal_context>(c->cap());
|
||||||
} catch (Allocator::Out_of_memory&) {
|
} catch (Allocator::Out_of_memory&) {
|
||||||
PERR("failed to allocate signal-context resources");
|
PERR("failed to allocate signal-context resources");
|
||||||
throw Out_of_metadata();
|
throw Out_of_metadata();
|
||||||
}
|
}
|
||||||
return reinterpret_cap_cast<Signal_context>(Untyped_capability());
|
return reinterpret_cap_cast<Signal_context>(Untyped_capability());
|
||||||
|
};
|
||||||
|
return _receivers.apply(src, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Signal_session_component::free_context(Signal_context_capability cap)
|
void Signal_session_component::free_context(Signal_context_capability cap)
|
||||||
{
|
{
|
||||||
/* look up ressource info */
|
/* look up ressource info */
|
||||||
Context::Pool::Guard c(_contexts.lookup_and_lock(cap));
|
auto lambda = [&] (Context *c) {
|
||||||
if (!c) {
|
if (!c) {
|
||||||
PERR("unknown signal context");
|
PERR("unknown signal context");
|
||||||
throw Kill_context_failed();
|
throw Kill_context_failed();
|
||||||
}
|
}
|
||||||
/* release resources */
|
/* release resources */
|
||||||
_contexts.remove_locked(c);
|
_contexts.remove(c);
|
||||||
destroy(&_contexts_slab, c.object());
|
destroy(&_contexts_slab, c);
|
||||||
|
};
|
||||||
|
_contexts.apply(cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -108,12 +114,8 @@ Signal_session_component::Signal_session_component(Allocator * const allocator,
|
|||||||
|
|
||||||
Signal_session_component::~Signal_session_component()
|
Signal_session_component::~Signal_session_component()
|
||||||
{
|
{
|
||||||
while (Context * const c = _contexts.first_locked()) {
|
_contexts.remove_all([this] (Context * c) {
|
||||||
_contexts.remove_locked(c);
|
destroy(&_contexts_slab, c);});
|
||||||
destroy(&_contexts_slab, c);
|
_receivers.remove_all([this] (Receiver * r) {
|
||||||
}
|
destroy(&_receivers_slab, r);});
|
||||||
while (Receiver * const r = _receivers.first_locked()) {
|
|
||||||
_receivers.remove_locked(r);
|
|
||||||
destroy(&_receivers_slab, r);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
@ -53,10 +53,11 @@ void Vm_session_component::_attach(addr_t phys_addr, addr_t vm_addr, size_t size
|
|||||||
void Vm_session_component::attach(Dataspace_capability ds_cap, addr_t vm_addr)
|
void Vm_session_component::attach(Dataspace_capability ds_cap, addr_t vm_addr)
|
||||||
{
|
{
|
||||||
/* check dataspace validity */
|
/* check dataspace validity */
|
||||||
Object_pool<Dataspace_component>::Guard dsc(_ds_ep->lookup_and_lock(ds_cap));
|
_ds_ep->apply(ds_cap, [&] (Dataspace_component *dsc) {
|
||||||
if (!dsc) throw Invalid_dataspace();
|
if (!dsc) throw Invalid_dataspace();
|
||||||
|
|
||||||
_attach(dsc->phys_addr(), vm_addr, dsc->size());
|
_attach(dsc->phys_addr(), vm_addr, dsc->size());
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -15,35 +15,34 @@ using namespace Genode;
|
|||||||
|
|
||||||
void Cpu_session_component::thread_id(Thread_capability thread_cap, int pid, int tid)
|
void Cpu_session_component::thread_id(Thread_capability thread_cap, int pid, int tid)
|
||||||
{
|
{
|
||||||
Object_pool<Cpu_thread_component>::Guard
|
_thread_ep->apply(thread_cap, [&] (Cpu_thread_component *thread) {
|
||||||
thread(_thread_ep->lookup_and_lock(thread_cap));
|
if (thread) thread->platform_thread()->thread_id(pid, tid); });
|
||||||
if (!thread) return;
|
|
||||||
|
|
||||||
thread->platform_thread()->thread_id(pid, tid);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Untyped_capability Cpu_session_component::server_sd(Thread_capability thread_cap)
|
Untyped_capability Cpu_session_component::server_sd(Thread_capability thread_cap)
|
||||||
{
|
{
|
||||||
Object_pool<Cpu_thread_component>::Guard
|
auto lambda = [] (Cpu_thread_component *thread) {
|
||||||
thread(_thread_ep->lookup_and_lock(thread_cap));
|
if (!thread) return Untyped_capability();
|
||||||
if (!thread) return Untyped_capability();
|
|
||||||
|
|
||||||
enum { DUMMY_LOCAL_NAME = 0 };
|
enum { DUMMY_LOCAL_NAME = 0 };
|
||||||
typedef Native_capability::Dst Dst;
|
typedef Native_capability::Dst Dst;
|
||||||
return Untyped_capability(Dst(thread->platform_thread()->server_sd()),
|
return Untyped_capability(Dst(thread->platform_thread()->server_sd()),
|
||||||
DUMMY_LOCAL_NAME);
|
DUMMY_LOCAL_NAME);
|
||||||
|
};
|
||||||
|
return _thread_ep->apply(thread_cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Untyped_capability Cpu_session_component::client_sd(Thread_capability thread_cap)
|
Untyped_capability Cpu_session_component::client_sd(Thread_capability thread_cap)
|
||||||
{
|
{
|
||||||
Object_pool<Cpu_thread_component>::Guard
|
auto lambda = [] (Cpu_thread_component *thread) {
|
||||||
thread(_thread_ep->lookup_and_lock(thread_cap));
|
if (!thread) return Untyped_capability();
|
||||||
if (!thread) return Untyped_capability();
|
|
||||||
|
|
||||||
enum { DUMMY_LOCAL_NAME = 0 };
|
enum { DUMMY_LOCAL_NAME = 0 };
|
||||||
typedef Native_capability::Dst Dst;
|
typedef Native_capability::Dst Dst;
|
||||||
return Untyped_capability(Dst(thread->platform_thread()->client_sd()),
|
return Untyped_capability(Dst(thread->platform_thread()->client_sd()),
|
||||||
DUMMY_LOCAL_NAME);
|
DUMMY_LOCAL_NAME);
|
||||||
|
};
|
||||||
|
return _thread_ep->apply(thread_cap, lambda);
|
||||||
}
|
}
|
||||||
|
@ -40,16 +40,15 @@ namespace Genode {
|
|||||||
*/
|
*/
|
||||||
Thread_capability thread_cap() { return _thread_cap; } const
|
Thread_capability thread_cap() { return _thread_cap; } const
|
||||||
void thread_cap(Thread_capability cap) { _thread_cap = cap; }
|
void thread_cap(Thread_capability cap) { _thread_cap = cap; }
|
||||||
|
|
||||||
/* required by lookup_and_lock, provided by Object_pool::Entry normally */
|
|
||||||
void release() { }
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct Pager_entrypoint
|
struct Pager_entrypoint
|
||||||
{
|
{
|
||||||
Pager_entrypoint(Cap_session *) { }
|
Pager_entrypoint(Cap_session *) { }
|
||||||
|
|
||||||
Pager_object *lookup_and_lock(Pager_capability) { return 0; }
|
template <typename FUNC>
|
||||||
|
auto apply(Pager_capability, FUNC f) -> decltype(f(nullptr)) {
|
||||||
|
return f(nullptr); }
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -24,6 +24,7 @@
|
|||||||
|
|
||||||
namespace Genode {
|
namespace Genode {
|
||||||
|
|
||||||
|
class Dataspace_component;
|
||||||
class Pd_session_component : public Rpc_object<Linux_pd_session, Pd_session_component>
|
class Pd_session_component : public Rpc_object<Linux_pd_session, Pd_session_component>
|
||||||
{
|
{
|
||||||
private:
|
private:
|
||||||
@ -39,6 +40,8 @@ namespace Genode {
|
|||||||
Parent_capability _parent;
|
Parent_capability _parent;
|
||||||
Rpc_entrypoint *_ds_ep;
|
Rpc_entrypoint *_ds_ep;
|
||||||
|
|
||||||
|
void _start(Dataspace_component *ds);
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -305,6 +305,98 @@ static const char *get_env(const char *key)
|
|||||||
** PD session interface **
|
** PD session interface **
|
||||||
**************************/
|
**************************/
|
||||||
|
|
||||||
|
void Pd_session_component::_start(Dataspace_component *ds)
|
||||||
|
{
|
||||||
|
const char *tmp_filename = "temporary_executable_elf_dataspace_file_for_execve";
|
||||||
|
|
||||||
|
if (!ds) {
|
||||||
|
PERR("could not lookup binary, aborted PD startup");
|
||||||
|
return; /* XXX reflect error to client */
|
||||||
|
}
|
||||||
|
|
||||||
|
/* we need 's' on stack to make it an lvalue with an lvalue member we use the pointer to */
|
||||||
|
Linux_dataspace::Filename s = ds->fname();
|
||||||
|
const char *filename = s.buf;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* In order to be executable via 'execve', a program must be represented as
|
||||||
|
* a file on the Linux file system. However, this is not the case for a
|
||||||
|
* plain RAM dataspace that contains an ELF image. In this case, we copy
|
||||||
|
* the dataspace content into a temporary file whose path is passed to
|
||||||
|
* 'execve()'.
|
||||||
|
*/
|
||||||
|
if (strcmp(filename, "") == 0) {
|
||||||
|
|
||||||
|
filename = tmp_filename;
|
||||||
|
|
||||||
|
int tmp_binary_fd = lx_open(filename, O_CREAT | O_EXCL | O_WRONLY, S_IRWXU);
|
||||||
|
if (tmp_binary_fd < 0) {
|
||||||
|
PERR("Could not create file '%s'", filename);
|
||||||
|
return; /* XXX reflect error to client */
|
||||||
|
}
|
||||||
|
|
||||||
|
char buf[4096];
|
||||||
|
int num_bytes = 0;
|
||||||
|
while ((num_bytes = lx_read(ds->fd().dst().socket, buf, sizeof(buf))) != 0)
|
||||||
|
lx_write(tmp_binary_fd, buf, num_bytes);
|
||||||
|
|
||||||
|
lx_close(tmp_binary_fd);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* pass parent capability as environment variable to the child */
|
||||||
|
enum { ENV_STR_LEN = 256 };
|
||||||
|
static char envbuf[5][ENV_STR_LEN];
|
||||||
|
Genode::snprintf(envbuf[1], ENV_STR_LEN, "parent_local_name=%lu",
|
||||||
|
_parent.local_name());
|
||||||
|
Genode::snprintf(envbuf[2], ENV_STR_LEN, "DISPLAY=%s",
|
||||||
|
get_env("DISPLAY"));
|
||||||
|
Genode::snprintf(envbuf[3], ENV_STR_LEN, "HOME=%s",
|
||||||
|
get_env("HOME"));
|
||||||
|
Genode::snprintf(envbuf[4], ENV_STR_LEN, "LD_LIBRARY_PATH=%s",
|
||||||
|
get_env("LD_LIBRARY_PATH"));
|
||||||
|
|
||||||
|
char *env[] = { &envbuf[0][0], &envbuf[1][0], &envbuf[2][0],
|
||||||
|
&envbuf[3][0], &envbuf[4][0], 0 };
|
||||||
|
|
||||||
|
/* prefix name of Linux program (helps killing some zombies) */
|
||||||
|
char const *prefix = "[Genode] ";
|
||||||
|
char pname_buf[sizeof(_label) + sizeof(prefix)];
|
||||||
|
snprintf(pname_buf, sizeof(pname_buf), "%s%s", prefix, _label);
|
||||||
|
char *argv_buf[2];
|
||||||
|
argv_buf[0] = pname_buf;
|
||||||
|
argv_buf[1] = 0;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We cannot create the new process via 'fork()' because all our used
|
||||||
|
* memory including stack memory is backed by dataspaces, which had been
|
||||||
|
* mapped with the 'MAP_SHARED' flag. Therefore, after being created, the
|
||||||
|
* new process starts using the stack with the same physical memory pages
|
||||||
|
* as used by parent process. This would ultimately lead to stack
|
||||||
|
* corruption. To prevent both processes from concurrently accessing the
|
||||||
|
* same stack, we pause the execution of the parent until the child calls
|
||||||
|
* 'execve'. From then on, the child has its private memory layout. The
|
||||||
|
* desired behaviour is normally provided by 'vfork' but we use the more
|
||||||
|
* modern 'clone' call for this purpose.
|
||||||
|
*/
|
||||||
|
enum { STACK_SIZE = 4096 };
|
||||||
|
static char stack[STACK_SIZE]; /* initial stack used by the child until
|
||||||
|
calling 'execve' */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Argument frame as passed to 'clone'. Because, we can only pass a single
|
||||||
|
* pointer, all arguments are embedded within the 'execve_args' struct.
|
||||||
|
*/
|
||||||
|
Execve_args arg(filename, _root, argv_buf, env, _uid, _gid,
|
||||||
|
_parent.dst().socket);
|
||||||
|
|
||||||
|
_pid = lx_create_process((int (*)(void *))_exec_child,
|
||||||
|
stack + STACK_SIZE - sizeof(umword_t), &arg);
|
||||||
|
|
||||||
|
if (strcmp(filename, tmp_filename) == 0)
|
||||||
|
lx_unlink(filename);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
Pd_session_component::Pd_session_component(Rpc_entrypoint * ep,
|
Pd_session_component::Pd_session_component(Rpc_entrypoint * ep,
|
||||||
Allocator * md_alloc,
|
Allocator * md_alloc,
|
||||||
const char * args)
|
const char * args)
|
||||||
@ -366,94 +458,7 @@ int Pd_session_component::assign_parent(Parent_capability parent)
|
|||||||
|
|
||||||
void Pd_session_component::start(Capability<Dataspace> binary)
|
void Pd_session_component::start(Capability<Dataspace> binary)
|
||||||
{
|
{
|
||||||
const char *tmp_filename = "temporary_executable_elf_dataspace_file_for_execve";
|
|
||||||
|
|
||||||
/* lookup binary dataspace */
|
/* lookup binary dataspace */
|
||||||
Object_pool<Dataspace_component>::Guard ds(_ds_ep->lookup_and_lock(binary));
|
_ds_ep->apply(binary, [&] (Dataspace_component *ds) {
|
||||||
|
_start(ds); });
|
||||||
if (!ds) {
|
|
||||||
PERR("could not lookup binary, aborted PD startup");
|
|
||||||
return; /* XXX reflect error to client */
|
|
||||||
}
|
|
||||||
|
|
||||||
/* we need 's' on stack to make it an lvalue with an lvalue member we use the pointer to */
|
|
||||||
Linux_dataspace::Filename s = ds->fname();
|
|
||||||
const char *filename = s.buf;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* In order to be executable via 'execve', a program must be represented as
|
|
||||||
* a file on the Linux file system. However, this is not the case for a
|
|
||||||
* plain RAM dataspace that contains an ELF image. In this case, we copy
|
|
||||||
* the dataspace content into a temporary file whose path is passed to
|
|
||||||
* 'execve()'.
|
|
||||||
*/
|
|
||||||
if (strcmp(filename, "") == 0) {
|
|
||||||
|
|
||||||
filename = tmp_filename;
|
|
||||||
|
|
||||||
int tmp_binary_fd = lx_open(filename, O_CREAT | O_EXCL | O_WRONLY, S_IRWXU);
|
|
||||||
if (tmp_binary_fd < 0) {
|
|
||||||
PERR("Could not create file '%s'", filename);
|
|
||||||
return; /* XXX reflect error to client */
|
|
||||||
}
|
|
||||||
|
|
||||||
char buf[4096];
|
|
||||||
int num_bytes = 0;
|
|
||||||
while ((num_bytes = lx_read(ds->fd().dst().socket, buf, sizeof(buf))) != 0)
|
|
||||||
lx_write(tmp_binary_fd, buf, num_bytes);
|
|
||||||
|
|
||||||
lx_close(tmp_binary_fd);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* pass parent capability as environment variable to the child */
|
|
||||||
enum { ENV_STR_LEN = 256 };
|
|
||||||
static char envbuf[5][ENV_STR_LEN];
|
|
||||||
Genode::snprintf(envbuf[1], ENV_STR_LEN, "parent_local_name=%lu",
|
|
||||||
_parent.local_name());
|
|
||||||
Genode::snprintf(envbuf[2], ENV_STR_LEN, "DISPLAY=%s",
|
|
||||||
get_env("DISPLAY"));
|
|
||||||
Genode::snprintf(envbuf[3], ENV_STR_LEN, "HOME=%s",
|
|
||||||
get_env("HOME"));
|
|
||||||
Genode::snprintf(envbuf[4], ENV_STR_LEN, "LD_LIBRARY_PATH=%s",
|
|
||||||
get_env("LD_LIBRARY_PATH"));
|
|
||||||
|
|
||||||
char *env[] = { &envbuf[0][0], &envbuf[1][0], &envbuf[2][0],
|
|
||||||
&envbuf[3][0], &envbuf[4][0], 0 };
|
|
||||||
|
|
||||||
/* prefix name of Linux program (helps killing some zombies) */
|
|
||||||
char const *prefix = "[Genode] ";
|
|
||||||
char pname_buf[sizeof(_label) + sizeof(prefix)];
|
|
||||||
snprintf(pname_buf, sizeof(pname_buf), "%s%s", prefix, _label);
|
|
||||||
char *argv_buf[2];
|
|
||||||
argv_buf[0] = pname_buf;
|
|
||||||
argv_buf[1] = 0;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* We cannot create the new process via 'fork()' because all our used
|
|
||||||
* memory including stack memory is backed by dataspaces, which had been
|
|
||||||
* mapped with the 'MAP_SHARED' flag. Therefore, after being created, the
|
|
||||||
* new process starts using the stack with the same physical memory pages
|
|
||||||
* as used by parent process. This would ultimately lead to stack
|
|
||||||
* corruption. To prevent both processes from concurrently accessing the
|
|
||||||
* same stack, we pause the execution of the parent until the child calls
|
|
||||||
* 'execve'. From then on, the child has its private memory layout. The
|
|
||||||
* desired behaviour is normally provided by 'vfork' but we use the more
|
|
||||||
* modern 'clone' call for this purpose.
|
|
||||||
*/
|
|
||||||
enum { STACK_SIZE = 4096 };
|
|
||||||
static char stack[STACK_SIZE]; /* initial stack used by the child until
|
|
||||||
calling 'execve' */
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Argument frame as passed to 'clone'. Because, we can only pass a single
|
|
||||||
* pointer, all arguments are embedded within the 'execve_args' struct.
|
|
||||||
*/
|
|
||||||
Execve_args arg(filename, _root, argv_buf, env, _uid, _gid,
|
|
||||||
_parent.dst().socket);
|
|
||||||
|
|
||||||
_pid = lx_create_process((int (*)(void *))_exec_child,
|
|
||||||
stack + STACK_SIZE - sizeof(umword_t), &arg);
|
|
||||||
|
|
||||||
if (strcmp(filename, tmp_filename) == 0)
|
|
||||||
lx_unlink(filename);
|
|
||||||
};
|
};
|
||||||
|
@ -193,10 +193,8 @@ Platform_env_base::Rm_session_mmap::_dataspace_size(Capability<Dataspace> ds_cap
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* use local function call if called from the entrypoint */
|
/* use local function call if called from the entrypoint */
|
||||||
Object_pool<Rpc_object_base>::Guard
|
return core_env()->entrypoint()->apply(ds_cap, [] (Dataspace *ds) {
|
||||||
ds_rpc(core_env()->entrypoint()->lookup_and_lock(ds_cap));
|
return ds ? ds->size() : 0; });
|
||||||
Dataspace * ds = dynamic_cast<Dataspace *>(&*ds_rpc);
|
|
||||||
return ds ? ds->size() : 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -212,10 +210,6 @@ int Platform_env_base::Rm_session_mmap::_dataspace_fd(Capability<Dataspace> ds_c
|
|||||||
|
|
||||||
Capability<Linux_dataspace> lx_ds_cap = static_cap_cast<Linux_dataspace>(ds_cap);
|
Capability<Linux_dataspace> lx_ds_cap = static_cap_cast<Linux_dataspace>(ds_cap);
|
||||||
|
|
||||||
Object_pool<Rpc_object_base>::Guard
|
|
||||||
ds_rpc(core_env()->entrypoint()->lookup_and_lock(lx_ds_cap));
|
|
||||||
Linux_dataspace * ds = dynamic_cast<Linux_dataspace *>(&*ds_rpc);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Return a duplicate of the dataspace file descriptor, which will be freed
|
* Return a duplicate of the dataspace file descriptor, which will be freed
|
||||||
* immediately after mmap'ing the file (see 'Rm_session_mmap').
|
* immediately after mmap'ing the file (see 'Rm_session_mmap').
|
||||||
@ -225,7 +219,8 @@ int Platform_env_base::Rm_session_mmap::_dataspace_fd(Capability<Dataspace> ds_c
|
|||||||
* socket descriptor during the RPC handling). When later destroying the
|
* socket descriptor during the RPC handling). When later destroying the
|
||||||
* dataspace, the descriptor would unexpectedly be closed again.
|
* dataspace, the descriptor would unexpectedly be closed again.
|
||||||
*/
|
*/
|
||||||
return ds ? lx_dup(ds->fd().dst().socket) : -1;
|
return core_env()->entrypoint()->apply(lx_ds_cap, [] (Linux_dataspace *ds) {
|
||||||
|
return ds ? lx_dup(ds->fd().dst().socket) : -1; });
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -239,9 +234,6 @@ bool Platform_env_base::Rm_session_mmap::_dataspace_writable(Dataspace_capabilit
|
|||||||
return writable;
|
return writable;
|
||||||
}
|
}
|
||||||
|
|
||||||
Object_pool<Rpc_object_base>::Guard
|
return core_env()->entrypoint()->apply(ds_cap, [] (Dataspace *ds) {
|
||||||
ds_rpc(core_env()->entrypoint()->lookup_and_lock(ds_cap));
|
return ds ? ds->writable() : false; });
|
||||||
Dataspace * ds = dynamic_cast<Dataspace *>(&*ds_rpc);
|
|
||||||
|
|
||||||
return ds ? ds->writable() : false;
|
|
||||||
}
|
}
|
||||||
|
@ -89,19 +89,7 @@ void Rpc_entrypoint::_dissolve(Rpc_object_base *obj)
|
|||||||
Nova::revoke(Nova::Obj_crd(obj->cap().local_name(), 0), true);
|
Nova::revoke(Nova::Obj_crd(obj->cap().local_name(), 0), true);
|
||||||
|
|
||||||
/* make sure nobody is able to find this object */
|
/* make sure nobody is able to find this object */
|
||||||
remove_locked(obj);
|
remove(obj);
|
||||||
|
|
||||||
/*
|
|
||||||
* The activation may execute a blocking operation in a dispatch function.
|
|
||||||
* Before resolving the corresponding object, we need to ensure that it is
|
|
||||||
* no longer used by an activation. Therefore, we to need cancel an
|
|
||||||
* eventually blocking operation and let the activation leave the context
|
|
||||||
* of the object.
|
|
||||||
*/
|
|
||||||
_leave_server_object(obj);
|
|
||||||
|
|
||||||
/* wait until nobody is inside dispatch */
|
|
||||||
obj->acquire();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Rpc_entrypoint::_activation_entry()
|
void Rpc_entrypoint::_activation_entry()
|
||||||
@ -115,10 +103,9 @@ void Rpc_entrypoint::_activation_entry()
|
|||||||
|
|
||||||
Rpc_entrypoint *ep = static_cast<Rpc_entrypoint *>(Thread_base::myself());
|
Rpc_entrypoint *ep = static_cast<Rpc_entrypoint *>(Thread_base::myself());
|
||||||
|
|
||||||
/* delay start if requested so */
|
{
|
||||||
if (ep->_curr_obj) {
|
/* potentially delay start */
|
||||||
ep->_delay_start.lock();
|
Lock::Guard lock_guard(ep->_delay_start);
|
||||||
ep->_delay_start.unlock();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* required to decrease ref count of capability used during last reply */
|
/* required to decrease ref count of capability used during last reply */
|
||||||
@ -134,30 +121,25 @@ void Rpc_entrypoint::_activation_entry()
|
|||||||
srv.ret(Ipc_client::ERR_INVALID_OBJECT);
|
srv.ret(Ipc_client::ERR_INVALID_OBJECT);
|
||||||
|
|
||||||
/* atomically lookup and lock referenced object */
|
/* atomically lookup and lock referenced object */
|
||||||
ep->_curr_obj = ep->lookup_and_lock(id_pt);
|
auto lambda = [&] (Rpc_object_base *obj) {
|
||||||
if (!ep->_curr_obj) {
|
if (!obj) {
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Badge is used to suppress error message solely.
|
* Badge is used to suppress error message solely.
|
||||||
* It's non zero during cleanup call of an
|
* It's non zero during cleanup call of an
|
||||||
* rpc_object_base object, see _leave_server_object.
|
* rpc_object_base object, see _leave_server_object.
|
||||||
*/
|
*/
|
||||||
if (!srv.badge())
|
if (!srv.badge())
|
||||||
PERR("could not look up server object, "
|
PERR("could not look up server object, "
|
||||||
" return from call id_pt=%lx",
|
" return from call id_pt=%lx", id_pt);
|
||||||
id_pt);
|
return;
|
||||||
|
}
|
||||||
} else {
|
|
||||||
|
|
||||||
/* dispatch request */
|
/* dispatch request */
|
||||||
try { srv.ret(ep->_curr_obj->dispatch(opcode, srv, srv)); }
|
try { srv.ret(obj->dispatch(opcode, srv, srv)); }
|
||||||
catch (Blocking_canceled) { }
|
catch (Blocking_canceled) { }
|
||||||
|
};
|
||||||
Rpc_object_base * tmp = ep->_curr_obj;
|
ep->apply(id_pt, lambda);
|
||||||
ep->_curr_obj = 0;
|
|
||||||
|
|
||||||
tmp->release();
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!ep->_rcv_buf.prepare_rcv_window((Nova::Utcb *)ep->utcb()))
|
if (!ep->_rcv_buf.prepare_rcv_window((Nova::Utcb *)ep->utcb()))
|
||||||
PWRN("out of capability selectors for handling server requests");
|
PWRN("out of capability selectors for handling server requests");
|
||||||
@ -174,30 +156,6 @@ void Rpc_entrypoint::entry()
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Rpc_entrypoint::_leave_server_object(Rpc_object_base *)
|
|
||||||
{
|
|
||||||
using namespace Nova;
|
|
||||||
|
|
||||||
Utcb *utcb = reinterpret_cast<Utcb *>(Thread_base::myself()->utcb());
|
|
||||||
/* don't call ourself */
|
|
||||||
if (utcb == reinterpret_cast<Utcb *>(this->utcb()))
|
|
||||||
return;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Required outside of core. E.g. launchpad needs it to forcefully kill
|
|
||||||
* a client which blocks on a session opening request where the service
|
|
||||||
* is not up yet.
|
|
||||||
*/
|
|
||||||
cancel_blocking();
|
|
||||||
|
|
||||||
utcb->msg[0] = 0xdead;
|
|
||||||
utcb->set_msg_word(1);
|
|
||||||
if (uint8_t res = call(_cap.local_name()))
|
|
||||||
PERR("%8p - could not clean up entry point of thread 0x%p - res %u",
|
|
||||||
utcb, this->utcb(), res);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
void Rpc_entrypoint::_block_until_cap_valid() { }
|
void Rpc_entrypoint::_block_until_cap_valid() { }
|
||||||
|
|
||||||
|
|
||||||
@ -220,7 +178,6 @@ Rpc_entrypoint::Rpc_entrypoint(Cap_session *cap_session, size_t stack_size,
|
|||||||
Affinity::Location location)
|
Affinity::Location location)
|
||||||
:
|
:
|
||||||
Thread_base(Cpu_session::DEFAULT_WEIGHT, name, stack_size),
|
Thread_base(Cpu_session::DEFAULT_WEIGHT, name, stack_size),
|
||||||
_curr_obj(start_on_construction ? 0 : (Rpc_object_base *)~0UL),
|
|
||||||
_delay_start(Lock::LOCKED),
|
_delay_start(Lock::LOCKED),
|
||||||
_cap_session(cap_session)
|
_cap_session(cap_session)
|
||||||
{
|
{
|
||||||
@ -260,13 +217,10 @@ Rpc_entrypoint::~Rpc_entrypoint()
|
|||||||
{
|
{
|
||||||
typedef Object_pool<Rpc_object_base> Pool;
|
typedef Object_pool<Rpc_object_base> Pool;
|
||||||
|
|
||||||
if (Pool::first()) {
|
Pool::remove_all([&] (Rpc_object_base *obj) {
|
||||||
PWRN("Object pool not empty in %s", __func__);
|
PWRN("Object pool not empty in %s", __func__);
|
||||||
|
_dissolve(obj);
|
||||||
/* dissolve all objects - objects are not destroyed! */
|
});
|
||||||
while (Rpc_object_base *obj = Pool::first())
|
|
||||||
_dissolve(obj);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!_cap.valid())
|
if (!_cap.valid())
|
||||||
return;
|
return;
|
||||||
|
@ -32,20 +32,22 @@ Core_rm_session::attach(Dataspace_capability ds_cap, size_t size,
|
|||||||
Rm_session::Local_addr local_addr,
|
Rm_session::Local_addr local_addr,
|
||||||
bool executable)
|
bool executable)
|
||||||
{
|
{
|
||||||
Object_pool<Dataspace_component>::Guard ds(_ds_ep->lookup_and_lock(ds_cap));
|
auto lambda = [&] (Dataspace_component *ds) -> Local_addr {
|
||||||
if (!ds)
|
if (!ds)
|
||||||
throw Invalid_dataspace();
|
throw Invalid_dataspace();
|
||||||
|
|
||||||
if (use_local_addr) {
|
if (use_local_addr) {
|
||||||
PERR("Parameter 'use_local_addr' not supported within core");
|
PERR("Parameter 'use_local_addr' not supported within core");
|
||||||
return 0UL;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (offset) {
|
if (offset) {
|
||||||
PERR("Parameter 'offset' not supported within core");
|
PERR("Parameter 'offset' not supported within core");
|
||||||
return 0UL;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* allocate range in core's virtual address space */
|
/* allocate range in core's virtual address space */
|
||||||
return ds->core_local_addr();
|
return ds->core_local_addr();
|
||||||
|
};
|
||||||
|
return _ds_ep->apply(ds_cap, lambda);
|
||||||
}
|
}
|
||||||
|
@ -23,12 +23,13 @@ using namespace Genode;
|
|||||||
Native_capability
|
Native_capability
|
||||||
Cpu_session_component::pause_sync(Thread_capability thread_cap)
|
Cpu_session_component::pause_sync(Thread_capability thread_cap)
|
||||||
{
|
{
|
||||||
Object_pool<Cpu_thread_component>::Guard
|
auto lambda = [] (Cpu_thread_component *thread) {
|
||||||
thread(_thread_ep->lookup_and_lock(thread_cap));
|
if (!thread || !thread->platform_thread())
|
||||||
if (!thread || !thread->platform_thread())
|
return Native_capability();
|
||||||
return Native_capability();
|
|
||||||
|
|
||||||
return thread->platform_thread()->pause();
|
return thread->platform_thread()->pause();
|
||||||
|
};
|
||||||
|
return _thread_ep->apply(thread_cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -37,12 +38,13 @@ Cpu_session_component::single_step_sync(Thread_capability thread_cap, bool enabl
|
|||||||
{
|
{
|
||||||
using namespace Genode;
|
using namespace Genode;
|
||||||
|
|
||||||
Object_pool<Cpu_thread_component>::Guard
|
auto lambda = [enable] (Cpu_thread_component *thread) {
|
||||||
thread(_thread_ep->lookup_and_lock(thread_cap));
|
if (!thread || !thread->platform_thread())
|
||||||
if (!thread || !thread->platform_thread())
|
return Native_capability();
|
||||||
return Native_capability();
|
|
||||||
|
|
||||||
return thread->platform_thread()->single_step(enable);
|
return thread->platform_thread()->single_step(enable);
|
||||||
|
};
|
||||||
|
return _thread_ep->apply(thread_cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -170,7 +170,6 @@ namespace Genode {
|
|||||||
*/
|
*/
|
||||||
void assign_pd(addr_t pd_sel) { _pd = pd_sel; }
|
void assign_pd(addr_t pd_sel) { _pd = pd_sel; }
|
||||||
addr_t pd_sel() const { return _pd; }
|
addr_t pd_sel() const { return _pd; }
|
||||||
void dump_kernel_quota_usage(Pager_object * = (Pager_object *)~0UL);
|
|
||||||
|
|
||||||
void exception(uint8_t exit_id);
|
void exception(uint8_t exit_id);
|
||||||
|
|
||||||
@ -386,12 +385,6 @@ namespace Genode {
|
|||||||
*/
|
*/
|
||||||
void ep(Pager_entrypoint *ep) { _ep = ep; }
|
void ep(Pager_entrypoint *ep) { _ep = ep; }
|
||||||
|
|
||||||
/*
|
|
||||||
* Used for diagnostic/debugging purposes
|
|
||||||
* - see Pager_object::dump_kernel_quota_usage
|
|
||||||
*/
|
|
||||||
Pager_object * pager_head();
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Thread interface
|
* Thread interface
|
||||||
*/
|
*/
|
||||||
|
@ -500,29 +500,6 @@ Exception_handlers::Exception_handlers(Pager_object *obj)
|
|||||||
******************/
|
******************/
|
||||||
|
|
||||||
|
|
||||||
void Pager_object::dump_kernel_quota_usage(Pager_object *obj)
|
|
||||||
{
|
|
||||||
if (obj == (Pager_object *)~0UL) {
|
|
||||||
unsigned use_cpu = location.xpos();
|
|
||||||
obj = pager_threads[use_cpu]->pager_head();
|
|
||||||
PINF("-- kernel memory usage of Genode PDs --");
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!obj)
|
|
||||||
return;
|
|
||||||
|
|
||||||
addr_t limit = 0; addr_t usage = 0;
|
|
||||||
Nova::pd_ctrl_debug(obj->pd_sel(), limit, usage);
|
|
||||||
|
|
||||||
char const * thread_name = reinterpret_cast<char const *>(obj->badge());
|
|
||||||
PINF("pd=0x%lx pager=%p thread='%s' limit=0x%lx usage=0x%lx",
|
|
||||||
obj->pd_sel(), obj, thread_name, limit, usage);
|
|
||||||
|
|
||||||
dump_kernel_quota_usage(static_cast<Pager_object *>(obj->child(Genode::Avl_node_base::LEFT)));
|
|
||||||
dump_kernel_quota_usage(static_cast<Pager_object *>(obj->child(Genode::Avl_node_base::RIGHT)));
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
Pager_object::Pager_object(unsigned long badge, Affinity::Location location)
|
Pager_object::Pager_object(unsigned long badge, Affinity::Location location)
|
||||||
:
|
:
|
||||||
_badge(badge),
|
_badge(badge),
|
||||||
@ -847,9 +824,6 @@ Pager_activation_base::Pager_activation_base(const char *name, size_t stack_size
|
|||||||
void Pager_activation_base::entry() { }
|
void Pager_activation_base::entry() { }
|
||||||
|
|
||||||
|
|
||||||
Pager_object * Pager_activation_base::pager_head() {
|
|
||||||
return _ep ? _ep->first() : nullptr; }
|
|
||||||
|
|
||||||
/**********************
|
/**********************
|
||||||
** Pager entrypoint **
|
** Pager entrypoint **
|
||||||
**********************/
|
**********************/
|
||||||
@ -918,7 +892,7 @@ void Pager_entrypoint::dissolve(Pager_object *obj)
|
|||||||
/* revoke cap selector locally */
|
/* revoke cap selector locally */
|
||||||
revoke(pager_obj.dst(), true);
|
revoke(pager_obj.dst(), true);
|
||||||
/* remove object from pool */
|
/* remove object from pool */
|
||||||
remove_locked(obj);
|
remove(obj);
|
||||||
/* take care that no faults are in-flight */
|
/* take care that no faults are in-flight */
|
||||||
obj->cleanup_call();
|
obj->cleanup_call();
|
||||||
}
|
}
|
||||||
|
@ -146,9 +146,6 @@ int Platform_thread::start(void *ip, void *sp)
|
|||||||
KEEP_FREE_PAGES_NOT_AVAILABLE_FOR_UPGRADE, UPPER_LIMIT_PAGES);
|
KEEP_FREE_PAGES_NOT_AVAILABLE_FOR_UPGRADE, UPPER_LIMIT_PAGES);
|
||||||
if (res != NOVA_OK) {
|
if (res != NOVA_OK) {
|
||||||
PERR("create_pd returned %d", res);
|
PERR("create_pd returned %d", res);
|
||||||
|
|
||||||
_pager->dump_kernel_quota_usage();
|
|
||||||
|
|
||||||
goto cleanup_pd;
|
goto cleanup_pd;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -87,19 +87,20 @@ Signal_context_capability Signal_session_component::alloc_context(long imprint)

 void Signal_session_component::free_context(Signal_context_capability context_cap)
 {
-	Signal_context_component * context =
-		dynamic_cast<Signal_context_component *>(_signal_queue.lookup_and_lock(context_cap.local_name()));
+	auto lambda = [&] (Signal_context_component *context) {
 		if (!context) {
 			PWRN("%p - specified signal-context capability has wrong type %lx",
 			     this, context_cap.local_name());
 			return;
 		}

-	_signal_queue.remove_locked(context);
+		_signal_queue.remove(context);
 		destroy(&_contexts_slab, context);

 		Nova::revoke(Nova::Obj_crd(context_cap.local_name(), 0));
 		cap_map()->remove(context_cap.local_name(), 0);
+	};
+	_signal_queue.apply(context_cap, lambda);
 }

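The conversion above is the pattern applied throughout this commit: a 'lookup_and_lock' call that returned a locked pointer (released via an 'Object_pool::Guard') becomes a lambda handed to 'apply', which runs while the pool entry is held and must itself handle the not-found case. A condensed before/after sketch, where 'Obj', '_pool', and 'operation()' are hypothetical placeholders and the "before" half refers to the pre-patch interface:

    /* before: the caller obtains a locked pointer, the Guard releases it */
    Object_pool<Obj>::Guard obj(_pool.lookup_and_lock(cap));
    if (obj)
        obj->operation();

    /* after: the work moves into a lambda that 'apply' runs on the pooled object */
    _pool.apply(cap, [&] (Obj *obj) {
        if (obj)
            obj->operation(); });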
@@ -26,37 +26,40 @@ Core_rm_session::attach(Dataspace_capability ds_cap, size_t size,
 {
 	using namespace Okl4;

-	Object_pool<Dataspace_component>::Guard ds(_ds_ep->lookup_and_lock(ds_cap));
+	auto lambda = [&] (Dataspace_component *ds) -> void* {
 		if (!ds)
 			throw Invalid_dataspace();

 		if (size == 0)
 			size = ds->size();

-		size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
+		size_t page_rounded_size = (size + get_page_size() - 1)
+		                           & get_page_mask();

 		if (use_local_addr) {
 			PERR("Parameter 'use_local_addr' not supported within core");
-			return 0;
+			return nullptr;
 		}

 		if (offset) {
 			PERR("Parameter 'offset' not supported within core");
-			return 0;
+			return nullptr;
 		}

 		/* allocate range in core's virtual address space */
 		void *virt_addr;
 		if (!platform()->region_alloc()->alloc(page_rounded_size, &virt_addr)) {
 			PERR("Could not allocate virtual address range in core of size %zd\n",
 			     page_rounded_size);
-			return false;
+			return nullptr;
 		}

 		/* map the dataspace's physical pages to corresponding virtual addresses */
 		unsigned num_pages = page_rounded_size >> get_page_size_log2();
 		if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages))
-			return 0;
+			return nullptr;
+		return virt_addr;
+	};

-	return virt_addr;
+	return _ds_ep->apply(ds_cap, lambda);
 }
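Note how the lambda above is given an explicit '-> void*' return type: 'apply' (see the object_pool.h hunk later in this diff) deduces its own return type from the lambda's 'operator()', and without the annotation the mixed 'return nullptr'/'return virt_addr' statements would not deduce to a single type. A generic, framework-free C++ sketch of that point:

    /* without '-> void*', the two return statements would deduce
       'nullptr_t' vs. 'void *' and the lambda would not compile */
    static void *pick(void *p)
    {
        auto f = [] (void *q) -> void* {
            if (!q)
                return nullptr;
            return q; };
        return f(p);
    }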
@@ -21,8 +21,9 @@ using namespace Genode;

 void Pd_session_component::space_pager(Thread_capability thread)
 {
-	Object_pool<Cpu_thread_component>::Guard
-		cpu_thread(_thread_ep->lookup_and_lock(thread));
+	_thread_ep->apply(thread, [this] (Cpu_thread_component *cpu_thread)
+	{
 		if (!cpu_thread) return;
 		_pd.space_pager(cpu_thread->platform_thread());
+	});
 }
@@ -149,7 +149,7 @@ void Ipc_pager::acknowledge_wakeup()
  ** Pager entrypoint **
  **********************/

-Untyped_capability Pager_entrypoint::_manage(Pager_object *obj)
+Untyped_capability Pager_entrypoint::_pager_object_cap(unsigned long badge)
 {
-	return Untyped_capability(_tid.l4id, obj->badge());
+	return Untyped_capability(_tid.l4id, badge);
 }
@@ -140,7 +140,7 @@ void Ipc_pager::acknowledge_wakeup()
  ** Pager entrypoint **
  **********************/

-Untyped_capability Pager_entrypoint::_manage(Pager_object *obj)
+Untyped_capability Pager_entrypoint::_pager_object_cap(unsigned long badge)
 {
-	return Untyped_capability(_tid.l4id, obj->badge());
+	return Untyped_capability(_tid.l4id, badge);
 }
@@ -17,6 +17,7 @@
  */

 /* Genode includes */
+#include <internal/capability_space_sel4.h>
 #include <base/rpc_server.h>

 using namespace Genode;
@@ -65,23 +66,14 @@ void Rpc_entrypoint::entry()
 		srv.ret(Ipc_client::ERR_INVALID_OBJECT);

 		/* atomically lookup and lock referenced object */
-		Object_pool<Rpc_object_base>::Guard curr_obj(lookup_and_lock(srv.badge()));
-		if (!curr_obj)
-			continue;
-
-		{
-			Lock::Guard lock_guard(_curr_obj_lock);
-			_curr_obj = curr_obj;
-		}
-
-		/* dispatch request */
-		try { srv.ret(_curr_obj->dispatch(opcode, srv, srv)); }
-		catch (Blocking_canceled) { }
-
-		{
-			Lock::Guard lock_guard(_curr_obj_lock);
-			_curr_obj = 0;
-		}
+		auto lambda = [&] (Rpc_object_base *obj) {
+			if (!obj) return;
+
+			/* dispatch request */
+			try { srv.ret(obj->dispatch(opcode, srv, srv)); }
+			catch (Blocking_canceled) { }
+		};
+		apply(srv.badge(), lambda);
 	}

 	/* answer exit call, thereby wake up '~Rpc_entrypoint' */
@@ -89,6 +81,4 @@ void Rpc_entrypoint::entry()

 	/* defer the destruction of 'Ipc_server' until '~Rpc_entrypoint' is ready */
 	_delay_exit.lock();
-
-
 }
@@ -28,36 +28,39 @@ Core_rm_session::attach(Dataspace_capability ds_cap, size_t size,
                        Rm_session::Local_addr local_addr,
                        bool executable)
 {
-	Object_pool<Dataspace_component>::Guard ds(_ds_ep->lookup_and_lock(ds_cap));
+	auto lambda = [&] (Dataspace_component *ds) -> Local_addr {
 		if (!ds)
 			throw Invalid_dataspace();

 		if (size == 0)
 			size = ds->size();

 		size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();

 		if (use_local_addr) {
 			PERR("Parameter 'use_local_addr' not supported within core");
-			return 0;
+			return nullptr;
 		}

 		if (offset) {
 			PERR("Parameter 'offset' not supported within core");
-			return 0;
+			return nullptr;
 		}

 		/* allocate range in core's virtual address space */
 		void *virt_addr;
 		if (!platform()->region_alloc()->alloc(page_rounded_size, &virt_addr)) {
 			PERR("Could not allocate virtual address range in core of size %zd\n",
 			     page_rounded_size);
-			return false;
+			return nullptr;
 		}

 		/* map the dataspace's physical pages to core-local virtual addresses */
 		size_t num_pages = page_rounded_size >> get_page_size_log2();
 		map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages);

 		return virt_addr;
+	};

+	return _ds_ep->apply(ds_cap, lambda);
 }
@@ -120,14 +120,14 @@ void Pager_object::unresolved_page_fault_occurred()
  ** Pager entrypoint **
  **********************/

-Untyped_capability Pager_entrypoint::_manage(Pager_object *obj)
+Untyped_capability Pager_entrypoint::_pager_object_cap(unsigned long badge)
 {
 	/*
 	 * Create minted endpoint capability of the pager entrypoint.
 	 * The badge of the page-fault message is used to find the pager
 	 * object for faulted thread.
 	 */
-	Rpc_obj_key rpc_obj_key((addr_t)obj->badge());
+	Rpc_obj_key rpc_obj_key((addr_t)badge);

 	Untyped_capability ep_cap(Capability_space::create_ep_cap(*this));
 	return Capability_space::create_rpc_obj_cap(ep_cap, nullptr, rpc_obj_key);
@@ -224,6 +224,8 @@ class Genode::Child : protected Rpc_object<Parent>
 		 */
 		void _remove_session(Session *s);

+		void _close(Session *s);
+
 		/**
 		 * Return service interface targetting the parent
 		 *
@@ -1,7 +1,8 @@
 /*
- * \brief  Object pool - map ids to objects
+ * \brief  Object pool - map capabilities to objects
  * \author Norman Feske
  * \author Alexander Boettcher
+ * \author Stefan Kalkowski
  * \date   2006-06-26
  */

@@ -23,7 +24,7 @@ namespace Genode { template <typename> class Object_pool; }


 /**
- * Map object ids to local objects
+ * Map capabilities to local objects
  *
  * \param OBJ_TYPE object type (must be inherited from Object_pool::Entry)
 *
@@ -35,66 +36,25 @@ class Genode::Object_pool
 {
 	public:

-		class Guard
-		{
-			private:
-
-				OBJ_TYPE * _object;
-
-			public:
-				operator OBJ_TYPE*()    const { return _object; }
-				OBJ_TYPE * operator->() const { return _object; }
-				OBJ_TYPE * object()     const { return _object; }
-
-				template <class X>
-				explicit Guard(X * object) {
-					_object = dynamic_cast<OBJ_TYPE *>(object); }
-
-				~Guard()
-				{
-					if (!_object) return;
-
-					_object->release();
-				}
-		};
-
 		class Entry : public Avl_node<Entry>
 		{
 			private:

 				Untyped_capability _cap;
-				short int          _ref;
-				bool               _dead;
-
-				Lock _entry_lock;
+				Lock               _lock;

 				inline unsigned long _obj_id() { return _cap.local_name(); }

 				friend class Object_pool;
 				friend class Avl_tree<Entry>;

-				/*
-				 * Support methods for atomic lookup and lock functionality of
-				 * class Object_pool.
-				 */
-
-				void lock()   { _entry_lock.lock(); };
-				void unlock() { _entry_lock.unlock(); };
-
-				void add_ref() { _ref += 1; }
-				void del_ref() { _ref -= 1; }
-
-				bool is_dead(bool set_dead = false) {
-					return (set_dead ? (_dead = true) : _dead); }
-				bool is_ref_zero() { return _ref <= 0; }
-
 			public:

 				/**
 				 * Constructors
 				 */
-				Entry() : _ref(0), _dead(false) { }
-				Entry(Untyped_capability cap) : _cap(cap), _ref(0), _dead(false) { }
+				Entry() { }
+				Entry(Untyped_capability cap) : _cap(cap) { }

 				/**
 				 * Avl_node interface
@@ -120,12 +80,7 @@ class Genode::Object_pool
 				void cap(Untyped_capability c) { _cap = c; }

 				Untyped_capability const cap() const { return _cap; }
+				Lock& lock() { return _lock; }

-				/**
-				 * Function used - ideally - solely by the Guard.
-				 */
-				void release() { del_ref(); unlock(); }
-				void acquire() { lock(); add_ref(); }
 		};

 	private:
@@ -133,6 +88,58 @@ class Genode::Object_pool
 		Avl_tree<Entry> _tree;
 		Lock            _lock;

+		OBJ_TYPE* _obj_by_capid(unsigned long capid)
+		{
+			Entry *ret = _tree.first() ? _tree.first()->find_by_obj_id(capid)
+			                           : nullptr;
+			return static_cast<OBJ_TYPE*>(ret);
+		}
+
+		template <typename FUNC, typename RET>
+		struct Apply_functor
+		{
+			RET operator()(OBJ_TYPE *obj, FUNC f)
+			{
+				using Functor        = Trait::Functor<decltype(&FUNC::operator())>;
+				using Object_pointer = typename Functor::template Argument<0>::Type;
+
+				try {
+					auto ret = f(dynamic_cast<Object_pointer>(obj));
+					if (obj) obj->_lock.unlock();
+					return ret;
+				} catch(...) {
+					if (obj) obj->_lock.unlock();
+					throw;
+				}
+			}
+		};
+
+		template <typename FUNC>
+		struct Apply_functor<FUNC, void>
+		{
+			void operator()(OBJ_TYPE *obj, FUNC f)
+			{
+				using Functor        = Trait::Functor<decltype(&FUNC::operator())>;
+				using Object_pointer = typename Functor::template Argument<0>::Type;
+
+				try {
+					f(dynamic_cast<Object_pointer>(obj));
+					if (obj) obj->_lock.unlock();
+				} catch(...) {
+					if (obj) obj->_lock.unlock();
+					throw;
+				}
+			}
+		};
+
+	protected:
+
+		bool empty()
+		{
+			Lock::Guard lock_guard(_lock);
+			return _tree.first() == nullptr;
+		}
+
 	public:

 		void insert(OBJ_TYPE *obj)
@@ -141,74 +148,60 @@ class Genode::Object_pool
 			_tree.insert(obj);
 		}

-		void remove_locked(OBJ_TYPE *obj)
+		void remove(OBJ_TYPE *obj)
 		{
-			obj->is_dead(true);
-			obj->del_ref();
-
-			while (true) {
-				obj->unlock();
-				{
-					Lock::Guard lock_guard(_lock);
-					if (obj->is_ref_zero()) {
-						_tree.remove(obj);
-						return;
-					}
-				}
-				obj->lock();
-			}
+			Lock::Guard lock_guard(_lock);
+			_tree.remove(obj);
 		}

-		/**
-		 * Lookup object
-		 */
-		OBJ_TYPE *lookup_and_lock(addr_t obj_id)
+		template <typename FUNC>
+		auto apply(unsigned long capid, FUNC func)
+		-> typename Trait::Functor<decltype(&FUNC::operator())>::Return_type
 		{
-			OBJ_TYPE * obj_typed;
+			using Functor = Trait::Functor<decltype(&FUNC::operator())>;

+			OBJ_TYPE * obj;

 			{
 				Lock::Guard lock_guard(_lock);
-				Entry *obj = _tree.first();
-				if (!obj) return 0;

-				obj_typed = (OBJ_TYPE *)obj->find_by_obj_id(obj_id);
-				if (!obj_typed || obj_typed->is_dead())
-					return 0;
-
-				obj_typed->add_ref();
+				obj = _obj_by_capid(capid);
+				if (obj) obj->_lock.lock();
 			}

-			obj_typed->lock();
-			return obj_typed;
+			Apply_functor<FUNC, typename Functor::Return_type> hf;
+			return hf(obj, func);
 		}

-		OBJ_TYPE *lookup_and_lock(Untyped_capability cap)
+		template <typename FUNC>
+		auto apply(Untyped_capability cap, FUNC func)
+		-> typename Trait::Functor<decltype(&FUNC::operator())>::Return_type
 		{
-			return lookup_and_lock(cap.local_name());
+			return apply(cap.local_name(), func);
 		}

-		/**
-		 * Return first element of tree
-		 *
-		 * This function is used for removing tree elements step by step.
-		 */
-		OBJ_TYPE *first()
+		template <typename FUNC>
+		void remove_all(FUNC func)
 		{
-			Lock::Guard lock_guard(_lock);
-			return (OBJ_TYPE *)_tree.first();
-		}
-
-		/**
-		 * Return first element of tree locked
-		 *
-		 * This function is used for removing tree elements step by step.
-		 */
-		OBJ_TYPE *first_locked()
-		{
-			Lock::Guard lock_guard(_lock);
-			OBJ_TYPE * const obj_typed = (OBJ_TYPE *)_tree.first();
-			if (!obj_typed) { return 0; }
-			obj_typed->lock();
-			return obj_typed;
+			for (;;) {
+				OBJ_TYPE * obj;
+
+				{
+					Lock::Guard lock_guard(_lock);
+
+					obj = (OBJ_TYPE*) _tree.first();
+
+					if (!obj) return;
+
+					{
+						Lock::Guard object_guard(obj->_lock);
+						_tree.remove(obj);
+					}
+				}
+
+				func(obj);
+			}
 		}
 };

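To summarize the redesigned interface in one place, here is a minimal sketch of a pool being populated with a custom entry type, queried via 'apply', and drained via 'remove_all'. 'Session', its 'quota' member, and the include paths are assumptions for illustration, not part of the patch:

    #include <base/object_pool.h>   /* assumed include path of the patched header */
    #include <base/allocator.h>

    using namespace Genode;

    struct Session : Object_pool<Session>::Entry
    {
        unsigned quota;
        Session(Untyped_capability cap, unsigned quota) : Entry(cap), quota(quota) { }
    };

    static unsigned lookup_quota(Object_pool<Session> &pool, Untyped_capability cap)
    {
        /* the entry stays locked while the lambda runs, also on exceptions;
           a null pointer signals that no entry matches 'cap' */
        return pool.apply(cap, [] (Session *s) -> unsigned {
            return s ? s->quota : 0; });
    }

    static void drain(Object_pool<Session> &pool, Allocator &alloc)
    {
        /* each entry is unlinked under the pool lock, then handed to the functor
           (assuming the sessions were allocated from 'alloc') */
        pool.remove_all([&] (Session *s) { destroy(&alloc, s); });
    }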
@@ -275,8 +275,6 @@ class Genode::Rpc_entrypoint : Thread_base, public Object_pool<Rpc_object_base>
 	protected:

 		Ipc_server *_ipc_server;
-		Rpc_object_base *_curr_obj;      /* currently dispatched RPC object   */
-		Lock             _curr_obj_lock; /* for the protection of '_curr_obj' */
 		Lock _cap_valid;   /* thread startup synchronization     */
 		Lock _delay_start; /* delay start of request dispatching */
 		Lock _delay_exit;  /* delay destructor until server settled */
@@ -298,13 +296,6 @@ class Genode::Rpc_entrypoint : Thread_base, public Object_pool<Rpc_object_base>
 		 */
 		void _dissolve(Rpc_object_base *obj);

-		/**
-		 * Force activation to cancel dispatching the specified server object
-		 *
-		 * \noapi
-		 */
-		void _leave_server_object(Rpc_object_base *obj);
-
 		/**
 		 * Wait until the entrypoint activation is initialized
 		 *
@@ -248,26 +248,29 @@ class Genode::Root_component : public Rpc_object<Typed_root<SESSION_TYPE> >,
 		{
 			if (!args.is_valid_string()) throw Root::Invalid_args();

-			typedef typename Object_pool<SESSION_TYPE>::Guard Object_guard;
-			Object_guard s(_ep->lookup_and_lock(session));
-			if (!s) return;
+			_ep->apply(session, [&] (SESSION_TYPE *s) {
+				if (!s) return;

 				_upgrade_session(s, args.string());
+			});
 		}

-		void close(Session_capability session) override
+		void close(Session_capability session_cap) override
 		{
-			SESSION_TYPE * s =
-				dynamic_cast<SESSION_TYPE *>(_ep->lookup_and_lock(session));
-			if (!s) return;
+			SESSION_TYPE * session;

-			/* let the entry point forget the session object */
-			_ep->dissolve(s);
+			_ep->apply(session_cap, [&] (SESSION_TYPE *s) {
+				session = s;

-			_destroy_session(s);
+				/* let the entry point forget the session object */
+				if (session) _ep->dissolve(session);
+			});
+
+			if (!session) return;
+
+			_destroy_session(session);

 			POLICY::release();
-			return;
 		}
 };

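'close' above must destroy the session only after 'apply' has returned, because the pool still holds the entry lock while the lambda runs; destroying the object inside the lambda would free the very lock that 'apply' unlocks afterwards. The pointer is therefore captured out of the lambda first. The same idiom in isolation, with '_ep', 'Session_component', and '_md_alloc' as placeholders:

    Session_component *to_destroy = nullptr;

    _ep->apply(session_cap, [&] (Session_component *s) {
        to_destroy = s;             /* remember the pointer for use after 'apply' */
        if (s) _ep->dissolve(s);    /* make it unreachable while still locked     */
    });

    if (to_destroy)
        destroy(&_md_alloc, to_destroy);   /* now safe: no lookup can reach it */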
@@ -43,6 +43,23 @@ namespace Genode {

 	namespace Meta {

+		/***********************************
+		 ** Variadic template type access **
+		 ***********************************/
+
+		template <unsigned long N, typename HEAD, typename... TAIL>
+		struct Variadic_type_tuple
+		{
+			using Type = typename Variadic_type_tuple<N-1, TAIL...>::Type;
+		};
+
+		template <typename HEAD, typename... TAIL>
+		struct Variadic_type_tuple<0, HEAD, TAIL...>
+		{
+			using Type = HEAD;
+		};
+
+
 		/***************
 		 ** Type list **
 		 ***************/
@@ -643,6 +660,28 @@ namespace Genode {
 		template <bool VALUE> struct Bool_to_type { enum { V = VALUE }; };

 	} /* namespace Meta */

+	namespace Trait {
+
+		template<typename T> struct Functor;
+
+		template<typename RET, typename T, typename... ARGS>
+		struct Functor<RET (T::*)(ARGS...) const>
+		{
+			static constexpr unsigned long argument_count = sizeof...(ARGS);
+
+			using Return_type = RET;
+
+			template <unsigned long N>
+			struct Argument
+			{
+				static_assert(N < argument_count, "Invalid index");
+
+				using Type =
+					typename Meta::Variadic_type_tuple<N, ARGS...>::Type;
+			};
+		};
+	} /* namespace Trait */
 }

 #endif /* _INCLUDE__BASE__UTIL__META_H_ */
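A small compile-time illustration of what the new trait extracts from a lambda's 'operator()' (a sketch; the include path is an assumption):

    #include <util/meta.h>   /* assumed include path of the header patched above */

    static void functor_trait_example()
    {
        auto f = [] (char *ptr, int len) -> long { return ptr ? len : 0; };

        using F = Genode::Trait::Functor<decltype(&decltype(f)::operator())>;

        static_assert(F::argument_count == 2, "lambda takes two arguments");

        F::Return_type       r = 0;        /* deduced as 'long'   */
        F::Argument<0>::Type a = nullptr;  /* deduced as 'char *' */
        (void)r; (void)a; (void)f;
    }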
@@ -198,7 +198,6 @@ void Child::_add_session(Child::Session const &s)
 void Child::_remove_session(Child::Session *s)
 {
 	/* forget about this session */
-	_session_pool.remove_locked(s);
 	_session_list.remove(s);

 	/* return session quota to the ram session of the child */
@@ -216,6 +215,51 @@ Service *Child::_parent_service()
 }


+void Child::_close(Session* s)
+{
+	if (!s) {
+		PWRN("no session structure found");
+		return;
+	}
+
+	/*
+	 * There is a chance that the server is not responding to the 'close' call,
+	 * making us block infinitely. However, by using core's cancel-blocking
+	 * mechanism, we can cancel the 'close' call by another (watchdog) thread
+	 * that invokes 'cancel_blocking' at our thread after a timeout. The
+	 * unblocking is reflected at the API level as an 'Blocking_canceled'
+	 * exception. We catch this exception to proceed with normal operation
+	 * after being unblocked.
+	 */
+	try { s->service()->close(s->cap()); }
+	catch (Blocking_canceled) {
+		PDBG("Got Blocking_canceled exception during %s->close call\n",
+		     s->ident()); }
+
+	/*
+	 * If the session was provided by a child of us,
+	 * 'server()->ram_session_cap()' returns the RAM session of the
+	 * corresponding child. Since the session to the server is closed now, we
+	 * expect that the server released all donated resources and we can
+	 * decrease the servers' quota.
+	 *
+	 * If this goes wrong, the server is misbehaving.
+	 */
+	if (s->service()->ram_session_cap().valid()) {
+		Ram_session_client server_ram(s->service()->ram_session_cap());
+		if (server_ram.transfer_quota(env()->ram_session_cap(),
+		                              s->donated_ram_quota())) {
+			PERR("Misbehaving server '%s'!", s->service()->name());
+		}
+	}
+
+	{
+		Lock::Guard lock_guard(_lock);
+		_remove_session(s);
+	}
+}
+
+
 void Child::revoke_server(Server const *server)
 {
 	Lock::Guard lock_guard(_lock);
@@ -228,6 +272,8 @@ void Child::revoke_server(Server const *server)
 		/* if no matching session exists, we are done */
 		if (!s) return;

+		_session_pool.apply(s->cap(), [&] (Session *s) {
+			if (s) _session_pool.remove(s); });
 		_remove_session(s);
 	}
 }
|
|||||||
targeted_service = &_pd_service;
|
targeted_service = &_pd_service;
|
||||||
|
|
||||||
/* check if upgrade refers to server */
|
/* check if upgrade refers to server */
|
||||||
Object_pool<Session>::Guard session(_session_pool.lookup_and_lock(to_session));
|
_session_pool.apply(to_session, [&] (Session *session)
|
||||||
if (session)
|
{
|
||||||
targeted_service = session->service();
|
if (session)
|
||||||
|
targeted_service = session->service();
|
||||||
|
|
||||||
if (!targeted_service) {
|
if (!targeted_service) {
|
||||||
PWRN("could not lookup service for session upgrade");
|
PWRN("could not lookup service for session upgrade");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!args.is_valid_string()) {
|
if (!args.is_valid_string()) {
|
||||||
PWRN("no valid session-upgrade arguments");
|
PWRN("no valid session-upgrade arguments");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t const ram_quota =
|
size_t const ram_quota =
|
||||||
Arg_string::find_arg(args.string(), "ram_quota").ulong_value(0);
|
Arg_string::find_arg(args.string(), "ram_quota").ulong_value(0);
|
||||||
|
|
||||||
/* transfer quota from client to ourself */
|
/* transfer quota from client to ourself */
|
||||||
Transfer donation_from_child(ram_quota, _ram,
|
Transfer donation_from_child(ram_quota, _ram,
|
||||||
env()->ram_session_cap());
|
env()->ram_session_cap());
|
||||||
|
|
||||||
/* transfer session quota from ourself to the service provider */
|
/* transfer session quota from ourself to the service provider */
|
||||||
Transfer donation_to_service(ram_quota, env()->ram_session_cap(),
|
Transfer donation_to_service(ram_quota, env()->ram_session_cap(),
|
||||||
targeted_service->ram_session_cap());
|
targeted_service->ram_session_cap());
|
||||||
|
|
||||||
try { targeted_service->upgrade(to_session, args.string()); }
|
try { targeted_service->upgrade(to_session, args.string()); }
|
||||||
catch (Service::Quota_exceeded) { throw Quota_exceeded(); }
|
catch (Service::Quota_exceeded) { throw Quota_exceeded(); }
|
||||||
|
|
||||||
/* remember new amount attached to the session */
|
/* remember new amount attached to the session */
|
||||||
if (session)
|
if (session)
|
||||||
session->upgrade_ram_quota(ram_quota);
|
session->upgrade_ram_quota(ram_quota);
|
||||||
|
|
||||||
/* finish transaction */
|
/* finish transaction */
|
||||||
donation_from_child.acknowledge();
|
donation_from_child.acknowledge();
|
||||||
donation_to_service.acknowledge();
|
donation_to_service.acknowledge();
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -376,46 +424,13 @@ void Child::close(Session_capability session_cap)
|
|||||||
|| session_cap.local_name() == _pd.local_name())
|
|| session_cap.local_name() == _pd.local_name())
|
||||||
return;
|
return;
|
||||||
|
|
||||||
Session *s = _session_pool.lookup_and_lock(session_cap);
|
Session *session = nullptr;
|
||||||
|
_session_pool.apply(session_cap, [&] (Session *s)
|
||||||
if (!s) {
|
{
|
||||||
PWRN("no session structure found");
|
session = s;
|
||||||
return;
|
if (s) _session_pool.remove(s);
|
||||||
}
|
});
|
||||||
|
_close(session);
|
||||||
/*
|
|
||||||
* There is a chance that the server is not responding to the 'close' call,
|
|
||||||
* making us block infinitely. However, by using core's cancel-blocking
|
|
||||||
* mechanism, we can cancel the 'close' call by another (watchdog) thread
|
|
||||||
* that invokes 'cancel_blocking' at our thread after a timeout. The
|
|
||||||
* unblocking is reflected at the API level as an 'Blocking_canceled'
|
|
||||||
* exception. We catch this exception to proceed with normal operation
|
|
||||||
* after being unblocked.
|
|
||||||
*/
|
|
||||||
try { s->service()->close(s->cap()); }
|
|
||||||
catch (Blocking_canceled) {
|
|
||||||
PDBG("Got Blocking_canceled exception during %s->close call\n",
|
|
||||||
s->ident()); }
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If the session was provided by a child of us,
|
|
||||||
* 'server()->ram_session_cap()' returns the RAM session of the
|
|
||||||
* corresponding child. Since the session to the server is closed now, we
|
|
||||||
* expect that the server released all donated resources and we can
|
|
||||||
* decrease the servers' quota.
|
|
||||||
*
|
|
||||||
* If this goes wrong, the server is misbehaving.
|
|
||||||
*/
|
|
||||||
if (s->service()->ram_session_cap().valid()) {
|
|
||||||
Ram_session_client server_ram(s->service()->ram_session_cap());
|
|
||||||
if (server_ram.transfer_quota(env()->ram_session_cap(),
|
|
||||||
s->donated_ram_quota())) {
|
|
||||||
PERR("Misbehaving server '%s'!", s->service()->name());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Lock::Guard lock_guard(_lock);
|
|
||||||
_remove_session(s);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -495,7 +510,6 @@ Child::~Child()
|
|||||||
_entrypoint->dissolve(this);
|
_entrypoint->dissolve(this);
|
||||||
_policy->unregister_services();
|
_policy->unregister_services();
|
||||||
|
|
||||||
for (Session *s; (s = _session_pool.first()); )
|
_session_pool.remove_all([&] (Session *s) { _close(s); });
|
||||||
close(s->cap());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -22,18 +22,7 @@ using namespace Genode;
 void Rpc_entrypoint::_dissolve(Rpc_object_base *obj)
 {
 	/* make sure nobody is able to find this object */
-	remove_locked(obj);
-
-	/*
-	 * The activation may execute a blocking operation in a dispatch function.
-	 * Before resolving the corresponding object, we need to ensure that it is
-	 * no longer used. Therefore, we to need cancel an eventually blocking
-	 * operation and let the activation leave the context of the object.
-	 */
-	_leave_server_object(obj);
-
-	/* wait until nobody is inside dispatch */
-	obj->acquire();
+	remove(obj);

 	_cap_session->free(obj->cap());

@@ -41,15 +30,6 @@ void Rpc_entrypoint::_dissolve(Rpc_object_base *obj)
 }


-void Rpc_entrypoint::_leave_server_object(Rpc_object_base *obj)
-{
-	Lock::Guard lock_guard(_curr_obj_lock);
-
-	if (obj == _curr_obj)
-		cancel_blocking();
-}
-
-
 void Rpc_entrypoint::_block_until_cap_valid()
 {
 	_cap_valid.lock();
@@ -104,7 +84,7 @@ Rpc_entrypoint::Rpc_entrypoint(Cap_session *cap_session, size_t stack_size,
 :
 	Thread_base(Cpu_session::DEFAULT_WEIGHT, name, stack_size),
 	_cap(Untyped_capability()),
-	_curr_obj(0), _cap_valid(Lock::LOCKED), _delay_start(Lock::LOCKED),
+	_cap_valid(Lock::LOCKED), _delay_start(Lock::LOCKED),
 	_delay_exit(Lock::LOCKED),
 	_cap_session(cap_session)
 {
@@ -124,8 +104,6 @@ Rpc_entrypoint::Rpc_entrypoint(Cap_session *cap_session, size_t stack_size,

 Rpc_entrypoint::~Rpc_entrypoint()
 {
-	typedef Object_pool<Rpc_object_base> Pool;
-
 	/*
 	 * We have to make sure the server loop is running which is only the case
 	 * if the Rpc_entrypoint was actived before we execute the RPC call.
@@ -137,14 +115,9 @@ Rpc_entrypoint::~Rpc_entrypoint()

 	dissolve(&_exit_handler);

-	if (Pool::first()) {
+	if (!empty())
 		PWRN("Object pool not empty in %s", __func__);

-		/* dissolve all objects - objects are not destroyed! */
-		while (Rpc_object_base *obj = Pool::first())
-			_dissolve(obj);
-	}
-
 	/*
 	 * Now that we finished the 'dissolve' steps above (which need a working
 	 * 'Ipc_server' in the context of the entrypoint thread), we can allow the
@@ -42,6 +42,8 @@ Untyped_capability Rpc_entrypoint::_manage(Rpc_object_base *obj)

 void Rpc_entrypoint::entry()
 {
+	using Pool = Object_pool<Rpc_object_base>;
+
 	Ipc_server srv(&_snd_buf, &_rcv_buf);
 	_ipc_server = &srv;
 	_cap = srv;
@@ -65,24 +67,13 @@ void Rpc_entrypoint::entry()
 		/* set default return value */
 		srv.ret(Ipc_client::ERR_INVALID_OBJECT);

-		/* atomically lookup and lock referenced object */
-		Object_pool<Rpc_object_base>::Guard curr_obj(lookup_and_lock(srv.badge()));
-		if (!curr_obj)
-			continue;
-
-		{
-			Lock::Guard lock_guard(_curr_obj_lock);
-			_curr_obj = curr_obj;
-		}
-
-		/* dispatch request */
-		try { srv.ret(_curr_obj->dispatch(opcode, srv, srv)); }
-		catch (Blocking_canceled) { }
-
-		{
-			Lock::Guard lock_guard(_curr_obj_lock);
-			_curr_obj = 0;
-		}
+		Pool::apply(srv.badge(), [&] (Rpc_object_base *obj)
+		{
+			if (!obj) { return; }
+			try {
+				srv.ret(obj->dispatch(opcode, srv, srv));
+			} catch(Blocking_canceled&) { }
+		});
 	}

 	/* answer exit call, thereby wake up '~Rpc_entrypoint' */
@@ -103,91 +103,108 @@ void Cpu_session_component::_unsynchronized_kill_thread(Cpu_thread_component *th

 void Cpu_session_component::kill_thread(Thread_capability thread_cap)
 {
-	Cpu_thread_component * thread =
-		dynamic_cast<Cpu_thread_component *>(_thread_ep->lookup_and_lock(thread_cap));
-	if (!thread) return;
+	auto lambda = [this] (Cpu_thread_component *thread) {
+		if (!thread) return;

 		Lock::Guard lock_guard(_thread_list_lock);
 		_unsynchronized_kill_thread(thread);
+	};
+	_thread_ep->apply(thread_cap, lambda);
 }


 int Cpu_session_component::set_pager(Thread_capability thread_cap,
                                      Pager_capability  pager_cap)
 {
-	Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
+	auto lambda = [&] (Cpu_thread_component *thread) {
 		if (!thread) return -1;

-	Object_pool<Pager_object>::Guard p(_pager_ep->lookup_and_lock(pager_cap));
+		auto p_lambda = [&] (Pager_object *p) {
 			if (!p) return -2;

 			thread->platform_thread()->pager(p);

 			p->thread_cap(thread->cap());

 			return 0;
+		};
+		return _pager_ep->apply(pager_cap, p_lambda);
+	};
+	return _thread_ep->apply(thread_cap, lambda);
 }


 int Cpu_session_component::start(Thread_capability thread_cap,
                                  addr_t ip, addr_t sp)
 {
-	Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
+	auto lambda = [&] (Cpu_thread_component *thread) {
 		if (!thread) return -1;

 		/*
 		 * If an exception handler was installed prior to the call of 'set_pager',
 		 * we need to update the pager object with the current exception handler.
 		 */
 		thread->update_exception_sigh();

 		return thread->platform_thread()->start((void *)ip, (void *)sp);
+	};
+	return _thread_ep->apply(thread_cap, lambda);
 }


 void Cpu_session_component::pause(Thread_capability thread_cap)
 {
-	Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
+	auto lambda = [this] (Cpu_thread_component *thread) {
 		if (!thread) return;

 		thread->platform_thread()->pause();
+	};
+	_thread_ep->apply(thread_cap, lambda);
 }


 void Cpu_session_component::resume(Thread_capability thread_cap)
 {
-	Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
+	auto lambda = [this] (Cpu_thread_component *thread) {
 		if (!thread) return;

 		thread->platform_thread()->resume();
+	};
+	_thread_ep->apply(thread_cap, lambda);
 }


 void Cpu_session_component::cancel_blocking(Thread_capability thread_cap)
 {
-	Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
+	auto lambda = [this] (Cpu_thread_component *thread) {
 		if (!thread) return;

 		thread->platform_thread()->cancel_blocking();
+	};
+	_thread_ep->apply(thread_cap, lambda);
 }


 Thread_state Cpu_session_component::state(Thread_capability thread_cap)
 {
-	Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
+	auto lambda = [this] (Cpu_thread_component *thread) {
 		if (!thread) throw State_access_failed();

 		return thread->platform_thread()->state();
+	};
+	return _thread_ep->apply(thread_cap, lambda);
 }


 void Cpu_session_component::state(Thread_capability thread_cap,
                                   Thread_state const &state)
 {
-	Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
+	auto lambda = [&] (Cpu_thread_component *thread) {
 		if (!thread) throw State_access_failed();

 		thread->platform_thread()->state(state);
+	};
+	_thread_ep->apply(thread_cap, lambda);
 }


@@ -212,10 +229,12 @@ Cpu_session_component::exception_handler(Thread_capability thread_cap,
 		sigh_cap = _default_exception_handler;
 	}

-	Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
+	auto lambda = [&] (Cpu_thread_component *thread) {
 		if (!thread) return;

 		thread->sigh(sigh_cap);
+	};
+	_thread_ep->apply(thread_cap, lambda);
 }


@@ -232,23 +251,25 @@ Affinity::Space Cpu_session_component::affinity_space() const
 void Cpu_session_component::affinity(Thread_capability thread_cap,
                                      Affinity::Location location)
 {
-	Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
+	auto lambda = [&] (Cpu_thread_component *thread) {
 		if (!thread) return;

 		/* convert session-local location to physical location */
 		int const x1 = location.xpos() + _location.xpos(),
 		          y1 = location.ypos() + _location.ypos(),
 		          x2 = location.xpos() + location.width(),
 		          y2 = location.ypos() + location.height();

 		int const clipped_x1 = max(_location.xpos(), x1),
 		          clipped_y1 = max(_location.ypos(), y1),
 		          clipped_x2 = max(_location.xpos() + (int)_location.width()  - 1, x2),
 		          clipped_y2 = max(_location.ypos() + (int)_location.height() - 1, y2);

 		thread->platform_thread()->affinity(Affinity::Location(clipped_x1, clipped_y1,
 		                                                       clipped_x2 - clipped_x1 + 1,
 		                                                       clipped_y2 - clipped_y1 + 1));
+	};
+	_thread_ep->apply(thread_cap, lambda);
 }


@@ -260,28 +281,34 @@ Dataspace_capability Cpu_session_component::trace_control()

 unsigned Cpu_session_component::trace_control_index(Thread_capability thread_cap)
 {
-	Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
+	auto lambda = [] (Cpu_thread_component *thread) -> unsigned {
 		if (!thread) return 0;

 		return thread->trace_control_index();
+	};
+	return _thread_ep->apply(thread_cap, lambda);
 }


 Dataspace_capability Cpu_session_component::trace_buffer(Thread_capability thread_cap)
 {
-	Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
+	auto lambda = [this] (Cpu_thread_component *thread) {
 		if (!thread) return Dataspace_capability();

 		return thread->trace_source()->buffer();
+	};
+	return _thread_ep->apply(thread_cap, lambda);
 }


 Dataspace_capability Cpu_session_component::trace_policy(Thread_capability thread_cap)
 {
-	Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
+	auto lambda = [this] (Cpu_thread_component *thread) {
 		if (!thread) return Dataspace_capability();

 		return thread->trace_source()->policy();
+	};
+	return _thread_ep->apply(thread_cap, lambda);
 }


@@ -309,29 +336,30 @@ int Cpu_session_component::transfer_quota(Cpu_session_capability dst_cap,
                                           size_t amount)
 {
 	/* lookup targeted CPU session */
-	Object_pool<Cpu_session_component>::Guard
-		dst(_session_ep->lookup_and_lock(dst_cap));
+	auto lambda = [&] (Cpu_session_component *dst) {
 		if (!dst) {
 			PWRN("Transfer CPU quota, %s, targeted session not found",
 			     _label.string());
 			return -1;
 		}
 		/* check reference relationship */
 		if (dst->_ref != this && dst != _ref) {
 			PWRN("Transfer CPU quota, %s -> %s, no reference relation",
 			     _label.string(), dst->_label.string());
 			return -2;
 		}
 		/* check quota availability */
 		size_t const quota = quota_lim_downscale(_quota, amount);
 		if (quota > _quota) {
 			PWRN("Transfer CPU quota, %s -> %s, insufficient quota %zu, need %zu",
 			     _label.string(), dst->_label.string(), _quota, quota);
 			return -3;
 		}
 		/* transfer quota */
 		_transfer_quota(dst, quota);
 		return 0;
+	};
+	return _session_ep->apply(dst_cap, lambda);
 }


@@ -348,22 +376,23 @@ int Cpu_session_component::ref_account(Cpu_session_capability ref_cap)
 		return -2; }

 	/* lookup and check targeted CPU-session */
-	Object_pool<Cpu_session_component>::Guard
-		ref(_session_ep->lookup_and_lock(ref_cap));
+	auto lambda = [&] (Cpu_session_component *ref) {
 		if (!ref) {
 			PWRN("Set ref account, %s, targeted session not found",
 			     _label.string());
 			return -1;
 		}
 		if (ref == this) {
 			PWRN("Set ref account, %s, self reference not allowed",
 			     _label.string());
 			return -3;
 		}
 		/* establish ref-account relation from targeted CPU-session to us */
 		_ref = ref;
 		_ref->_insert_ref_member(this);
 		return 0;
+	};
+	return _session_ep->apply(ref_cap, lambda);
 }

@@ -41,12 +41,13 @@ namespace Genode {
 			                   Local_addr local_addr = 0,
 			                   bool executable = false)
 			{
-				Object_pool<Dataspace_component>::Guard
-					ds(_ds_ep->lookup_and_lock(ds_cap));
+				auto lambda = [] (Dataspace_component *ds) {
 					if (!ds)
 						throw Invalid_dataspace();

 					return (void *)ds->phys_addr();
+				};
+				return _ds_ep->apply(ds_cap, lambda);
 			}

 			void detach(Local_addr local_addr) { }
@@ -134,7 +134,7 @@ class Genode::Pager_entrypoint : public Object_pool<Pager_object>,
 		Ipc_pager    _pager;
 		Cap_session *_cap_session;

-		Untyped_capability _manage(Pager_object *obj);
+		Untyped_capability _pager_object_cap(unsigned long badge);

 	public:

@@ -56,18 +56,20 @@ namespace Genode {
 				Session_capability cap = Root_component<Rm_session_component>::session(args, affinity);

 				/* lookup rm_session_component object */
-				Object_pool<Rm_session_component>::Guard rm_session(ep()->lookup_and_lock(cap));
+				auto lambda = [] (Rm_session_component *rm_session) {
 					if (!rm_session)
 						/* should never happen */
-						return cap;
+						return;

 					/**
 					 * Assign rm_session capability to dataspace component. It can
 					 * not be done beforehand because the dataspace_component is
 					 * constructed before the rm_session
 					 */
 					if (rm_session->dataspace_component())
 						rm_session->dataspace_component()->sub_rm_session(rm_session->cap());
+				};
+				ep()->apply(cap, lambda);
 				return cap;
 			}

@ -282,6 +282,43 @@ namespace Genode {
|
|||||||
Rm_dataspace_component _ds; /* dataspace representation of region map */
|
Rm_dataspace_component _ds; /* dataspace representation of region map */
|
||||||
Dataspace_capability _ds_cap;
|
Dataspace_capability _ds_cap;
|
||||||
|
|
||||||
|
template <typename F>
|
||||||
|
auto _apply_to_dataspace(addr_t addr, F f, addr_t offset, unsigned level)
|
||||||
|
-> typename Trait::Functor<decltype(&F::operator())>::Return_type
|
||||||
|
{
|
||||||
|
using Functor = Trait::Functor<decltype(&F::operator())>;
|
||||||
|
using Return_type = typename Functor::Return_type;
|
||||||
|
|
||||||
|
Lock::Guard lock_guard(_lock);
|
||||||
|
|
||||||
|
/* skip further lookup when reaching the recursion limit */
|
||||||
|
if (!level) return f(this, nullptr, 0, 0);
|
||||||
|
|
||||||
|
/* lookup region and dataspace */
|
||||||
|
Rm_region *region = _map.metadata((void*)addr);
|
||||||
|
Dataspace_component *dsc = region ? region->dataspace()
|
||||||
|
: nullptr;
|
||||||
|
|
||||||
|
/* calculate offset in dataspace */
|
||||||
|
addr_t ds_offset = region ? (addr - region->base()
|
||||||
|
+ region->offset()) : 0;
|
||||||
|
|
||||||
|
/* check for nested dataspace */
|
||||||
|
Native_capability cap = dsc ? dsc->sub_rm_session()
|
||||||
|
: Native_capability();
|
||||||
|
if (!cap.valid()) return f(this, region, ds_offset, offset);
|
||||||
|
|
||||||
|
/* in case of a nested dataspace perform a recursive lookup */
|
||||||
|
auto lambda = [&] (Rm_session_component *rsc) -> Return_type
|
||||||
|
{
|
||||||
|
return (!rsc) ? f(nullptr, nullptr, ds_offset, offset)
|
||||||
|
: rsc->_apply_to_dataspace(ds_offset, f,
|
||||||
|
offset+region->base(),
|
||||||
|
--level);
|
||||||
|
};
|
||||||
|
return _session_ep->apply(cap, lambda);
|
||||||
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -300,17 +337,6 @@ namespace Genode {
|
|||||||
|
|
||||||
class Fault_area;
|
class Fault_area;
|
||||||
|
|
||||||
/**
|
|
||||||
* Reversely lookup dataspace and offset matching the specified address
|
|
||||||
*
|
|
||||||
* \return true lookup succeeded
|
|
||||||
*/
|
|
||||||
bool reverse_lookup(addr_t dst_base,
|
|
||||||
Fault_area *dst_fault_region,
|
|
||||||
Dataspace_component **src_dataspace,
|
|
||||||
Fault_area *src_fault_region,
|
|
||||||
Rm_session_component **sub_rm_session);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Register fault
|
* Register fault
|
||||||
*
|
*
|
||||||
@ -341,6 +367,20 @@ namespace Genode {
*/
*/
void upgrade_ram_quota(size_t ram_quota) { _md_alloc.upgrade(ram_quota); }
void upgrade_ram_quota(size_t ram_quota) { _md_alloc.upgrade(ram_quota); }
/**
* Apply a function to dataspace attached at a given address
*
* /param addr address where the dataspace is attached
* /param f functor or lambda to apply
*/
template <typename F>
auto apply_to_dataspace(addr_t addr, F f)
-> typename Trait::Functor<decltype(&F::operator())>::Return_type
{
enum { RECURSION_LIMIT = 5 };
return _apply_to_dataspace(addr, f, 0, RECURSION_LIMIT);
}
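A caller passes a functor that receives the leaf region-map session, the matched region, and the offsets, as the pager code further down in this commit does. A condensed, hypothetical usage sketch based on the signature declared above (error handling omitted, variable names invented):

/* 'rm' is an Rm_session_component*, 'pf_addr' a page-fault address */
bool resolved = rm->apply_to_dataspace(pf_addr,
	[&] (Rm_session_component *, Rm_region *region,
	     addr_t ds_offset, addr_t) -> bool
	{
		/* no attachment (or unresolved nested dataspace) at pf_addr */
		if (!region || !region->dataspace())
			return false;

		/* region->dataspace() is the leaf dataspace, ds_offset the offset within it */
		(void)ds_offset;
		return true;
	});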
/**************************************
/**************************************
** Region manager session interface **
** Region manager session interface **
@ -1,5 +1,5 @@
/*
/*
* \brief Generic implmentation of pager entrypoint
* \brief Generic implementation of pager entrypoint
* \author Norman Feske
* \author Norman Feske
* \author Stefan Kalkowski
* \author Stefan Kalkowski
* \date 2009-03-31
* \date 2009-03-31
@ -20,6 +20,8 @@ using namespace Genode;
void Pager_entrypoint::entry()
void Pager_entrypoint::entry()
{
{
using Pool = Object_pool<Pager_object>;
bool reply_pending = false;
bool reply_pending = false;
while (1) {
while (1) {
@ -31,68 +33,63 @@ void Pager_entrypoint::entry()
|
|||||||
|
|
||||||
reply_pending = false;
|
reply_pending = false;
|
||||||
|
|
||||||
/* lookup referenced object */
|
Pool::apply(_pager.badge(), [&] (Pager_object *obj) {
|
||||||
Object_pool<Pager_object>::Guard _obj(lookup_and_lock(_pager.badge()));
|
if (obj) {
|
||||||
Pager_object *obj = _obj;
|
if (_pager.is_exception())
|
||||||
|
obj->submit_exception_signal();
|
||||||
/* handle request */
|
else
|
||||||
if (obj) {
|
/* send reply if page-fault handling succeeded */
|
||||||
if (_pager.is_exception()) {
|
reply_pending = !obj->pager(_pager);
|
||||||
obj->submit_exception_signal();
|
} else {
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* send reply if page-fault handling succeeded */
|
|
||||||
reply_pending = !obj->pager(_pager);
|
|
||||||
continue;
|
|
||||||
|
|
||||||
} else {
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Prevent threads outside of core to mess with our wake-up
|
|
||||||
* interface. This condition can trigger if a process gets
|
|
||||||
* destroyed which triggered a page fault shortly before getting
|
|
||||||
* killed. In this case, 'wait_for_fault()' returns (because of
|
|
||||||
* the page fault delivery) but the pager-object lookup will fail
|
|
||||||
* (because core removed the process already).
|
|
||||||
*/
|
|
||||||
if (_pager.request_from_core()) {
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We got a request from one of cores region-manager sessions
|
* Prevent threads outside of core to mess with our wake-up
|
||||||
* to answer the pending page fault of a resolved region-manager
|
* interface. This condition can trigger if a process gets
|
||||||
* client. Hence, we have to send the page-fault reply to the
|
* destroyed which triggered a page fault shortly before getting
|
||||||
* specified thread and answer the call of the region-manager
|
* killed. In this case, 'wait_for_fault()' returns (because of
|
||||||
* session.
|
* the page fault delivery) but the pager-object lookup will fail
|
||||||
*
|
* (because core removed the process already).
|
||||||
* When called from a region-manager session, we receive the
|
|
||||||
* core-local address of the targeted pager object via the
|
|
||||||
* first message word, which corresponds to the 'fault_ip'
|
|
||||||
* argument of normal page-fault messages.
|
|
||||||
*/
|
*/
|
||||||
obj = reinterpret_cast<Pager_object *>(_pager.fault_ip());
|
if (_pager.request_from_core()) {
|
||||||
|
|
||||||
/* send reply to the calling region-manager session */
|
/*
|
||||||
_pager.acknowledge_wakeup();
|
* We got a request from one of cores region-manager sessions
|
||||||
|
* to answer the pending page fault of a resolved region-manager
|
||||||
|
* client. Hence, we have to send the page-fault reply to the
|
||||||
|
* specified thread and answer the call of the region-manager
|
||||||
|
* session.
|
||||||
|
*
|
||||||
|
* When called from a region-manager session, we receive the
|
||||||
|
* core-local address of the targeted pager object via the
|
||||||
|
* first message word, which corresponds to the 'fault_ip'
|
||||||
|
* argument of normal page-fault messages.
|
||||||
|
*/
|
||||||
|
obj = reinterpret_cast<Pager_object *>(_pager.fault_ip());
|
||||||
|
|
||||||
/* answer page fault of resolved pager object */
|
/* send reply to the calling region-manager session */
|
||||||
_pager.set_reply_dst(obj->cap());
|
_pager.acknowledge_wakeup();
|
||||||
_pager.acknowledge_wakeup();
|
|
||||||
|
/* answer page fault of resolved pager object */
|
||||||
|
_pager.set_reply_dst(obj->cap());
|
||||||
|
_pager.acknowledge_wakeup();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
});
|
||||||
};
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void Pager_entrypoint::dissolve(Pager_object *obj)
void Pager_entrypoint::dissolve(Pager_object *obj)
{
{
remove_locked(obj);
using Pool = Object_pool<Pager_object>;
if (obj) Pool::remove(obj);
}
}
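The removed 'remove_locked' belonged to the old interface in which the pool handed out locked objects. With the redesign, both lookup and removal go through the pool itself. The toy pool below restates that design in plain C++ with invented names, merely to illustrate why an object can never escape the pool's lock:

#include <cassert>
#include <mutex>

struct Session { unsigned badge; };

/* toy pool: an object is only reachable inside the functor passed to apply() */
class Toy_pool
{
	private:

		std::mutex  _mutex;
		Session    *_obj = nullptr;   /* a real pool would manage many objects */

	public:

		void insert(Session *s) { std::lock_guard<std::mutex> g(_mutex); _obj = s; }
		void remove(Session *s) { std::lock_guard<std::mutex> g(_mutex); if (_obj == s) _obj = nullptr; }

		template <typename F>
		auto apply(unsigned badge, F f) -> decltype(f(static_cast<Session *>(nullptr)))
		{
			std::lock_guard<std::mutex> g(_mutex);
			return f(_obj && _obj->badge == badge ? _obj : nullptr);
		}
};

int main()
{
	Session s { 7 };
	Toy_pool pool;
	pool.insert(&s);

	/* callers pass code to the pool instead of receiving a locked pointer */
	assert( pool.apply(7, [] (Session *p) { return p != nullptr; }));
	pool.remove(&s);
	assert(!pool.apply(7, [] (Session *p) { return p != nullptr; }));
}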
Pager_capability Pager_entrypoint::manage(Pager_object *obj)
Pager_capability Pager_entrypoint::manage(Pager_object *obj)
{
{
Native_capability cap = _manage(obj);
Native_capability cap = _pager_object_cap(obj->badge());
/* add server object to object pool */
/* add server object to object pool */
obj->cap(cap);
obj->cap(cap);
@ -26,23 +26,24 @@ using namespace Genode;
|
|||||||
|
|
||||||
int Pd_session_component::bind_thread(Thread_capability thread)
|
int Pd_session_component::bind_thread(Thread_capability thread)
|
||||||
{
|
{
|
||||||
Object_pool<Cpu_thread_component>::Guard cpu_thread(_thread_ep->lookup_and_lock(thread));
|
return _thread_ep->apply(thread, [&] (Cpu_thread_component *cpu_thread) {
|
||||||
if (!cpu_thread) return -1;
|
if (!cpu_thread) return -1;
|
||||||
|
|
||||||
if (cpu_thread->bound()) {
|
if (cpu_thread->bound()) {
|
||||||
PWRN("rebinding of threads not supported");
|
PWRN("rebinding of threads not supported");
|
||||||
return -2;
|
return -2;
|
||||||
}
|
}
|
||||||
|
|
||||||
Platform_thread *p_thread = cpu_thread->platform_thread();
|
Platform_thread *p_thread = cpu_thread->platform_thread();
|
||||||
|
|
||||||
int res = _pd.bind_thread(p_thread);
|
int res = _pd.bind_thread(p_thread);
|
||||||
|
|
||||||
if (res)
|
if (res)
|
||||||
return res;
|
return res;
|
||||||
|
|
||||||
cpu_thread->bound(true);
|
cpu_thread->bound(true);
|
||||||
return 0;
|
return 0;
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -26,9 +26,12 @@ static const bool verbose = false;
addr_t Ram_session_component::phys_addr(Ram_dataspace_capability ds)
addr_t Ram_session_component::phys_addr(Ram_dataspace_capability ds)
{
{
Object_pool<Dataspace_component>::Guard dsc(_ds_ep->lookup_and_lock(ds));
auto lambda = [] (Dataspace_component *dsc) {
if (!dsc) throw Invalid_dataspace();
if (!dsc) throw Invalid_dataspace();
return dsc->phys_addr();
return dsc->phys_addr();
};
return _ds_ep->apply(ds, lambda);
}
}
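The named 'lambda' variable is purely a stylistic choice; the functor can just as well be passed inline, as other call sites in this commit do. The same lookup written inline would look roughly like this (sketch only):

addr_t Ram_session_component::phys_addr(Ram_dataspace_capability ds)
{
	return _ds_ep->apply(ds, [] (Dataspace_component *dsc) {
		if (!dsc) throw Invalid_dataspace();
		return dsc->phys_addr(); });
}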
@ -217,12 +220,12 @@ Ram_dataspace_capability Ram_session_component::alloc(size_t ds_size, Cache_attr
|
|||||||
|
|
||||||
void Ram_session_component::free(Ram_dataspace_capability ds_cap)
|
void Ram_session_component::free(Ram_dataspace_capability ds_cap)
|
||||||
{
|
{
|
||||||
Dataspace_component * ds =
|
auto lambda = [this] (Dataspace_component *ds) {
|
||||||
dynamic_cast<Dataspace_component *>(_ds_ep->lookup_and_lock(ds_cap));
|
if (!ds) return;
|
||||||
if (!ds)
|
_free_ds(ds);
|
||||||
return;
|
};
|
||||||
|
|
||||||
_free_ds(ds);
|
_ds_ep->apply(ds_cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -231,18 +234,21 @@ int Ram_session_component::ref_account(Ram_session_capability ram_session_cap)
|
|||||||
/* the reference account cannot be defined twice */
|
/* the reference account cannot be defined twice */
|
||||||
if (_ref_account) return -2;
|
if (_ref_account) return -2;
|
||||||
|
|
||||||
Object_pool<Ram_session_component>::Guard ref(_ram_session_ep->lookup_and_lock(ram_session_cap));
|
auto lambda = [this] (Ram_session_component *ref) {
|
||||||
|
|
||||||
/* check if recipient is a valid Ram_session_component */
|
/* check if recipient is a valid Ram_session_component */
|
||||||
if (!ref) return -1;
|
if (!ref) return -1;
|
||||||
|
|
||||||
/* deny the usage of the ram session as its own ref account */
|
/* deny the usage of the ram session as its own ref account */
|
||||||
/* XXX also check for cycles along the tree of ref accounts */
|
/* XXX also check for cycles along the tree of ref accounts */
|
||||||
if (ref == this) return -3;
|
if (ref == this) return -3;
|
||||||
|
|
||||||
_ref_account = ref;
|
_ref_account = ref;
|
||||||
_ref_account->_register_ref_account_member(this);
|
_ref_account->_register_ref_account_member(this);
|
||||||
return 0;
|
return 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
return _ram_session_ep->apply(ram_session_cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -252,8 +258,9 @@ int Ram_session_component::transfer_quota(Ram_session_capability ram_session_cap
|
|||||||
if (verbose)
|
if (verbose)
|
||||||
PDBG("amount=%zu", amount);
|
PDBG("amount=%zu", amount);
|
||||||
|
|
||||||
Object_pool<Ram_session_component>::Guard dst(_ram_session_ep->lookup_and_lock(ram_session_cap));
|
auto lambda = [&] (Ram_session_component *dst) {
|
||||||
return _transfer_quota(dst, amount);
|
return _transfer_quota(dst, amount); };
|
||||||
|
return _ram_session_ep->apply(ram_session_cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -165,6 +165,8 @@ namespace Genode {
|
|||||||
|
|
||||||
int Rm_client::pager(Ipc_pager &pager)
|
int Rm_client::pager(Ipc_pager &pager)
|
||||||
{
|
{
|
||||||
|
using Fault_area = Rm_session_component::Fault_area;
|
||||||
|
|
||||||
Rm_session::Fault_type pf_type = pager.is_write_fault() ? Rm_session::WRITE_FAULT
|
Rm_session::Fault_type pf_type = pager.is_write_fault() ? Rm_session::WRITE_FAULT
|
||||||
: Rm_session::READ_FAULT;
|
: Rm_session::READ_FAULT;
|
||||||
addr_t pf_addr = pager.fault_addr();
|
addr_t pf_addr = pager.fault_addr();
|
||||||
@ -173,132 +175,87 @@ int Rm_client::pager(Ipc_pager &pager)
|
|||||||
if (verbose_page_faults)
|
if (verbose_page_faults)
|
||||||
print_page_fault("page fault", pf_addr, pf_ip, pf_type, badge());
|
print_page_fault("page fault", pf_addr, pf_ip, pf_type, badge());
|
||||||
|
|
||||||
Rm_session_component *curr_rm_session = member_rm_session();
|
auto lambda = [&] (Rm_session_component *rm_session,
|
||||||
Rm_session_component *sub_rm_session = 0;
|
Rm_region *region,
|
||||||
addr_t curr_rm_base = 0;
|
addr_t ds_offset,
|
||||||
Dataspace_component *src_dataspace = 0;
|
addr_t region_offset) -> int
|
||||||
Rm_session_component::Fault_area src_fault_area;
|
{
|
||||||
Rm_session_component::Fault_area dst_fault_area(pf_addr);
|
Dataspace_component * dsc = region ? region->dataspace() : nullptr;
|
||||||
bool lookup;
|
if (!dsc) {
|
||||||
|
|
||||||
unsigned level;
|
/*
|
||||||
enum { MAX_NESTING_LEVELS = 5 };
|
* We found no attachment at the page-fault address and therefore have
|
||||||
|
* to reflect the page fault as region-manager fault. The signal
|
||||||
|
* handler is then expected to request the state of the region-manager
|
||||||
|
* session.
|
||||||
|
*/
|
||||||
|
|
||||||
/* helper guard to release the rm_session lock on return */
|
/* print a warning if it's no managed-dataspace */
|
||||||
class Guard {
|
if (rm_session == member_rm_session())
|
||||||
private:
|
print_page_fault("no RM attachment", pf_addr, pf_ip,
|
||||||
|
pf_type, badge());
|
||||||
|
|
||||||
Rm_session_component ** _release_session;
|
/* register fault at responsible region-manager session */
|
||||||
unsigned * _level;
|
if (rm_session)
|
||||||
|
rm_session->fault(this, pf_addr - region_offset, pf_type);
|
||||||
|
|
||||||
public:
|
/* there is no attachment return an error condition */
|
||||||
|
return 1;
|
||||||
explicit Guard(Rm_session_component ** rs, unsigned * level)
|
|
||||||
: _release_session(rs), _level(level) {}
|
|
||||||
|
|
||||||
~Guard() {
|
|
||||||
if ((*_level > 0) && (*_release_session))
|
|
||||||
(*_release_session)->release();
|
|
||||||
}
|
|
||||||
} release_guard(&curr_rm_session, &level);
|
|
||||||
|
|
||||||
/* traverse potentially nested dataspaces until we hit a leaf dataspace */
|
|
||||||
for (level = 0; level < MAX_NESTING_LEVELS; level++) {
|
|
||||||
lookup = curr_rm_session->reverse_lookup(curr_rm_base,
|
|
||||||
&dst_fault_area,
|
|
||||||
&src_dataspace,
|
|
||||||
&src_fault_area,
|
|
||||||
&sub_rm_session);
|
|
||||||
/* check if we need to traverse into a nested dataspace */
|
|
||||||
if (!sub_rm_session)
|
|
||||||
break;
|
|
||||||
|
|
||||||
if (!lookup) {
|
|
||||||
sub_rm_session->release();
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* set up next iteration */
|
addr_t ds_base = dsc->map_src_addr();
|
||||||
|
Fault_area src_fault_area(ds_base + ds_offset);
|
||||||
curr_rm_base = dst_fault_area.fault_addr()
|
Fault_area dst_fault_area(pf_addr);
|
||||||
- src_fault_area.fault_addr() + src_dataspace->map_src_addr();
|
src_fault_area.constrain(ds_base, dsc->size());
|
||||||
|
dst_fault_area.constrain(region_offset + region->base(), region->size());
|
||||||
if (level > 0)
|
|
||||||
curr_rm_session->release();
|
|
||||||
curr_rm_session = sub_rm_session;
|
|
||||||
sub_rm_session = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (level == MAX_NESTING_LEVELS) {
|
|
||||||
PWRN("Too many nesting levels of managed dataspaces");
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!lookup) {
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We found no attachment at the page-fault address and therefore have
|
* Determine mapping size compatible with source and destination,
|
||||||
* to reflect the page fault as region-manager fault. The signal
|
* and apply platform-specific constraint of mapping sizes.
|
||||||
* handler is then expected to request the state of the region-manager
|
|
||||||
* session.
|
|
||||||
*/
|
*/
|
||||||
|
size_t map_size_log2 = dst_fault_area.common_size_log2(dst_fault_area,
|
||||||
|
src_fault_area);
|
||||||
|
map_size_log2 = constrain_map_size_log2(map_size_log2);
|
||||||
|
|
||||||
/* print a warning if it's no managed-dataspace */
|
src_fault_area.constrain(map_size_log2);
|
||||||
if (curr_rm_session == member_rm_session())
|
dst_fault_area.constrain(map_size_log2);
|
||||||
print_page_fault("no RM attachment", pf_addr, pf_ip, pf_type, badge());
|
if (!src_fault_area.valid() || !dst_fault_area.valid())
|
||||||
|
PERR("Invalid mapping");
|
||||||
|
|
||||||
/* register fault at responsible region-manager session */
|
/*
|
||||||
curr_rm_session->fault(this, dst_fault_area.fault_addr() - curr_rm_base, pf_type);
|
* Check if dataspace is compatible with page-fault type
|
||||||
/* there is no attachment return an error condition */
|
*/
|
||||||
return 1;
|
if (pf_type == Rm_session::WRITE_FAULT && !dsc->writable()) {
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/* attempted there is no attachment return an error condition */
|
||||||
* Determine mapping size compatible with source and destination,
|
print_page_fault("attempted write at read-only memory",
|
||||||
* and apply platform-specific constraint of mapping sizes.
|
pf_addr, pf_ip, pf_type, badge());
|
||||||
*/
|
|
||||||
size_t map_size_log2 = dst_fault_area.common_size_log2(dst_fault_area,
|
|
||||||
src_fault_area);
|
|
||||||
map_size_log2 = constrain_map_size_log2(map_size_log2);
|
|
||||||
|
|
||||||
src_fault_area.constrain(map_size_log2);
|
/* register fault at responsible region-manager session */
|
||||||
dst_fault_area.constrain(map_size_log2);
|
rm_session->fault(this, src_fault_area.fault_addr(), pf_type);
|
||||||
|
return 2;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
Mapping mapping(dst_fault_area.base(), src_fault_area.base(),
|
||||||
* Check if dataspace is compatible with page-fault type
|
dsc->cacheability(), dsc->is_io_mem(),
|
||||||
*/
|
map_size_log2, dsc->writable());
|
||||||
if (pf_type == Rm_session::WRITE_FAULT && !src_dataspace->writable()) {
|
|
||||||
|
|
||||||
/* attempted there is no attachment return an error condition */
|
/*
|
||||||
print_page_fault("attempted write at read-only memory",
|
* On kernels with a mapping database, the 'dsc' dataspace is a leaf
|
||||||
pf_addr, pf_ip, pf_type, badge());
|
* dataspace that corresponds to a virtual address range within core. To
|
||||||
|
* prepare the answer for the page fault, we make sure that this range is
|
||||||
|
* locally mapped in core. On platforms that support map operations of
|
||||||
|
* pages that are not locally mapped, the 'map_core_local' function may be
|
||||||
|
* empty.
|
||||||
|
*/
|
||||||
|
if (!dsc->is_io_mem())
|
||||||
|
mapping.prepare_map_operation();
|
||||||
|
|
||||||
/* register fault at responsible region-manager session */
|
/* answer page fault with a flex-page mapping */
|
||||||
curr_rm_session->fault(this, src_fault_area.fault_addr(), pf_type);
|
pager.set_reply_mapping(mapping);
|
||||||
return 2;
|
return 0;
|
||||||
}
|
};
|
||||||
|
return member_rm_session()->apply_to_dataspace(pf_addr, lambda);
|
||||||
Mapping mapping(dst_fault_area.base(),
|
|
||||||
src_fault_area.base(),
|
|
||||||
src_dataspace->cacheability(),
|
|
||||||
src_dataspace->is_io_mem(),
|
|
||||||
map_size_log2,
|
|
||||||
src_dataspace->writable());
|
|
||||||
|
|
||||||
/*
|
|
||||||
* On kernels with a mapping database, the 'dsc' dataspace is a leaf
|
|
||||||
* dataspace that corresponds to a virtual address range within core. To
|
|
||||||
* prepare the answer for the page fault, we make sure that this range is
|
|
||||||
* locally mapped in core. On platforms that support map operations of
|
|
||||||
* pages that are not locally mapped, the 'map_core_local' function may be
|
|
||||||
* empty.
|
|
||||||
*/
|
|
||||||
if (!src_dataspace->is_io_mem())
|
|
||||||
mapping.prepare_map_operation();
|
|
||||||
|
|
||||||
/* answer page fault with a flex-page mapping */
|
|
||||||
pager.set_reply_mapping(mapping);
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -357,107 +314,111 @@ Rm_session_component::attach(Dataspace_capability ds_cap, size_t size,
|
|||||||
if (offset < 0 || align_addr(offset, get_page_size_log2()) != offset)
|
if (offset < 0 || align_addr(offset, get_page_size_log2()) != offset)
|
||||||
throw Invalid_args();
|
throw Invalid_args();
|
||||||
|
|
||||||
/* check dataspace validity */
|
auto lambda = [&] (Dataspace_component *dsc) {
|
||||||
Object_pool<Dataspace_component>::Guard dsc(_ds_ep->lookup_and_lock(ds_cap));
|
/* check dataspace validity */
|
||||||
if (!dsc) throw Invalid_dataspace();
|
if (!dsc) throw Invalid_dataspace();
|
||||||
|
|
||||||
if (!size)
|
if (!size)
|
||||||
size = dsc->size() - offset;
|
size = dsc->size() - offset;
|
||||||
|
|
||||||
/* work with page granularity */
|
/* work with page granularity */
|
||||||
size = align_addr(size, get_page_size_log2());
|
size = align_addr(size, get_page_size_log2());
|
||||||
|
|
||||||
/* deny creation of regions larger then the actual dataspace */
|
/* deny creation of regions larger then the actual dataspace */
|
||||||
if (dsc->size() < size + offset)
|
if (dsc->size() < size + offset)
|
||||||
throw Invalid_args();
|
throw Invalid_args();
|
||||||
|
|
||||||
/* allocate region for attachment */
|
/* allocate region for attachment */
|
||||||
void *r = 0;
|
void *r = 0;
|
||||||
if (use_local_addr) {
|
if (use_local_addr) {
|
||||||
switch (_map.alloc_addr(size, local_addr).value) {
|
switch (_map.alloc_addr(size, local_addr).value) {
|
||||||
|
|
||||||
case Range_allocator::Alloc_return::OUT_OF_METADATA:
|
case Range_allocator::Alloc_return::OUT_OF_METADATA:
|
||||||
throw Out_of_metadata();
|
throw Out_of_metadata();
|
||||||
|
|
||||||
case Range_allocator::Alloc_return::RANGE_CONFLICT:
|
case Range_allocator::Alloc_return::RANGE_CONFLICT:
|
||||||
throw Region_conflict();
|
throw Region_conflict();
|
||||||
|
|
||||||
case Range_allocator::Alloc_return::OK:
|
case Range_allocator::Alloc_return::OK:
|
||||||
r = local_addr;
|
r = local_addr;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
/*
|
|
||||||
* Find optimal alignment for new region. First try natural alignment.
|
|
||||||
* If that is not possible, try again with successively less alignment
|
|
||||||
* constraints.
|
|
||||||
*/
|
|
||||||
size_t align_log2 = log2(size);
|
|
||||||
for (; align_log2 >= get_page_size_log2(); align_log2--) {
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Don't use an aligment higher than the alignment of the backing
|
* Find optimal alignment for new region. First try natural alignment.
|
||||||
* store. The backing store would constrain the mapping size
|
* If that is not possible, try again with successively less alignment
|
||||||
* anyway such that a higher alignment of the region is of no use.
|
* constraints.
|
||||||
*/
|
*/
|
||||||
if (((dsc->map_src_addr() + offset) & ((1UL << align_log2) - 1)) != 0)
|
size_t align_log2 = log2(size);
|
||||||
continue;
|
for (; align_log2 >= get_page_size_log2(); align_log2--) {
|
||||||
|
|
||||||
/* try allocating the align region */
|
/*
|
||||||
Range_allocator::Alloc_return alloc_return =
|
* Don't use an aligment higher than the alignment of the backing
|
||||||
_map.alloc_aligned(size, &r, align_log2);
|
* store. The backing store would constrain the mapping size
|
||||||
|
* anyway such that a higher alignment of the region is of no use.
|
||||||
|
*/
|
||||||
|
if (((dsc->map_src_addr() + offset) & ((1UL << align_log2) - 1)) != 0)
|
||||||
|
continue;
|
||||||
|
|
||||||
if (alloc_return.is_ok())
|
/* try allocating the align region */
|
||||||
break;
|
Range_allocator::Alloc_return alloc_return =
|
||||||
else if (alloc_return.value == Range_allocator::Alloc_return::OUT_OF_METADATA) {
|
_map.alloc_aligned(size, &r, align_log2);
|
||||||
|
|
||||||
|
if (alloc_return.is_ok())
|
||||||
|
break;
|
||||||
|
else if (alloc_return.value == Range_allocator::Alloc_return::OUT_OF_METADATA) {
|
||||||
|
_map.free(r);
|
||||||
|
throw Out_of_metadata();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (align_log2 < get_page_size_log2()) {
|
||||||
_map.free(r);
|
_map.free(r);
|
||||||
throw Out_of_metadata();
|
throw Region_conflict();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (align_log2 < get_page_size_log2()) {
|
/* store attachment info in meta data */
|
||||||
|
_map.metadata(r, Rm_region((addr_t)r, size, true, dsc, offset, this));
|
||||||
|
Rm_region *region = _map.metadata(r);
|
||||||
|
|
||||||
|
/* also update region list */
|
||||||
|
Rm_region_ref *p;
|
||||||
|
try { p = new(&_ref_slab) Rm_region_ref(region); }
|
||||||
|
catch (Allocator::Out_of_memory) {
|
||||||
_map.free(r);
|
_map.free(r);
|
||||||
throw Region_conflict();
|
throw Out_of_metadata();
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* store attachment info in meta data */
|
|
||||||
_map.metadata(r, Rm_region((addr_t)r, size, true, dsc, offset, this));
|
|
||||||
Rm_region *region = _map.metadata(r);
|
|
||||||
|
|
||||||
/* also update region list */
|
|
||||||
Rm_region_ref *p;
|
|
||||||
try { p = new(&_ref_slab) Rm_region_ref(region); }
|
|
||||||
catch (Allocator::Out_of_memory) {
|
|
||||||
_map.free(r);
|
|
||||||
throw Out_of_metadata();
|
|
||||||
}
|
|
||||||
|
|
||||||
_regions.insert(p);
|
|
||||||
|
|
||||||
/* inform dataspace about attachment */
|
|
||||||
dsc->attached_to(region);
|
|
||||||
|
|
||||||
if (verbose)
|
|
||||||
PDBG("attach ds %p (a=%lx,s=%zx,o=%lx) @ [%lx,%lx)",
|
|
||||||
(Dataspace_component *)dsc, dsc->phys_addr(), dsc->size(), offset, (addr_t)r, (addr_t)r + size);
|
|
||||||
|
|
||||||
/* check if attach operation resolves any faulting region-manager clients */
|
|
||||||
for (Rm_faulter *faulter = _faulters.head(); faulter; ) {
|
|
||||||
|
|
||||||
/* remember next pointer before possibly removing current list element */
|
|
||||||
Rm_faulter *next = faulter->next();
|
|
||||||
|
|
||||||
if (faulter->fault_in_addr_range((addr_t)r, size)) {
|
|
||||||
_faulters.remove(faulter);
|
|
||||||
faulter->continue_after_resolved_fault();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
faulter = next;
|
_regions.insert(p);
|
||||||
}
|
|
||||||
|
|
||||||
return r;
|
/* inform dataspace about attachment */
|
||||||
|
dsc->attached_to(region);
|
||||||
|
|
||||||
|
if (verbose)
|
||||||
|
PDBG("attach ds %p (a=%lx,s=%zx,o=%lx) @ [%lx,%lx)",
|
||||||
|
(Dataspace_component *)dsc, dsc->phys_addr(), dsc->size(),
|
||||||
|
offset, (addr_t)r, (addr_t)r + size);
|
||||||
|
|
||||||
|
/* check if attach operation resolves any faulting region-manager clients */
|
||||||
|
for (Rm_faulter *faulter = _faulters.head(); faulter; ) {
|
||||||
|
|
||||||
|
/* remember next pointer before possibly removing current list element */
|
||||||
|
Rm_faulter *next = faulter->next();
|
||||||
|
|
||||||
|
if (faulter->fault_in_addr_range((addr_t)r, size)) {
|
||||||
|
_faulters.remove(faulter);
|
||||||
|
faulter->continue_after_resolved_fault();
|
||||||
|
}
|
||||||
|
|
||||||
|
faulter = next;
|
||||||
|
}
|
||||||
|
|
||||||
|
return r;
|
||||||
|
};
|
||||||
|
|
||||||
|
return _ds_ep->apply(ds_cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -612,19 +573,20 @@ Pager_capability Rm_session_component::add_client(Thread_capability thread)
|
|||||||
|
|
||||||
{
|
{
|
||||||
/* lookup thread and setup correct parameters */
|
/* lookup thread and setup correct parameters */
|
||||||
Object_pool<Cpu_thread_component>::Guard
|
auto lambda = [&] (Cpu_thread_component *cpu_thread) {
|
||||||
cpu_thread(_thread_ep->lookup_and_lock(thread));
|
if (!cpu_thread) throw Invalid_thread();
|
||||||
if (!cpu_thread) throw Invalid_thread();
|
|
||||||
|
|
||||||
/* determine identification of client when faulting */
|
/* determine identification of client when faulting */
|
||||||
badge = cpu_thread->platform_thread()->pager_object_badge();
|
badge = cpu_thread->platform_thread()->pager_object_badge();
|
||||||
|
|
||||||
/* determine cpu affinity of client thread */
|
/* determine cpu affinity of client thread */
|
||||||
location = cpu_thread->platform_thread()->affinity();
|
location = cpu_thread->platform_thread()->affinity();
|
||||||
|
|
||||||
address_space = cpu_thread->platform_thread()->address_space();
|
address_space = cpu_thread->platform_thread()->address_space();
|
||||||
if (!Locked_ptr<Address_space>(address_space).is_valid())
|
if (!Locked_ptr<Address_space>(address_space).is_valid())
|
||||||
throw Unbound_thread();
|
throw Unbound_thread();
|
||||||
|
};
|
||||||
|
_thread_ep->apply(thread, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* serialize access */
|
/* serialize access */
|
||||||
@ -644,112 +606,45 @@ Pager_capability Rm_session_component::add_client(Thread_capability thread)
|
|||||||
|
|
||||||
void Rm_session_component::remove_client(Pager_capability pager_cap)
|
void Rm_session_component::remove_client(Pager_capability pager_cap)
|
||||||
{
|
{
|
||||||
|
Rm_client *client;
|
||||||
|
|
||||||
Rm_client * cl = dynamic_cast<Rm_client *>(_pager_ep->lookup_and_lock(pager_cap));
|
auto lambda = [&] (Rm_client *cl) {
|
||||||
if (!cl) return;
|
client = cl;
|
||||||
|
|
||||||
/*
|
if (!client) return;
|
||||||
* Rm_client is derived from Pager_object. If the Pager_object is also
|
|
||||||
* derived from Thread_base then the Rm_client object must be
|
|
||||||
* destructed without holding the rm_session_object lock. The native
|
|
||||||
* platform specific Thread_base implementation has to take care that
|
|
||||||
* all in-flight page handling requests are finished before
|
|
||||||
* destruction. (Either by waiting until the end of or by
|
|
||||||
* <deadlock free> cancellation of the last in-flight request.
|
|
||||||
* This operation can also require taking the rm_session_object lock.
|
|
||||||
*/
|
|
||||||
{
|
|
||||||
Lock::Guard lock_guard(_lock);
|
|
||||||
_clients.remove(cl);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* call platform specific dissolve routines */
|
/*
|
||||||
_pager_ep->dissolve(cl);
|
* Rm_client is derived from Pager_object. If the Pager_object is also
|
||||||
|
* derived from Thread_base then the Rm_client object must be
|
||||||
|
* destructed without holding the rm_session_object lock. The native
|
||||||
|
* platform specific Thread_base implementation has to take care that
|
||||||
|
* all in-flight page handling requests are finished before
|
||||||
|
* destruction. (Either by waiting until the end of or by
|
||||||
|
* <deadlock free> cancellation of the last in-flight request.
|
||||||
|
* This operation can also require taking the rm_session_object lock.
|
||||||
|
*/
|
||||||
|
{
|
||||||
|
Lock::Guard lock_guard(_lock);
|
||||||
|
_clients.remove(client);
|
||||||
|
}
|
||||||
|
|
||||||
{
|
/* call platform specific dissolve routines */
|
||||||
Lock::Guard lock_guard(_lock);
|
_pager_ep->dissolve(client);
|
||||||
cl->dissolve_from_faulting_rm_session(this);
|
|
||||||
}
|
|
||||||
|
|
||||||
destroy(&_client_slab, cl);
|
{
|
||||||
}
|
Lock::Guard lock_guard(_lock);
|
||||||
|
client->dissolve_from_faulting_rm_session(this);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
_pager_ep->apply(pager_cap, lambda);
|
||||||
|
|
||||||
|
destroy(&_client_slab, client);
|
||||||
bool Rm_session_component::reverse_lookup(addr_t dst_base,
|
|
||||||
Fault_area *dst_fault_area,
|
|
||||||
Dataspace_component **src_dataspace,
|
|
||||||
Fault_area *src_fault_area,
|
|
||||||
Rm_session_component **sub_rm_session)
|
|
||||||
{
|
|
||||||
/* serialize access */
|
|
||||||
Lock::Guard lock_guard(_lock);
|
|
||||||
|
|
||||||
/* rm-session-relative fault address */
|
|
||||||
addr_t fault_addr = dst_fault_area->fault_addr() - dst_base;
|
|
||||||
|
|
||||||
/* lookup region */
|
|
||||||
Rm_region *region = _map.metadata((void*)fault_addr);
|
|
||||||
if (!region)
|
|
||||||
return false;
|
|
||||||
|
|
||||||
/* request dataspace backing the region */
|
|
||||||
*src_dataspace = region->dataspace();
|
|
||||||
if (!*src_dataspace)
|
|
||||||
return false;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Constrain destination fault area to region
|
|
||||||
*
|
|
||||||
* Handle corner case when the 'dst_base' is negative. In this case, we
|
|
||||||
* determine the largest flexpage within the positive portion of the
|
|
||||||
* region.
|
|
||||||
*/
|
|
||||||
addr_t region_base = region->base() + dst_base;
|
|
||||||
size_t region_size = region->size();
|
|
||||||
|
|
||||||
/* check for overflow condition */
|
|
||||||
while ((long)region_base < 0 && (long)(region_base + region_size) > 0) {
|
|
||||||
|
|
||||||
/* increment base address by half of the region size */
|
|
||||||
region_base += region_size >> 1;
|
|
||||||
|
|
||||||
/* lower the region size by one log2 step */
|
|
||||||
region_size >>= 1;
|
|
||||||
}
|
|
||||||
dst_fault_area->constrain(region_base, region_size);
|
|
||||||
|
|
||||||
/* calculate source fault address relative to 'src_dataspace' */
|
|
||||||
addr_t src_fault_offset = fault_addr - region->base() + region->offset();
|
|
||||||
|
|
||||||
addr_t src_base = (*src_dataspace)->map_src_addr();
|
|
||||||
*src_fault_area = Fault_area(src_base + src_fault_offset);
|
|
||||||
|
|
||||||
/* constrain source fault area by the source dataspace dimensions */
|
|
||||||
src_fault_area->constrain(src_base, (*src_dataspace)->size());
|
|
||||||
|
|
||||||
if (!src_fault_area->valid() || !dst_fault_area->valid())
|
|
||||||
return false;
|
|
||||||
|
|
||||||
/* lookup and lock nested dataspace if required */
|
|
||||||
Native_capability session_cap = (*src_dataspace)->sub_rm_session();
|
|
||||||
if (session_cap.valid()) {
|
|
||||||
*sub_rm_session = dynamic_cast<Rm_session_component *>(_session_ep->lookup_and_lock(session_cap));
|
|
||||||
return (*sub_rm_session != 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* loop refer to leaf */
|
|
||||||
*sub_rm_session = 0;
|
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Rm_session_component::fault(Rm_faulter *faulter, addr_t pf_addr,
|
void Rm_session_component::fault(Rm_faulter *faulter, addr_t pf_addr,
|
||||||
Rm_session::Fault_type pf_type)
|
Rm_session::Fault_type pf_type)
|
||||||
{
|
{
|
||||||
|
|
||||||
/* serialize access */
|
|
||||||
Lock::Guard lock_guard(_lock);
|
|
||||||
|
|
||||||
/* remember fault state in faulting thread */
|
/* remember fault state in faulting thread */
|
||||||
faulter->fault(this, Rm_session::State(pf_type, pf_addr));
|
faulter->fault(this, Rm_session::State(pf_type, pf_addr));
|
||||||
|
|
||||||
@ -869,13 +764,12 @@ Rm_session_component::~Rm_session_component()
|
|||||||
|
|
||||||
_lock.unlock();
|
_lock.unlock();
|
||||||
|
|
||||||
{
|
/* lookup thread and reset pager pointer */
|
||||||
/* lookup thread and reset pager pointer */
|
auto lambda = [&] (Cpu_thread_component *cpu_thread) {
|
||||||
Object_pool<Cpu_thread_component>::Guard
|
|
||||||
cpu_thread(_thread_ep->lookup_and_lock(thread_cap));
|
|
||||||
if (cpu_thread && (cpu_thread->platform_thread()->pager() == cl))
|
if (cpu_thread && (cpu_thread->platform_thread()->pager() == cl))
|
||||||
cpu_thread->platform_thread()->pager(0);
|
cpu_thread->platform_thread()->pager(0);
|
||||||
}
|
};
|
||||||
|
_thread_ep->apply(thread_cap, lambda);
|
||||||
|
|
||||||
destroy(&_client_slab, cl);
|
destroy(&_client_slab, cl);
|
||||||
|
|
||||||
|
@ -71,33 +71,33 @@ Signal_context_capability Signal_session_component::alloc_context(long imprint)
|
|||||||
|
|
||||||
void Signal_session_component::free_context(Signal_context_capability context_cap)
|
void Signal_session_component::free_context(Signal_context_capability context_cap)
|
||||||
{
|
{
|
||||||
Signal_context_component * context =
|
_context_ep->apply(context_cap, [this] (Signal_context_component *context) {
|
||||||
dynamic_cast<Signal_context_component *>(_context_ep->lookup_and_lock(context_cap));
|
if (!context) {
|
||||||
if (!context) {
|
PWRN("specified signal-context capability has wrong type");
|
||||||
PWRN("specified signal-context capability has wrong type");
|
return;
|
||||||
return;
|
}
|
||||||
}
|
|
||||||
|
|
||||||
_context_ep->dissolve(context);
|
_context_ep->dissolve(context);
|
||||||
destroy(&_contexts_slab, context);
|
destroy(&_contexts_slab, context);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Signal_session_component::submit(Signal_context_capability context_cap,
|
void Signal_session_component::submit(Signal_context_capability context_cap,
|
||||||
unsigned cnt)
|
unsigned cnt)
|
||||||
{
|
{
|
||||||
Object_pool<Signal_context_component>::Guard
|
_context_ep->apply(context_cap, [&] (Signal_context_component *context) {
|
||||||
context(_context_ep->lookup_and_lock(context_cap));
|
if (!context) {
|
||||||
if (!context) {
|
/*
|
||||||
/*
|
* We do not use PWRN() to enable the build system to suppress this
|
||||||
* We do not use PWRN() to enable the build system to suppress this
|
* warning in release mode (SPECS += release).
|
||||||
* warning in release mode (SPECS += release).
|
*/
|
||||||
*/
|
PDBG("invalid signal-context capability");
|
||||||
PDBG("invalid signal-context capability");
|
return;
|
||||||
return;
|
}
|
||||||
}
|
|
||||||
|
|
||||||
context->source()->submit(context, _ipc_ostream, cnt);
|
context->source()->submit(context, _ipc_ostream, cnt);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -540,13 +540,17 @@ void Backend_memory::free(Genode::Ram_dataspace_capability cap)
|
|||||||
{
|
{
|
||||||
using namespace Genode;
|
using namespace Genode;
|
||||||
|
|
||||||
Memory_object_base *o = memory_pool.lookup_and_lock(cap);
|
Memory_object_base *object;
|
||||||
if (!o)
|
auto lambda = [&] (Memory_object_base *o) {
|
||||||
return;
|
object = o;
|
||||||
|
|
||||||
o->free();
|
if (object) {
|
||||||
memory_pool.remove_locked(o);
|
object->free();
|
||||||
destroy(env()->heap(), o);
|
memory_pool.remove(object);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
memory_pool.apply(cap, lambda);
|
||||||
|
destroy(env()->heap(), object);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -480,11 +480,14 @@ void Lx::backend_free(Genode::Ram_dataspace_capability cap)
|
|||||||
{
|
{
|
||||||
using namespace Genode;
|
using namespace Genode;
|
||||||
|
|
||||||
Memory_object_base *o = memory_pool.lookup_and_lock(cap);
|
Memory_object_base *object;
|
||||||
if (!o)
|
auto lambda = [&] (Memory_object_base *o) {
|
||||||
return;
|
object = o;
|
||||||
|
if (object) {
|
||||||
o->free();
|
o->free();
|
||||||
memory_pool.remove_locked(o);
|
memory_pool.remove(o);
|
||||||
destroy(env()->heap(), o);
|
}
|
||||||
|
};
|
||||||
|
memory_pool.apply(cap, lambda);
|
||||||
|
destroy(env()->heap(), object);
|
||||||
}
|
}
|
||||||
|
@ -807,12 +807,9 @@ class Wm::Nitpicker::Session_component : public Rpc_object<Nitpicker::Session>,
|
|||||||
|
|
||||||
View_handle view_handle(View_capability view_cap, View_handle handle) override
|
View_handle view_handle(View_capability view_cap, View_handle handle) override
|
||||||
{
|
{
|
||||||
View *view = dynamic_cast<View *>(_ep.rpc_ep().lookup_and_lock(view_cap));
|
return _ep.rpc_ep().apply(view_cap, [&] (View *view) {
|
||||||
if (!view) return View_handle();
|
return (view) ? _view_handle_registry.alloc(*view, handle)
|
||||||
|
: View_handle(); });
|
||||||
Object_pool<Rpc_object_base>::Guard guard(view);
|
|
||||||
|
|
||||||
return _view_handle_registry.alloc(*view, handle);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
View_capability view_capability(View_handle handle) override
|
View_capability view_capability(View_handle handle) override
|
||||||
@ -1071,50 +1068,64 @@ class Wm::Nitpicker::Root : public Genode::Rpc_object<Genode::Typed_root<Session
|
|||||||
{
|
{
|
||||||
if (!args.is_valid_string()) throw Root::Invalid_args();
|
if (!args.is_valid_string()) throw Root::Invalid_args();
|
||||||
|
|
||||||
Rpc_object_base *session = _ep.rpc_ep().lookup_and_lock(session_cap);
|
auto lambda = [&] (Rpc_object_base *session) {
|
||||||
|
if (!session) {
|
||||||
|
PDBG("session lookup failed");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
if (!session) {
|
Session_component *regular_session =
|
||||||
PDBG("session lookup failed");
|
dynamic_cast<Session_component *>(session);
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
Session_component *regular_session =
|
if (regular_session)
|
||||||
dynamic_cast<Session_component *>(session);
|
regular_session->upgrade(args.string());
|
||||||
|
|
||||||
if (regular_session)
|
Decorator_nitpicker_session *decorator_session =
|
||||||
regular_session->upgrade(args.string());
|
dynamic_cast<Decorator_nitpicker_session *>(session);
|
||||||
|
|
||||||
Decorator_nitpicker_session *decorator_session =
|
if (decorator_session)
|
||||||
dynamic_cast<Decorator_nitpicker_session *>(session);
|
decorator_session->upgrade(args.string());
|
||||||
|
};
|
||||||
if (decorator_session)
|
_ep.rpc_ep().apply(session_cap, lambda);
|
||||||
decorator_session->upgrade(args.string());
|
|
||||||
|
|
||||||
session->release();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void close(Genode::Session_capability session_cap) override
|
void close(Genode::Session_capability session_cap) override
|
||||||
{
|
{
|
||||||
Rpc_object_base *session = _ep.rpc_ep().lookup_and_lock(session_cap);
|
Genode::Rpc_entrypoint &ep = _ep.rpc_ep();
|
||||||
|
|
||||||
Session_component *regular_session = dynamic_cast<Session_component *>(session);
|
Session_component *regular_session =
|
||||||
|
ep.apply(session_cap, [this] (Session_component *session) {
|
||||||
|
if (session) {
|
||||||
|
_sessions.remove(session);
|
||||||
|
_ep.dissolve(*session);
|
||||||
|
}
|
||||||
|
return session;
|
||||||
|
});
|
||||||
if (regular_session) {
|
if (regular_session) {
|
||||||
_sessions.remove(regular_session);
|
|
||||||
_ep.dissolve(*regular_session);
|
|
||||||
Genode::destroy(_md_alloc, regular_session);
|
Genode::destroy(_md_alloc, regular_session);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (session == _decorator_session) {
|
auto decorator_lambda = [this] (Decorator_nitpicker_session *session) {
|
||||||
_ep.dissolve(*_decorator_session);
|
_ep.dissolve(*_decorator_session);
|
||||||
Genode::destroy(_md_alloc, _decorator_session);
|
|
||||||
_decorator_session = nullptr;
|
_decorator_session = nullptr;
|
||||||
|
return session;
|
||||||
|
};
|
||||||
|
|
||||||
|
if (ep.apply(session_cap, decorator_lambda) == _decorator_session) {
|
||||||
|
Genode::destroy(_md_alloc, _decorator_session);
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (session == _layouter_session) {
|
auto layouter_lambda = [this] (Layouter_nitpicker_session *session) {
|
||||||
_ep.dissolve(*_layouter_session);
|
_ep.dissolve(*_layouter_session);
|
||||||
Genode::destroy(_md_alloc, _layouter_session);
|
|
||||||
_layouter_session = nullptr;
|
_layouter_session = nullptr;
|
||||||
|
return session;
|
||||||
|
};
|
||||||
|
|
||||||
|
if (ep.apply(session_cap, layouter_lambda) == _layouter_session) {
|
||||||
|
Genode::destroy(_md_alloc, _layouter_session);
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -479,102 +479,109 @@ namespace Platform {
|
|||||||
Config_access config_access;
|
Config_access config_access;
|
||||||
|
|
||||||
/* lookup device component for previous device */
|
/* lookup device component for previous device */
|
||||||
Genode::Object_pool<Device_component>::Guard
|
auto lambda = [&] (Device_component *prev)
|
||||||
prev(_ep->lookup_and_lock(prev_device));
|
{
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Start bus scanning after the previous device's location.
|
* Start bus scanning after the previous device's location.
|
||||||
* If no valid device was specified for 'prev_device', start at
|
* If no valid device was specified for 'prev_device',
|
||||||
* the beginning.
|
* start at the beginning.
|
||||||
*/
|
*/
|
||||||
int bus = 0, device = 0, function = -1;
|
int bus = 0, device = 0, function = -1;
|
||||||
|
|
||||||
if (prev) {
|
if (prev) {
|
||||||
Device_config config = prev->config();
|
Device_config config = prev->config();
|
||||||
bus = config.bus_number();
|
bus = config.bus_number();
|
||||||
device = config.device_number();
|
device = config.device_number();
|
||||||
function = config.function_number();
|
function = config.function_number();
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Scan buses for devices.
|
* Scan buses for devices.
|
||||||
* If no device is found, return an invalid capability.
|
* If no device is found, return an invalid capability.
|
||||||
*/
|
*/
|
||||||
Device_config config;
|
Device_config config;
|
||||||
|
|
||||||
while (true) {
|
while (true) {
|
||||||
function += 1;
|
function += 1;
|
||||||
if (!_find_next(bus, device, function, &config, &config_access))
|
if (!_find_next(bus, device, function, &config,
|
||||||
return Device_capability();
|
&config_access))
|
||||||
|
return Device_capability();
|
||||||
|
|
||||||
/* get new bdf values */
|
/* get new bdf values */
|
||||||
bus = config.bus_number();
|
bus = config.bus_number();
|
||||||
device = config.device_number();
|
device = config.device_number();
|
||||||
function = config.function_number();
|
function = config.function_number();
|
||||||
|
|
||||||
/* if filter of driver don't match skip and continue */
|
/* if filter of driver don't match skip and continue */
|
||||||
if ((config.class_code() ^ device_class) & class_mask)
|
if ((config.class_code() ^ device_class) & class_mask)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
/* check that policy permit access to the matched device */
|
/* check that policy permit access to the matched device */
|
||||||
if (permit_device(bus, device, function,
|
if (permit_device(bus, device, function,
|
||||||
config.class_code()))
|
config.class_code()))
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* lookup if we have a extended pci config space */
|
/* lookup if we have a extended pci config space */
|
||||||
Genode::addr_t config_space = lookup_config_space(bus, device,
|
Genode::addr_t config_space =
|
||||||
function);
|
lookup_config_space(bus, device, function);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* A device was found. Create a new device component for the
|
* A device was found. Create a new device component for the
|
||||||
* device and return its capability.
|
* device and return its capability.
|
||||||
*/
|
*/
|
||||||
try {
|
try {
|
||||||
Device_component * dev = new (_device_slab) Device_component(config, config_space, _ep, this, msi_usage());
|
Device_component * dev = new (_device_slab)
|
||||||
|
Device_component(config, config_space, _ep, this,
|
||||||
|
msi_usage());
|
||||||
|
|
||||||
/* if more than one driver uses the device - warn about */
|
/* if more than one driver uses the device - warn about */
|
||||||
if (bdf_in_use.get(Device_config::MAX_BUSES * bus +
|
if (bdf_in_use.get(Device_config::MAX_BUSES * bus +
|
||||||
Device_config::MAX_DEVICES * device +
|
Device_config::MAX_DEVICES * device +
|
||||||
function, 1))
|
function, 1))
|
||||||
PERR("Device %2x:%2x.%u is used by more than one "
|
PERR("Device %2x:%2x.%u is used by more than one "
|
||||||
"driver - session '%s'.", bus, device, function,
|
"driver - session '%s'.", bus, device, function,
|
||||||
_label.string());
|
_label.string());
|
||||||
else
|
else
|
||||||
bdf_in_use.set(Device_config::MAX_BUSES * bus +
|
bdf_in_use.set(Device_config::MAX_BUSES * bus +
|
||||||
Device_config::MAX_DEVICES * device +
|
Device_config::MAX_DEVICES * device +
|
||||||
function, 1);
|
function, 1);
|
||||||
|
|
||||||
_device_list.insert(dev);
|
_device_list.insert(dev);
|
||||||
return _ep->manage(dev);
|
return _ep->manage(dev);
|
||||||
} catch (Genode::Allocator::Out_of_memory) {
|
} catch (Genode::Allocator::Out_of_memory) {
|
||||||
throw Device::Quota_exceeded();
|
throw Device::Quota_exceeded();
|
||||||
}
|
}
|
||||||
|
};
|
||||||
|
return _ep->apply(prev_device, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
void release_device(Device_capability device_cap)
|
void release_device(Device_capability device_cap)
|
||||||
{
|
{
|
||||||
|
auto lambda = [&] (Device_component *device)
|
||||||
|
{
|
||||||
|
if (!device)
|
||||||
|
return;
|
||||||
|
|
||||||
|
unsigned const bus = device->config().bus_number();
|
||||||
|
unsigned const dev = device->config().device_number();
|
||||||
|
unsigned const func = device->config().function_number();
|
||||||
|
|
||||||
|
bdf_in_use.clear(Device_config::MAX_BUSES * bus +
|
||||||
|
Device_config::MAX_DEVICES * dev + func, 1);
|
||||||
|
|
||||||
|
_device_list.remove(device);
|
||||||
|
_ep->dissolve(device);
|
||||||
|
|
||||||
|
if (device->config().valid())
|
||||||
|
destroy(_device_slab, device);
|
||||||
|
else
|
||||||
|
destroy(_md_alloc, device);
|
||||||
|
};
|
||||||
|
|
||||||
/* lookup device component for previous device */
|
/* lookup device component for previous device */
|
||||||
Device_component *device = dynamic_cast<Device_component *>
|
_ep->apply(device_cap, lambda);
|
||||||
(_ep->lookup_and_lock(device_cap));
|
|
||||||
|
|
||||||
if (!device)
|
|
||||||
return;
|
|
||||||
|
|
||||||
unsigned const bus = device->config().bus_number();
|
|
||||||
unsigned const dev = device->config().device_number();
|
|
||||||
unsigned const func = device->config().function_number();
|
|
||||||
|
|
||||||
bdf_in_use.clear(Device_config::MAX_BUSES * bus +
|
|
||||||
Device_config::MAX_DEVICES * dev + func, 1);
|
|
||||||
|
|
||||||
_device_list.remove(device);
|
|
||||||
_ep->dissolve(device);
|
|
||||||
|
|
||||||
if (device->config().valid())
|
|
||||||
destroy(_device_slab, device);
|
|
||||||
else
|
|
||||||
destroy(_md_alloc, device);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Genode::Io_mem_dataspace_capability assign_device(Device_component * device)
|
Genode::Io_mem_dataspace_capability assign_device(Device_component * device)
|
||||||
@ -601,10 +608,8 @@ namespace Platform {
|
|||||||
{
|
{
|
||||||
using namespace Genode;
|
using namespace Genode;
|
||||||
|
|
||||||
Object_pool<Device_component>::Guard
|
return _ep->apply(device_cap, [&] (Device_component *device) {
|
||||||
device(_ep->lookup_and_lock(device_cap));
|
return assign_device(device);});
|
||||||
|
|
||||||
return assign_device(device);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -52,7 +52,6 @@ class Loader::Session_component : public Rpc_object<Session>
|
|||||||
|
|
||||||
void _close(Rom_session_component *rom)
|
void _close(Rom_session_component *rom)
|
||||||
{
|
{
|
||||||
_ep.dissolve(rom);
|
|
||||||
_rom_sessions.remove(rom);
|
_rom_sessions.remove(rom);
|
||||||
destroy(&_md_alloc, rom);
|
destroy(&_md_alloc, rom);
|
||||||
}
|
}
|
||||||
@ -73,7 +72,9 @@ class Loader::Session_component : public Rpc_object<Session>
|
|||||||
Lock::Guard guard(_lock);
|
Lock::Guard guard(_lock);
|
||||||
|
|
||||||
while (_rom_sessions.first()) {
|
while (_rom_sessions.first()) {
|
||||||
_close(_rom_sessions.first()); }
|
_ep.remove(_rom_sessions.first());
|
||||||
|
_close(_rom_sessions.first());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Genode::Session_capability session(char const *args,
|
Genode::Session_capability session(char const *args,
|
||||||
@ -108,10 +109,12 @@ class Loader::Session_component : public Rpc_object<Session>
|
|||||||
{
|
{
|
||||||
Lock::Guard guard(_lock);
|
Lock::Guard guard(_lock);
|
||||||
|
|
||||||
Rpc_object_base *rom = _ep.lookup_and_lock(session);
|
Rom_session_component *component;
|
||||||
|
|
||||||
Rom_session_component *component =
|
_ep.apply(session, [&] (Rom_session_component *rsc) {
|
||||||
dynamic_cast<Rom_session_component *>(rom);
|
component = rsc;
|
||||||
|
if (component) _ep.remove(component);
|
||||||
|
});
|
||||||
|
|
||||||
if (component) {
|
if (component) {
|
||||||
_close(component);
|
_close(component);
|
||||||
|
@ -841,12 +841,12 @@ class Nitpicker::Session_component : public Genode::Rpc_object<Session>,
|
|||||||
|
|
||||||
View_handle view_handle(View_capability view_cap, View_handle handle) override
|
View_handle view_handle(View_capability view_cap, View_handle handle) override
|
||||||
{
|
{
|
||||||
View *view = dynamic_cast<View *>(_ep.lookup_and_lock(view_cap));
|
auto lambda = [&] (View *view)
|
||||||
if (!view) return View_handle();
|
{
|
||||||
|
return (view) ? _view_handle_registry.alloc(*view, handle)
|
||||||
Object_pool<Rpc_object_base>::Guard guard(view);
|
: View_handle();
|
||||||
|
};
|
||||||
return _view_handle_registry.alloc(*view, handle);
|
return _ep.apply(view_cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
View_capability view_capability(View_handle handle) override
|
View_capability view_capability(View_handle handle) override
|
||||||
@ -924,15 +924,12 @@ class Nitpicker::Session_component : public Genode::Rpc_object<Session>,
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
/* lookup targeted session object */
|
/* lookup targeted session object */
|
||||||
Session_component * const session =
|
auto lambda = [this] (Session_component *session)
|
||||||
session_cap.valid() ? dynamic_cast<Session_component *>(_ep.lookup_and_lock(session_cap)) : 0;
|
{
|
||||||
|
_mode.focused_session(session);
|
||||||
_mode.focused_session(session);
|
report_session(_focus_reporter, session);
|
||||||
|
};
|
||||||
if (session)
|
_ep.apply(session_cap, lambda);
|
||||||
session->release();
|
|
||||||
|
|
||||||
report_session(_focus_reporter, session);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void session_control(Label suffix, Session_control control) override
|
void session_control(Label suffix, Session_control control) override
|
||||||
|
@ -44,8 +44,8 @@ Genode::Session_capability Session_component::cap()
bool Session_component::belongs_to(Genode::Session_capability cap)
bool Session_component::belongs_to(Genode::Session_capability cap)
{
{
Object_pool<Session_component>::Guard session(_ep.lookup_and_lock(cap));
return _ep.apply(cap, [this] (Session_component *session) {
return session == this;
return session == this; });
}
}
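Note that the predicate is evaluated entirely inside the functor: a failed lookup passes a null pointer, so the comparison against 'this' simply yields false and no separate validity check is needed. A hypothetical caller (names invented):

if (!session_component.belongs_to(client_provided_cap)) {
	PWRN("capability does not belong to this session");
	return;
}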
@ -204,36 +204,46 @@ namespace Gdb_monitor {
|
|||||||
{
|
{
|
||||||
using namespace Genode;
|
using namespace Genode;
|
||||||
|
|
||||||
Child_session *session = _sessions.lookup_and_lock(session_cap);
|
auto lambda = [&] (Child_session *session) {
|
||||||
if (!session) {
|
if (!session) {
|
||||||
PERR("attempt to upgrade unknown session");
|
PERR("attempt to upgrade unknown session");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
Genode::size_t ram_quota =
|
Genode::size_t ram_quota =
|
||||||
Arg_string::find_arg(args.string(),
|
Arg_string::find_arg(args.string(),
|
||||||
"ram_quota").ulong_value(0);
|
"ram_quota").ulong_value(0);
|
||||||
|
|
||||||
/* forward session quota to child */
|
/* forward session quota to child */
|
||||||
env()->ram_session()->transfer_quota(_child_ram, ram_quota);
|
env()->ram_session()->transfer_quota(_child_ram, ram_quota);
|
||||||
|
|
||||||
session->ram_quota += ram_quota;
|
session->ram_quota += ram_quota;
|
||||||
|
|
||||||
/* inform child about quota upgrade */
|
/* inform child about quota upgrade */
|
||||||
_child_root.upgrade(session_cap, args);
|
_child_root.upgrade(session_cap, args);
|
||||||
|
};
|
||||||
|
|
||||||
|
_sessions.apply(session_cap, lambda);
|
||||||
}
|
}
|
||||||
|
|
||||||
void close(Session_capability session_cap)
|
void close(Session_capability session_cap)
|
||||||
{
|
{
|
||||||
using namespace Genode;
|
using namespace Genode;
|
||||||
|
|
||||||
Child_session *session = _sessions.lookup_and_lock(session_cap);
|
Child_session *session;
|
||||||
if (!session) {
|
|
||||||
PERR("attempt to close unknown session");
|
auto lambda = [&] (Child_session *s) {
|
||||||
return;
|
session = s;
|
||||||
}
|
|
||||||
|
if (!session) {
|
||||||
|
PERR("attempt to close unknown session");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
_sessions.remove(session);
|
||||||
|
};
|
||||||
|
_sessions.apply(session_cap, lambda);
|
||||||
|
|
||||||
Genode::size_t ram_quota = session->ram_quota;
|
Genode::size_t ram_quota = session->ram_quota;
|
||||||
_sessions.remove_locked(session);
|
|
||||||
destroy(env()->heap(), session);
|
destroy(env()->heap(), session);
|
||||||
|
|
||||||
_child_root.close(session_cap);
|
_child_root.close(session_cap);
|
||||||
|
@ -42,13 +42,12 @@ Rm_session_component::Region *Rm_session_component::find_region(void *local_addr
|
|||||||
*offset_in_region = ((addr_t)local_addr - (addr_t)region->start());
|
*offset_in_region = ((addr_t)local_addr - (addr_t)region->start());
|
||||||
// PDBG("offset_in_region = %lx", *offset_in_region);
|
// PDBG("offset_in_region = %lx", *offset_in_region);
|
||||||
|
|
||||||
Object_pool<Dataspace_object>::Guard managed_ds_obj(_managed_ds_map->lookup_and_lock(region->ds_cap()));
|
_managed_ds_map->apply(region->ds_cap(), [&] (Dataspace_object *managed_ds_obj) {
|
||||||
if (managed_ds_obj) {
|
if (managed_ds_obj)
|
||||||
// PDBG("managed dataspace detected");
|
region =
|
||||||
region = managed_ds_obj->rm_session_component()->find_region((void*)*offset_in_region, offset_in_region);
|
managed_ds_obj->rm_session_component()->find_region((void*)*offset_in_region,
|
||||||
// if (region)
|
offset_in_region);
|
||||||
// PDBG("found sub region: start = %p, offset = %lx", region->start(), *offset_in_region);
|
});
|
||||||
}
|
|
||||||
|
|
||||||
return region;
|
return region;
|
||||||
}
|
}
|
||||||
|
@ -118,12 +118,8 @@ namespace Noux {
|
|||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
class Dataspace_registry
|
class Dataspace_registry : public Object_pool<Dataspace_info>
|
||||||
{
|
{
|
||||||
private:
|
|
||||||
|
|
||||||
Object_pool<Dataspace_info> _pool;
|
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
||||||
~Dataspace_registry()
|
~Dataspace_registry()
|
||||||
@@ -136,25 +132,8 @@ namespace Noux {
 				 * created via 'Rm_dataspace_info::fork', are not handled by
 				 * those destructors. So we have to clean them up here.
 				 */
-				while(Dataspace_info *info = _pool.first()) {
-					_pool.remove_locked(info);
-					destroy(env()->heap(), info);
-				}
-			}
-
-			void insert(Dataspace_info *info)
-			{
-				_pool.insert(info);
-			}
-
-			void remove(Dataspace_info *info)
-			{
-				_pool.remove_locked(info);
-			}
-
-			Dataspace_info *lookup_info(Dataspace_capability ds_cap)
-			{
-				return _pool.lookup_and_lock(ds_cap);
+				remove_all([&] (Dataspace_info *info) {
+					destroy(env()->heap(), info); });
 			}
 	};
 
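With Dataspace_registry now being an Object_pool itself, the destructor above drains the pool through remove_all() instead of the former first()/remove_locked() loop, and the insert/remove/lookup_info wrappers disappear. The sketch below shows one plausible shape of such a remove_all(); it is an assumption for illustration, not the actual interface, and it omits locking.

    // Hypothetical shape of a remove_all() that drains a registry through a
    // functor. 'Registry' is a stand-in type, not the real Object_pool.
    #include <cstdio>
    #include <vector>

    template <typename OBJ>
    struct Registry
    {
        std::vector<OBJ*> _objs;

        template <typename FUNC>
        void remove_all(FUNC const &fn)
        {
            while (!_objs.empty()) {
                OBJ *obj = _objs.back();  /* pick one element              */
                _objs.pop_back();         /* unlink it before calling fn   */
                fn(obj);                  /* functor typically destroys it */
            }
        }
    };

    struct Dataspace_info { int id; };

    int main()
    {
        Registry<Dataspace_info> registry;
        registry._objs = { new Dataspace_info{1}, new Dataspace_info{2} };

        registry.remove_all([] (Dataspace_info *info) {
            printf("destroying info %d\n", info->id);
            delete info;  /* stands in for destroy(env()->heap(), info) */
        });
    }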
@@ -172,18 +151,17 @@ namespace Noux {
 
 			~Static_dataspace_info()
 			{
-				Static_dataspace_info *info =
-					dynamic_cast<Static_dataspace_info *>(_ds_registry.lookup_info(ds_cap()));
-
-				if (!info) {
-					PERR("lookup of binary ds info failed");
-					return;
-				}
-
-				_ds_registry.remove(info);
-
-				info->dissolve_users();
+				auto lambda = [this] (Static_dataspace_info *info) {
+					if (!info) {
+						PERR("lookup of binary ds info failed");
+						return;
+					}
+
+					_ds_registry.remove(info);
+
+					info->dissolve_users();
+				};
+				_ds_registry.apply(ds_cap(), lambda);
 			}
 
 			Dataspace_capability fork(Ram_session_capability,
@@ -126,26 +126,31 @@ namespace Noux {
 
 			void close(Genode::Session_capability session)
 			{
-				Rm_session_component * rm_session =
-					dynamic_cast<Rm_session_component *>(_ep.lookup_and_lock(session));
-				if (!rm_session) {
-					PWRN("Unexpected call of close with non-RM-session argument");
-					return;
-				}
-
-				/* use RM dataspace as key to obtain the dataspace info object */
-				Dataspace_capability ds_cap = rm_session->dataspace();
-
-				/* release dataspace info */
-				Dataspace_info *info = _ds_registry.lookup_info(ds_cap);
-				if (!info) {
-					PWRN("Could not lookup dataspace info for local RM session");
-					return;
-				}
-
-				_ds_registry.remove(info);
-
-				info->dissolve_users();
+				Dataspace_info *info;
+
+				auto lambda = [&] (Rm_session_component *rm_session) {
+					if (!rm_session) {
+						PWRN("Unexpected call of close with non-RM-session argument");
+						return;
+					}
+
+					/* use RM dataspace as key to obtain the dataspace info object */
+					Dataspace_capability ds_cap = rm_session->dataspace();
+
+					/* release dataspace info */
+					_ds_registry.apply(ds_cap, [&] (Dataspace_info *di) {
+						info = di;
+						if (!info) {
+							PWRN("Could not lookup dataspace info for local RM session");
+							return;
+						}
+
+						_ds_registry.remove(info);
+
+						info->dissolve_users();
+					});
+				};
+				_ep.apply(session, lambda);
 
 				/* 'rm_session' is deleted by deleting Rm_dataspace_info 'info' */
 				destroy(env()->heap(), info);
@@ -59,18 +59,20 @@ namespace Noux {
 			void close(Genode::Session_capability session)
 			{
 				/* acquire locked session object */
-				Rom_session_component *rom_session =
-					dynamic_cast<Rom_session_component *>(_ep.lookup_and_lock(session));
+				Rom_session_component *rom_session;
 
-				if (!rom_session) {
-					PWRN("Unexpected call of close with non-ROM-session argument");
-					return;
-				}
-
-				_ep.dissolve(rom_session);
+				_ep.apply(session, [&] (Rom_session_component *rsc) {
+					rom_session = rsc;
+
+					if (!rom_session) {
+						PWRN("Unexpected call of close with non-ROM-session argument");
+						return;
+					}
+
+					_ep.dissolve(rom_session);
+				});
 
 				destroy(env()->heap(), rom_session);
 			}
 		};
 }
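Both close() conversions above follow the same discipline: the lambda only captures the session pointer and detaches the object (dissolve(), remove()), while the final destroy() runs only after apply() has returned — presumably so the object is not freed while the pool still operates on it. The following self-contained sketch mirrors that call-site pattern; Entrypoint, Rom_session and the integer capability are placeholders, not Genode types.

    // Sketch of the close() conversion pattern: capture the pointer inside
    // the functor, destroy the object after apply() has returned.
    #include <cstdio>
    #include <map>

    struct Rom_session { int id; };

    struct Entrypoint
    {
        std::map<int, Rom_session*> sessions;

        template <typename FUNC>
        void apply(int cap, FUNC const &func)
        {
            auto it = sessions.find(cap);
            func(it != sessions.end() ? it->second : nullptr);
        }

        void dissolve(Rom_session *s)
        {
            for (auto it = sessions.begin(); it != sessions.end(); ++it)
                if (it->second == s) { sessions.erase(it); return; }
        }
    };

    void close(Entrypoint &ep, int cap)
    {
        Rom_session *rom_session = nullptr;

        ep.apply(cap, [&] (Rom_session *rsc) {
            rom_session = rsc;        /* copy the pointer out of the functor */
            if (!rom_session) {
                printf("unknown session\n");
                return;
            }
            ep.dissolve(rom_session); /* detach while the pool still knows it */
        });

        delete rom_session;           /* stands in for destroy(env()->heap(), ...) */
    }

    int main()
    {
        Entrypoint ep;
        ep.sessions[1] = new Rom_session{ 1 };
        close(ep, 1);
        close(ep, 2);  /* unknown capability: the lambda sees a null pointer */
    }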
@@ -153,22 +153,26 @@ namespace Noux {
 
 			void free(Ram_dataspace_capability ds_cap)
 			{
-				Ram_dataspace_info *ds_info =
-					dynamic_cast<Ram_dataspace_info *>(_registry.lookup_info(ds_cap));
-
-				if (!ds_info) {
-					PERR("RAM free: dataspace lookup failed");
-					return;
-				}
-
-				_registry.remove(ds_info);
-
-				ds_info->dissolve_users();
-
-				_list.remove(ds_info);
-				_used_quota -= ds_info->size();
-
-				env()->ram_session()->free(ds_cap);
+				Ram_dataspace_info *ds_info;
+
+				auto lambda = [&] (Ram_dataspace_info *rdi) {
+					ds_info = rdi;
+
+					if (!ds_info) {
+						PERR("RAM free: dataspace lookup failed");
+						return;
+					}
+
+					_registry.remove(ds_info);
+
+					ds_info->dissolve_users();
+
+					_list.remove(ds_info);
+					_used_quota -= ds_info->size();
+
+					env()->ram_session()->free(ds_cap);
+				};
+				_registry.apply(ds_cap, lambda);
 				destroy(env()->heap(), ds_info);
 			}
 
@@ -124,21 +124,22 @@ class Noux::Rm_session_component : public Rpc_object<Rm_session>
 			Region * const region = _lookup_region_by_addr(addr);
 			if (!region) { return cap(); }
 
-			/* if there is no info for the region it can't be a sub RM */
-			Dataspace_capability ds_cap = region->ds;
-			typedef Object_pool<Dataspace_info>::Guard Info_guard;
-			Info_guard info(_ds_registry.lookup_info(ds_cap));
-			if (!info) { return cap(); }
-
-			/* ask the dataspace info for an appropriate sub RM */
-			addr_t const region_base = region->local_addr;
-			addr_t const region_off = region->offset;
-			addr_t const sub_addr = addr - region_base + region_off;
-			Rm_session_capability sub_rm = info->lookup_rm_session(sub_addr);
-
-			/* if the result is invalid the dataspace is no sub RM */
-			if (!sub_rm.valid()) { return cap(); }
-			return sub_rm;
+			auto lambda = [&] (Dataspace_info *info)
+			{
+				/* if there is no info for the region it can't be a sub RM */
+				if (!info) { return cap(); }
+
+				/* ask the dataspace info for an appropriate sub RM */
+				addr_t const region_base = region->local_addr;
+				addr_t const region_off = region->offset;
+				addr_t const sub_addr = addr - region_base + region_off;
+				Rm_session_capability sub_rm = info->lookup_rm_session(sub_addr);
+
+				/* if the result is invalid the dataspace is no sub RM */
+				if (!sub_rm.valid()) { return cap(); }
+				return sub_rm;
+			};
+			return _ds_registry.apply(region->ds, lambda);
 		}
 
 		/**
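Unlike the void conversions above, lookup_rm_session() needs a result out of the functor: 'return cap();' inside the lambda no longer leaves the RPC function directly, it produces the value that apply() forwards to the caller through 'return _ds_registry.apply(...)'. The sketch below shows such value forwarding with made-up types (Pool, Info and the int keys are assumptions, not Genode interfaces).

    // Sketch of forwarding a return value out of an apply()-style lookup.
    #include <cstdio>
    #include <map>

    struct Info { int sub_rm; };

    struct Pool
    {
        std::map<int, Info> infos;

        template <typename FUNC>
        auto apply(int key, FUNC const &func) -> decltype(func((Info *)nullptr))
        {
            auto it = infos.find(key);
            return func(it != infos.end() ? &it->second : nullptr);
        }
    };

    int lookup_rm_session(Pool &pool, int key)
    {
        auto lambda = [&] (Info *info) {
            if (!info) return -1;   /* counterpart of 'return cap();'          */
            return info->sub_rm;    /* result travels back through apply()     */
        };
        return pool.apply(key, lambda);
    }

    int main()
    {
        Pool pool;
        pool.infos[7] = Info{ 42 };
        printf("%d %d\n", lookup_rm_session(pool, 7), lookup_rm_session(pool, 1));
    }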
@@ -158,49 +159,49 @@ class Noux::Rm_session_component : public Rpc_object<Rm_session>
 		{
 			Lock::Guard guard(_region_lock);
 			for (Region *curr = _regions.first(); curr; curr = curr->next_region()) {
-
-				Dataspace_capability ds;
-
-				Object_pool<Dataspace_info>::Guard info(_ds_registry.lookup_info(curr->ds));
-
-				if (info) {
-
-					ds = info->fork(dst_ram, ds_registry, ep);
-
-					/*
-					 * XXX We could detect dataspaces that are attached
-					 * more than once. For now, we create a new fork
-					 * for each attachment.
-					 */
-
-				} else {
-
-					PWRN("replay: missing ds_info for dataspace at addr 0x%lx",
-					     curr->local_addr);
-
-					/*
-					 * If the dataspace is not a RAM dataspace, assume that
-					 * it's a ROM dataspace.
-					 *
-					 * XXX Handle ROM dataspaces explicitly. For once, we
-					 * need to make sure that they remain available
-					 * until the child process exits even if the parent
-					 * process exits earlier. Furthermore, we would
-					 * like to detect unexpected dataspaces.
-					 */
-					ds = curr->ds;
-				}
-
-				if (!ds.valid()) {
-					PERR("replay: Error while forking dataspace");
-					continue;
-				}
-
-				Rm_session_client(dst_rm).attach(ds, curr->size,
-				                                 curr->offset,
-				                                 true,
-				                                 curr->local_addr);
-			}
+				auto lambda = [&] (Dataspace_info *info)
+				{
+					Dataspace_capability ds;
+					if (info) {
+
+						ds = info->fork(dst_ram, ds_registry, ep);
+
+						/*
+						 * XXX We could detect dataspaces that are attached
+						 * more than once. For now, we create a new fork
+						 * for each attachment.
+						 */
+
+					} else {
+
+						PWRN("replay: missing ds_info for dataspace at addr 0x%lx",
+						     curr->local_addr);
+
+						/*
+						 * If the dataspace is not a RAM dataspace, assume that
+						 * it's a ROM dataspace.
+						 *
+						 * XXX Handle ROM dataspaces explicitly. For once, we
+						 * need to make sure that they remain available
+						 * until the child process exits even if the parent
+						 * process exits earlier. Furthermore, we would
+						 * like to detect unexpected dataspaces.
+						 */
+						ds = curr->ds;
+					}
+
+					if (!ds.valid()) {
+						PERR("replay: Error while forking dataspace");
+						return;
+					}
+
+					Rm_session_client(dst_rm).attach(ds, curr->size,
+					                                 curr->offset,
+					                                 true,
+					                                 curr->local_addr);
+				};
+				_ds_registry.apply(curr->ds, lambda);
+			};
 		}
 
 		void poke(addr_t dst_addr, void const *src, size_t len)
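One detail of the replay() conversion above is easy to miss: the former 'continue;' becomes 'return;', since 'continue' cannot refer to a loop outside the lambda body; returning from the functor merely ends the current apply() call, after which the surrounding for loop proceeds with the next region. A trivial stand-alone illustration (not Genode code):

    // Why 'continue' turns into 'return' once the loop body lives in a lambda.
    #include <cstdio>
    #include <vector>

    int main()
    {
        std::vector<int> regions = { 1, 0, 3 };   /* 0 stands for an invalid dataspace */

        for (int curr : regions) {
            auto lambda = [&] {
                if (!curr) {
                    printf("skipping invalid entry\n");
                    return;              /* acts like 'continue' of the old loop */
                }
                printf("replaying region %d\n", curr);
            };
            lambda();                    /* stands in for _ds_registry.apply(curr->ds, lambda) */
        }
    }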
@@ -235,14 +236,13 @@ class Noux::Rm_session_component : public Rpc_object<Rm_session>
 				local_addr = region->local_addr;
 			}
 
-			Object_pool<Dataspace_info>::Guard info(_ds_registry.lookup_info(ds_cap));
-			if (!info) {
-				PERR("attempt to write to unknown dataspace type");
-				for (;;);
-				return;
-			}
-
-			info->poke(dst_addr - local_addr, src, len);
+			_ds_registry.apply(ds_cap, [&] (Dataspace_info *info) {
+				if (!info) {
+					PERR("attempt to write to unknown dataspace type");
+					for (;;);
+				}
+				info->poke(dst_addr - local_addr, src, len);
+			});
 		}
 
 
@@ -275,20 +275,21 @@ class Noux::Rm_session_component : public Rpc_object<Rm_session>
 					Region(*this, ds, size, offset, local_addr);
 
 			/* register region as user of RAM dataspaces */
+			auto lambda = [&] (Dataspace_info *info)
 			{
-				Object_pool<Dataspace_info>::Guard info(_ds_registry.lookup_info(ds));
-
 				if (info) {
 					info->register_user(*region);
 				} else {
 					if (verbose_attach) {
 						PWRN("Trying to attach unknown dataspace type");
 						PWRN(" ds_info@%p at 0x%lx size=%zd offset=0x%lx",
-						     info.object(), (long)local_addr,
+						     info, (long)local_addr,
 						     Dataspace_client(ds).size(), (long)offset);
 					}
 				}
-			}
+			};
+			_ds_registry.apply(ds, lambda);
 
 
 			/*
 			 * Record attachment for later replay (needed during
|
|||||||
_regions.remove(region);
|
_regions.remove(region);
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
_ds_registry.apply(region->ds, [&] (Dataspace_info *info) {
|
||||||
Object_pool<Dataspace_info>::Guard info(_ds_registry.lookup_info(region->ds));
|
if (info) info->unregister_user(*region); });
|
||||||
if (info) info->unregister_user(*region);
|
|
||||||
}
|
|
||||||
|
|
||||||
destroy(env()->heap(), region);
|
destroy(env()->heap(), region);
|
||||||
|
|
||||||
|
@@ -69,17 +69,16 @@ namespace Noux {
 				 * Lookup and lock ds info instead of directly accessing
 				 * the '_ds_info' member.
 				 */
-				Object_pool<Dataspace_info>::Guard
-					info(_ds_registry.lookup_info(_ds_info.ds_cap()));
-
-				if (!info) {
-					PERR("~Rom_session_component: unexpected !info");
-					return;
-				}
-
-				_ds_registry.remove(&_ds_info);
-
-				info->dissolve_users();
+				_ds_registry.apply(_ds_info.ds_cap(), [this] (Dataspace_info *info) {
+					if (!info) {
+						PERR("~Rom_session_component: unexpected !info");
+						return;
+					}
+
+					_ds_registry.remove(&_ds_info);
+
+					info->dissolve_users();
+				});
 			}
 
 