base: scoped access of Native_thread

With planned removal of Thread:: exceptions, we need to consider that a
'Thread' object may exist without a valid 'Stack' and therefore without
a valid 'Native_thread', which is hosted as part of the 'Stack'.

This patch reworks the code that accesses the 'Native_thread' to use the
new 'Thread::with_native_thread' interface. Within the local scope,
the native thread is referred to as 'nt'.

The _init_platform_thread() and _deinit_platform_thread() methods have
been replaced by _init_native_thread() and _deinit_native_thread(),
which take a 'Stack &' as argument.

As a safety caution, 'Native_thread' objects can no longer be copied.

Issue 
This commit is contained in:
Norman Feske 2025-03-28 22:57:34 +01:00
parent d241baec61
commit aa9ff3894c
65 changed files with 1113 additions and 866 deletions

@ -101,7 +101,9 @@ void Ipc_pager::acknowledge_wakeup()
Untyped_capability Pager_entrypoint::_pager_object_cap(unsigned long badge)
{
return Capability_space::import(native_thread().l4id, Rpc_obj_key(badge));
return with_native_thread(
[&] (Native_thread &nt) { return Capability_space::import(nt.l4id, Rpc_obj_key(badge)); },
[&] { return Untyped_capability(); });
}

@ -35,21 +35,30 @@ void Thread::_thread_start()
Thread::Start_result Thread::start()
{
if (!_stack)
return Start_result::DENIED;
Stack &stack = *_stack;
Native_thread &nt = stack.native_thread();
/* create and start platform thread */
native_thread().pt = new (platform().core_mem_alloc())
Platform_thread(platform_specific().core_pd(), _stack->name().string());
try {
nt.pt = new (platform().core_mem_alloc())
Platform_thread(platform_specific().core_pd(), stack.name().string());
}
catch (...) { return Start_result::DENIED; }
native_thread().pt->pager(platform_specific().core_pager());
native_thread().l4id = native_thread().pt->native_thread_id();
nt.pt->pager(platform_specific().core_pager());
nt.l4id = nt.pt->native_thread_id();
native_thread().pt->start((void *)_thread_start, stack_top());
nt.pt->start((void *)_thread_start, (void *)stack.top());
return Start_result::OK;
}
void Thread::_deinit_platform_thread()
void Thread::_deinit_native_thread(Stack &stack)
{
/* destruct platform thread */
destroy(platform().core_mem_alloc(), native_thread().pt);
destroy(platform().core_mem_alloc(), stack.native_thread().pt);
}

@ -15,7 +15,7 @@
#define _INCLUDE__BASE__INTERNAL__NATIVE_THREAD_H_
/* Genode includes */
#include <base/stdint.h>
#include <util/noncopyable.h>
/* L4/Fiasco includes */
#include <fiasco/syscall.h>
@ -25,9 +25,9 @@ namespace Core { struct Platform_thread; }
namespace Genode { struct Native_thread; }
struct Genode::Native_thread
struct Genode::Native_thread : Noncopyable
{
Fiasco::l4_threadid_t l4id;
Fiasco::l4_threadid_t l4id { };
/**
* Only used in core
@ -36,7 +36,9 @@ struct Genode::Native_thread
* thread object, which is going to be destroyed on destruction of the
* 'Thread'.
*/
Core::Platform_thread *pt;
struct { Core::Platform_thread *pt = nullptr; };
Native_thread() { }
};
#endif /* _INCLUDE__BASE__INTERNAL__NATIVE_THREAD_H_ */

@ -42,7 +42,7 @@ void Genode::prepare_init_main_thread() { }
void Thread::_thread_bootstrap() { }
void Thread::_init_platform_thread(size_t, Type type)
void Thread::_init_native_thread(Stack &, size_t, Type type)
{
if (type == NORMAL) return;

@ -19,22 +19,21 @@
#define _INCLUDE__FOC__NATIVE_THREAD_H_
/* Genode includes */
#include <base/stdint.h>
#include <util/noncopyable.h>
#include <foc/receive_window.h>
#include <foc/syscall.h>
namespace Genode { struct Native_thread; }
struct Genode::Native_thread
struct Genode::Native_thread : Noncopyable
{
Foc::l4_cap_idx_t kcap = 0;
Foc::l4_cap_idx_t kcap { };
/* receive window for capability selectors received at the server side */
Receive_window rcv_window { };
Native_thread() { }
explicit Native_thread(Foc::l4_cap_idx_t kcap) : kcap(kcap) { }
};
#endif /* _INCLUDE__FOC__NATIVE_THREAD_H_ */

@ -29,40 +29,13 @@
using namespace Core;
void Thread::_deinit_platform_thread()
{
warning(__func__, ": not implemented yet!");
}
void Thread::_deinit_native_thread(Stack &) { }
void Thread::_init_platform_thread(size_t, Type) { }
void Thread::_init_native_thread(Stack &, size_t, Type) { }
Thread::Start_result Thread::start()
{
using namespace Foc;
/* create and start platform thread */
Platform_thread &pt = *new (platform().core_mem_alloc())
Platform_thread(_stack->name().string());
platform_specific().core_pd().bind_thread(pt);
l4_utcb_t * const foc_utcb = (l4_utcb_t *)(pt.utcb());
native_thread() = Native_thread(pt.gate().remote);
utcb()->foc_utcb = foc_utcb;
_thread_cap =
reinterpret_cap_cast<Cpu_thread>(Native_capability(pt.thread().local));
pt.pager(platform_specific().core_pager());
l4_utcb_tcr_u(foc_utcb)->user[UTCB_TCR_BADGE] = (unsigned long) pt.gate().local.data();
l4_utcb_tcr_u(foc_utcb)->user[UTCB_TCR_THREAD_OBJ] = (addr_t)this;
pt.start((void *)_thread_start, stack_top());
namespace {
struct Core_trace_source : public Core::Trace::Source::Info_accessor,
private Core::Trace::Control,
@ -95,9 +68,9 @@ Thread::Start_result Thread::start()
}
return { Session_label("core"), thread.name(),
Trace::Execution_time(ec_time, sc_time, 10000,
platform_thread.prio()),
thread._affinity };
Genode::Trace::Execution_time(ec_time, sc_time, 10000,
platform_thread.prio()),
thread.affinity() };
}
Core_trace_source(Core::Trace::Source_registry &registry, Thread &t,
@ -109,9 +82,44 @@ Thread::Start_result Thread::start()
registry.insert(this);
}
};
}
new (platform().core_mem_alloc()) Core_trace_source(Core::Trace::sources(),
*this, pt);
Thread::Start_result Thread::start()
{
using namespace Foc;
try {
/* create and start platform thread */
Platform_thread &pt = *new (platform().core_mem_alloc())
Platform_thread(_stack->name().string());
platform_specific().core_pd().bind_thread(pt);
l4_utcb_t * const foc_utcb = (l4_utcb_t *)(pt.utcb());
with_native_thread([&] (Native_thread &nt) {
nt.kcap = pt.gate().remote; });
utcb()->foc_utcb = foc_utcb;
_thread_cap =
reinterpret_cap_cast<Cpu_thread>(Native_capability(pt.thread().local));
pt.pager(platform_specific().core_pager());
l4_utcb_tcr_u(foc_utcb)->user[UTCB_TCR_BADGE] = (unsigned long) pt.gate().local.data();
l4_utcb_tcr_u(foc_utcb)->user[UTCB_TCR_THREAD_OBJ] = (addr_t)this;
pt.start((void *)_thread_start, stack_top());
try {
new (platform().core_mem_alloc())
Core_trace_source(Core::Trace::sources(), *this, pt);
}
catch (...) { }
}
catch (...) { return Start_result::DENIED; }
return Start_result::OK;
}

@ -35,6 +35,17 @@
static inline void thread_yield() { Foc::l4_thread_yield(); }
static inline Foc::l4_cap_idx_t foc_cap_idx(Genode::Thread *thread_ptr)
{
if (!thread_ptr)
return Foc::MAIN_THREAD_CAP;
return thread_ptr->with_native_thread(
[&] (Genode::Native_thread &nt) { return nt.kcap; },
[&] { return Foc::MAIN_THREAD_CAP; });
}
/**
* Custom ExchangeRegisters wrapper for waking up a thread
*
@ -44,13 +55,9 @@ static inline void thread_yield() { Foc::l4_thread_yield(); }
*
* \return true if the thread was in blocking state
*/
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_base)
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_ptr)
{
Foc::l4_cap_idx_t tid = thread_base ?
thread_base->native_thread().kcap :
Foc::MAIN_THREAD_CAP;
Foc::l4_cap_idx_t irq = tid + Foc::THREAD_IRQ_CAP;
Foc::l4_irq_trigger(irq);
Foc::l4_irq_trigger(foc_cap_idx(thread_ptr) + Foc::THREAD_IRQ_CAP);
return true;
}
@ -58,12 +65,9 @@ static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_base)
/**
* Yield CPU time to the specified thread
*/
static inline void thread_switch_to(Genode::Thread *thread_base)
static inline void thread_switch_to(Genode::Thread *thread_ptr)
{
Foc::l4_cap_idx_t tid = thread_base ?
thread_base->native_thread().kcap :
Foc::MAIN_THREAD_CAP;
Foc::l4_thread_switch(tid);
Foc::l4_thread_switch(foc_cap_idx(thread_ptr));
}
@ -78,13 +82,8 @@ __attribute__((used))
static void thread_stop_myself(Genode::Thread *)
{
using namespace Foc;
Genode::Thread *myself = Genode::Thread::myself();
Foc::l4_cap_idx_t tid = myself ?
myself->native_thread().kcap :
Foc::MAIN_THREAD_CAP;
Foc::l4_cap_idx_t irq = tid + THREAD_IRQ_CAP;
l4_irq_receive(irq, L4_IPC_NEVER);
l4_irq_receive(foc_cap_idx(Genode::Thread::myself()) + THREAD_IRQ_CAP,
L4_IPC_NEVER);
}
#endif /* _INCLUDE__BASE__INTERNAL__LOCK_HELPER_H_ */

@ -310,52 +310,57 @@ Rpc_request Genode::ipc_reply_wait(Reply_capability const &,
Msgbuf_base &reply_msg,
Msgbuf_base &request_msg)
{
Receive_window &rcv_window = Thread::myself()->native_thread().rcv_window;
return Thread::myself()->with_native_thread([&] (Native_thread &nt) {
bool need_to_wait = false;
Receive_window &rcv_window = nt.rcv_window;
for (;;) {
bool need_to_wait = false;
request_msg.reset();
for (;;) {
/* prepare receive window in UTCB */
addr_t rcv_cap_sel = rcv_window.rcv_cap_sel_base();
for (size_t i = 0; i < Msgbuf_base::MAX_CAPS_PER_MSG; i++) {
l4_utcb_br()->br[i] = rcv_cap_sel | L4_RCV_ITEM_SINGLE_CAP;
rcv_cap_sel += L4_CAP_SIZE;
request_msg.reset();
/* prepare receive window in UTCB */
addr_t rcv_cap_sel = rcv_window.rcv_cap_sel_base();
for (size_t i = 0; i < Msgbuf_base::MAX_CAPS_PER_MSG; i++) {
l4_utcb_br()->br[i] = rcv_cap_sel | L4_RCV_ITEM_SINGLE_CAP;
rcv_cap_sel += L4_CAP_SIZE;
}
l4_utcb_br()->bdr &= ~L4_BDR_OFFSET_MASK;
l4_msgtag_t request_tag;
l4_umword_t label = 0; /* kernel-protected label of invoked capability */
if (exc.value != Rpc_exception_code::INVALID_OBJECT && !need_to_wait) {
l4_msgtag_t const reply_tag = copy_msgbuf_to_utcb(reply_msg, exc.value);
request_tag = l4_ipc_reply_and_wait(l4_utcb(), reply_tag, &label, L4_IPC_SEND_TIMEOUT_0);
} else {
request_tag = l4_ipc_wait(l4_utcb(), &label, L4_IPC_NEVER);
}
if (ipc_error(request_tag, false)) {
need_to_wait = true;
continue;
} else need_to_wait = false;
/* copy request message from the UTCBs message registers */
unsigned long const badge =
extract_msg_from_utcb(request_tag, rcv_window, request_msg);
/* ignore request if we detect a forged badge */
if (!badge_matches_label(badge, label)) {
raw("badge does not match label, ignoring request");
continue;
}
return Rpc_request(Native_capability(), badge);
}
l4_utcb_br()->bdr &= ~L4_BDR_OFFSET_MASK;
l4_msgtag_t request_tag;
l4_umword_t label = 0; /* kernel-protected label of invoked capability */
if (exc.value != Rpc_exception_code::INVALID_OBJECT
&& !need_to_wait) {
l4_msgtag_t const reply_tag = copy_msgbuf_to_utcb(reply_msg, exc.value);
request_tag = l4_ipc_reply_and_wait(l4_utcb(), reply_tag, &label, L4_IPC_SEND_TIMEOUT_0);
} else {
request_tag = l4_ipc_wait(l4_utcb(), &label, L4_IPC_NEVER);
}
if (ipc_error(request_tag, false)) {
need_to_wait = true;
continue;
} else need_to_wait = false;
/* copy request message from the UTCBs message registers */
unsigned long const badge =
extract_msg_from_utcb(request_tag, rcv_window, request_msg);
/* ignore request if we detect a forged badge */
if (!badge_matches_label(badge, label)) {
raw("badge does not match label, ignoring request");
continue;
}
return Rpc_request(Native_capability(), badge);
}
},
[&] () -> Rpc_request {
sleep_forever(); /* ipc_reply_wait called by uninitialized thread */
});
}
@ -363,7 +368,8 @@ Ipc_server::Ipc_server()
:
Native_capability((Cap_index*)Foc::l4_utcb_tcr()->user[Foc::UTCB_TCR_BADGE])
{
Thread::myself()->native_thread().rcv_window.init();
Thread::myself()->with_native_thread([&] (Native_thread &nt) {
nt.rcv_window.init(); });
}

@ -46,11 +46,11 @@ static Thread_capability main_thread_cap(Thread_capability main_cap = { })
}
void Thread::_deinit_platform_thread()
void Thread::_deinit_native_thread(Stack &stack)
{
using namespace Foc;
if (native_thread().kcap) {
if (stack.native_thread().kcap) {
Cap_index *i = (Cap_index*)l4_utcb_tcr_u(utcb()->foc_utcb)->user[UTCB_TCR_BADGE];
cap_map().remove(i);
}
@ -61,7 +61,7 @@ void Thread::_deinit_platform_thread()
}
void Thread::_init_platform_thread(size_t weight, Type type)
void Thread::_init_native_thread(Stack &stack, size_t weight, Type type)
{
_init_cpu_session_and_trace_control();
@ -74,7 +74,7 @@ void Thread::_init_platform_thread(size_t weight, Type type)
}
/* adjust values whose computation differs for a main thread */
native_thread().kcap = Foc::MAIN_THREAD_CAP;
stack.native_thread().kcap = Foc::MAIN_THREAD_CAP;
_thread_cap = main_thread_cap();
if (_thread_cap.failed()) {
@ -104,7 +104,8 @@ Thread::Start_result Thread::start()
Foc::l4_utcb_t * const foc_utcb = (Foc::l4_utcb_t *)state.utcb;
utcb()->foc_utcb = foc_utcb;
native_thread() = Native_thread(state.kcap);
with_native_thread([&] (Native_thread &nt) {
nt.kcap = state.kcap; });
Cap_index *i = cap_map().insert(state.id, state.kcap);
l4_utcb_tcr_u(foc_utcb)->user[UTCB_TCR_BADGE] = (unsigned long) i;

@ -423,9 +423,11 @@ struct Foc_vcpu : Thread, Noncopyable
/* consume notification */
while (vcpu->sticky_flags) {
Foc::l4_cap_idx_t tid = native_thread().kcap;
Foc::l4_cap_idx_t irq = tid + Foc::TASK_VCPU_IRQ_CAP;
l4_irq_receive(irq, L4_IPC_RECV_TIMEOUT_0);
with_native_thread([&] (Native_thread &nt) {
Foc::l4_cap_idx_t tid = nt.kcap;
Foc::l4_cap_idx_t irq = tid + Foc::TASK_VCPU_IRQ_CAP;
l4_irq_receive(irq, L4_IPC_RECV_TIMEOUT_0);
});
}
}
}
@ -449,9 +451,11 @@ struct Foc_vcpu : Thread, Noncopyable
/* consume notification */
while (vcpu->sticky_flags) {
Foc::l4_cap_idx_t tid = native_thread().kcap;
Foc::l4_cap_idx_t irq = tid + Foc::TASK_VCPU_IRQ_CAP;
l4_irq_receive(irq, L4_IPC_RECV_TIMEOUT_0);
with_native_thread([&] (Native_thread &nt) {
Foc::l4_cap_idx_t tid = nt.kcap;
Foc::l4_cap_idx_t irq = tid + Foc::TASK_VCPU_IRQ_CAP;
l4_irq_receive(irq, L4_IPC_RECV_TIMEOUT_0);
});
}
}
@ -1341,9 +1345,11 @@ struct Foc_vcpu : Thread, Noncopyable
_state_request = PAUSE;
/* Trigger vCPU exit */
Foc::l4_cap_idx_t tid = native_thread().kcap;
Foc::l4_cap_idx_t irq = tid + Foc::TASK_VCPU_IRQ_CAP;
Foc::l4_irq_trigger(irq);
with_native_thread([&] (Native_thread &nt) {
Foc::l4_cap_idx_t tid = nt.kcap;
Foc::l4_cap_idx_t irq = tid + Foc::TASK_VCPU_IRQ_CAP;
Foc::l4_irq_trigger(irq);
});
_wake_up.up();
}

@ -134,7 +134,7 @@ class Timer::Device
void entry() override
{
_myself = native_thread().kcap;
with_native_thread([&] (Native_thread &nt) { _myself = nt.kcap; });
for (;;) {

@ -199,12 +199,11 @@ void Pager_entrypoint::dissolve(Pager_object &o)
Pager_capability Pager_entrypoint::manage(Pager_object &o)
{
unsigned const cpu = o.location().xpos();
if (cpu >= _cpus) {
if (cpu >= _cpus)
error("Invalid location of pager object ", cpu);
} else {
o.start_paging(_threads[cpu]._kobj,
*_threads[cpu].native_thread().platform_thread);
}
else
_threads[cpu].with_native_thread([&] (Native_thread &nt) {
o.start_paging(_threads[cpu]._kobj, *nt.platform_thread); });
return reinterpret_cap_cast<Pager_object>(o.cap());
}

@ -32,13 +32,7 @@ using namespace Core;
namespace Hw { extern Untyped_capability _main_thread_cap; }
Thread::Start_result Thread::start()
{
/* start thread with stack pointer at the top of stack */
native_thread().platform_thread->start((void *)&_thread_start, stack_top());
if (_thread_cap.failed())
return Start_result::DENIED;
namespace {
struct Trace_source : public Core::Trace::Source::Info_accessor,
private Core::Trace::Control,
@ -51,11 +45,11 @@ Thread::Start_result Thread::start()
*/
Info trace_source_info() const override
{
Platform_thread * t = thread.native_thread().platform_thread;
Genode::Trace::Execution_time execution_time { 0, 0 };
Trace::Execution_time execution_time { 0, 0 };
if (t)
execution_time = t->execution_time();
thread.with_native_thread([&] (Native_thread &nt) {
if (nt.platform_thread)
execution_time = nt.platform_thread->execution_time(); });
return { Session_label("core"), thread.name(),
execution_time, thread.affinity() };
@ -70,34 +64,52 @@ Thread::Start_result Thread::start()
registry.insert(this);
}
};
}
Thread::Start_result Thread::start()
{
if (!_stack)
return Start_result::DENIED;
Stack &stack = *_stack;
Native_thread &nt = stack.native_thread();
/* start thread with stack pointer at the top of stack */
nt.platform_thread->start((void *)&_thread_start, (void *)stack.top());
if (_thread_cap.failed())
return Start_result::DENIED;;
/* create trace sources for core threads */
new (platform().core_mem_alloc()) Trace_source(Core::Trace::sources(), *this);
try {
new (platform().core_mem_alloc()) Trace_source(Core::Trace::sources(), *this);
} catch (...) { }
return Start_result::OK;
}
void Thread::_deinit_platform_thread()
void Thread::_deinit_native_thread(Stack &stack)
{
/* destruct platform thread */
destroy(platform().core_mem_alloc(), native_thread().platform_thread);
destroy(platform().core_mem_alloc(), stack.native_thread().platform_thread);
}
void Thread::_init_platform_thread(size_t, Type type)
void Thread::_init_native_thread(Stack &stack, size_t, Type type)
{
if (type == NORMAL) {
native_thread().platform_thread = new (platform().core_mem_alloc())
Platform_thread(_stack->name(), _stack->utcb());
stack.native_thread().platform_thread = new (platform().core_mem_alloc())
Platform_thread(_stack->name(), stack.utcb());
return;
}
/* remap initial main-thread UTCB according to stack-area spec */
map_local(Platform::core_main_thread_phys_utcb(),
(addr_t)&_stack->utcb(),
(addr_t)&stack.utcb(),
max(sizeof(Native_utcb) / get_page_size(), (size_t)1));
/* adjust initial object state in case of a main thread */
native_thread().cap = Hw::_main_thread_cap;
stack.native_thread().cap = Hw::_main_thread_cap;
}

@ -32,11 +32,16 @@ static inline void thread_yield() { Kernel::yield_thread(); }
/**
* Return kernel name of thread t
*/
static inline Kernel::capid_t
native_thread_id(Genode::Thread * const t)
static inline Kernel::capid_t native_thread_id(Genode::Thread *thread_ptr)
{
using Genode::Capability_space::capid;
return t ? capid(t->native_thread().cap) : capid(Hw::_main_thread_cap);
if (!thread_ptr)
return capid(Hw::_main_thread_cap);
return thread_ptr->with_native_thread(
[&] (Genode::Native_thread &nt) { return capid(nt.cap); },
[&] { return Kernel::cap_id_invalid(); });
}
@ -52,10 +57,9 @@ static inline void thread_switch_to(Genode::Thread *)
/**
* Resume thread t and return wether t was paused or not
*/
static inline bool
thread_check_stopped_and_restart(Genode::Thread * const t)
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_ptr)
{
return Kernel::restart_thread(native_thread_id(t));
return Kernel::restart_thread(native_thread_id(thread_ptr));
}

@ -14,19 +14,21 @@
#ifndef _INCLUDE__BASE__INTERNAL__NATIVE_THREAD_H_
#define _INCLUDE__BASE__INTERNAL__NATIVE_THREAD_H_
#include <base/stdint.h>
#include <util/noncopyable.h>
#include <base/native_capability.h>
namespace Genode { struct Native_thread; }
namespace Core { class Platform_thread; }
struct Genode::Native_thread
struct Genode::Native_thread : Noncopyable
{
Core::Platform_thread *platform_thread;
Native_capability cap;
Native_capability cap { };
struct { Core::Platform_thread *platform_thread; };
Native_thread() { }
};
#endif /* _INCLUDE__BASE__INTERNAL__NATIVE_THREAD_H_ */

@ -165,11 +165,18 @@ Genode::Rpc_request Genode::ipc_reply_wait(Reply_capability const &,
}
Ipc_server::Ipc_server()
:
Native_capability(Thread::myself() ? Thread::myself()->native_thread().cap
: Hw::_main_thread_cap)
{ }
static inline Native_capability my_native_thread_cap()
{
if (!Thread::myself())
return Hw::_main_thread_cap;
return Thread::myself()->with_native_thread(
[&] (Native_thread &nt) { return nt.cap; },
[&] { return Native_capability(); });
}
Ipc_server::Ipc_server() : Native_capability(my_native_thread_cap()) { }
Ipc_server::~Ipc_server() { }

@ -84,8 +84,10 @@ void Thread::_thread_start()
void Thread::_thread_bootstrap()
{
Kernel::capid_t capid = myself()->utcb()->cap_get(Native_utcb::THREAD_MYSELF);
native_thread().cap = Capability_space::import(capid);
if (native_thread().cap.valid())
Kernel::ack_cap(Capability_space::capid(native_thread().cap));
with_native_thread([&] (Native_thread &nt) {
Kernel::capid_t capid = myself()->utcb()->cap_get(Native_utcb::THREAD_MYSELF);
nt.cap = Capability_space::import(capid);
if (nt.cap.valid())
Kernel::ack_cap(Capability_space::capid(nt.cap));
});
}

@ -51,14 +51,14 @@ static Thread_capability main_thread_cap(Thread_capability main_cap = { })
** Thread **
************/
void Thread::_init_platform_thread(size_t weight, Type type)
void Thread::_init_native_thread(Stack &stack, size_t weight, Type type)
{
_init_cpu_session_and_trace_control();
if (type == NORMAL) {
/* create server object */
addr_t const utcb = (addr_t)&_stack->utcb();
addr_t const utcb = (addr_t)&stack.utcb();
_thread_cap = _cpu_session->create_thread(pd_session_cap(), name(), _affinity,
Weight(weight), utcb);
@ -67,7 +67,7 @@ void Thread::_init_platform_thread(size_t weight, Type type)
/* if we got reinitialized we have to get rid of the old UTCB */
size_t const utcb_size = sizeof(Native_utcb);
addr_t const stack_area = stack_area_virtual_base();
addr_t const utcb_new = (addr_t)&_stack->utcb() - stack_area;
addr_t const utcb_new = (addr_t)&stack.utcb() - stack_area;
/* remap initial main-thread UTCB according to stack-area spec */
if (env_stack_area_region_map->attach(Hw::_main_thread_utcb_ds, {
@ -81,12 +81,12 @@ void Thread::_init_platform_thread(size_t weight, Type type)
error("failed to attach UTCB to local address space");
/* adjust initial object state in case of a main thread */
native_thread().cap = Hw::_main_thread_cap;
stack.native_thread().cap = Hw::_main_thread_cap;
_thread_cap = main_thread_cap();
}
void Thread::_deinit_platform_thread()
void Thread::_deinit_native_thread(Stack &stack)
{
if (!_cpu_session) {
error("Thread::_cpu_session unexpectedly not defined");
@ -98,8 +98,8 @@ void Thread::_deinit_platform_thread()
[&] (Cpu_session::Create_thread_error) { });
/* detach userland stack */
size_t const size = sizeof(_stack->utcb());
addr_t utcb = Stack_allocator::addr_to_base(_stack) +
size_t const size = sizeof(stack.utcb());
addr_t utcb = Stack_allocator::addr_to_base(&stack) +
stack_virtual_size() - size - stack_area_virtual_base();
env_stack_area_region_map->detach(utcb);
}
@ -107,6 +107,11 @@ void Thread::_deinit_platform_thread()
Thread::Start_result Thread::start()
{
if (!_stack)
return Start_result::DENIED;
Stack &stack = *_stack;
while (avail_capability_slab() < 5)
upgrade_capability_slab();
@ -115,19 +120,19 @@ Thread::Start_result Thread::start()
Cpu_thread_client cpu_thread(cap);
/* attach UTCB at top of stack */
size_t const size = sizeof(_stack->utcb());
size_t const size = sizeof(stack.utcb());
return env_stack_area_region_map->attach(cpu_thread.utcb(), {
.size = size,
.offset = { },
.use_at = true,
.at = Stack_allocator::addr_to_base(_stack)
.at = Stack_allocator::addr_to_base(&stack)
+ stack_virtual_size() - size - stack_area_virtual_base(),
.executable = { },
.writeable = true
}).convert<Start_result>(
[&] (Region_map::Range) {
/* start execution with initial IP and aligned SP */
cpu_thread.start((addr_t)_thread_start, _stack->top());
cpu_thread.start((addr_t)_thread_start, stack.top());
return Start_result::OK;
},
[&] (Region_map::Attach_error) {

@ -27,11 +27,14 @@ void Genode::init_rpc_cap_alloc(Parent &) { }
Native_capability Rpc_entrypoint::_alloc_rpc_cap(Pd_session &, Native_capability,
addr_t)
{
return Thread::native_thread().epoll.alloc_rpc_cap();
return with_native_thread(
[&] (Native_thread &nt) { return nt.epoll.alloc_rpc_cap(); },
[&] { return Native_capability(); });
}
void Rpc_entrypoint::_free_rpc_cap(Pd_session &, Native_capability cap)
{
Thread::native_thread().epoll.free_rpc_cap(cap);
with_native_thread([&] (Native_thread &nt) {
nt.epoll.free_rpc_cap(cap); });
}

@ -58,15 +58,19 @@ void Thread::_thread_start()
}
void Thread::_init_platform_thread(size_t, Type) { }
void Thread::_init_native_thread(Stack &, size_t, Type) { }
void Thread::_deinit_platform_thread() { }
void Thread::_deinit_native_thread(Stack &) { }
Thread::Start_result Thread::start()
{
native_thread().tid = lx_create_thread(Thread::_thread_start, stack_top());
native_thread().pid = lx_getpid();
return Start_result::OK;
return with_native_thread(
[&] (Native_thread &nt) {
nt.tid = lx_create_thread(Thread::_thread_start, stack_top());
nt.pid = lx_getpid();
return Start_result::OK;
},
[&] { return Start_result::DENIED; });
}

@ -37,12 +37,23 @@ static inline void thread_yield()
}
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_base)
static inline int *futex_counter_ptr(Genode::Thread *thread_ptr)
{
const int *futex_counter_ptr = thread_base ?
&thread_base->native_thread().futex_counter :
&main_thread_futex_counter;
return lx_futex(futex_counter_ptr, LX_FUTEX_WAKE, 1);
if (!thread_ptr)
return &main_thread_futex_counter;
return thread_ptr->with_native_thread(
[&] (Genode::Native_thread &nt) { return &nt.futex_counter; },
[&] {
Genode::error("attempt to access futex of invalid thread");
return (int *)nullptr;
});
}
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_ptr)
{
return lx_futex(futex_counter_ptr(thread_ptr), LX_FUTEX_WAKE, 1);
}
@ -56,10 +67,7 @@ static inline void thread_stop_myself(Genode::Thread *myself)
* 'thread_check_stopped_and_restart()' function will get called
* repeatedly until this thread has actually executed the syscall.
*/
const int *futex_counter_ptr = myself ?
&myself->native_thread().futex_counter :
&main_thread_futex_counter;
lx_futex(futex_counter_ptr, LX_FUTEX_WAIT, 0);
lx_futex(futex_counter_ptr(myself), LX_FUTEX_WAIT, 0);
}
#endif /* _INCLUDE__BASE__INTERNAL__LOCK_HELPER_H_ */

@ -22,92 +22,82 @@
namespace Genode { struct Native_thread; }
class Genode::Native_thread
struct Genode::Native_thread : Noncopyable
{
private:
/*
* Unfortunately, both - PID and TID - are needed for lx_tgkill()
*/
unsigned int tid = 0; /* Native thread ID type as returned by the
'clone' system call */
unsigned int pid = 0; /* process ID (resp. thread-group ID) */
/*
* Noncopyable
*/
Native_thread(Native_thread const &);
Native_thread &operator = (Native_thread const &);
bool is_ipc_server = false;
public:
/**
* Natively aligned memory location used in the lock implementation
*/
int futex_counter __attribute__((aligned(sizeof(Genode::addr_t)))) = 0;
/*
* Unfortunately, both - PID and TID - are needed for lx_tgkill()
*/
unsigned int tid = 0; /* Native thread ID type as returned by the
'clone' system call */
unsigned int pid = 0; /* process ID (resp. thread-group ID) */
struct Meta_data;
bool is_ipc_server = false;
/**
* Opaque pointer to additional thread-specific meta data
*
* This pointer is used by hybrid Linux/Genode programs to maintain
* POSIX-thread-related meta data. For non-hybrid Genode programs, it
* remains unused.
*/
struct { Meta_data *meta_data = nullptr; };
/**
* Natively aligned memory location used in the lock implementation
*/
int futex_counter __attribute__((aligned(sizeof(Genode::addr_t)))) = 0;
class Epoll
{
private:
struct Meta_data;
Lx_socketpair _control { };
/**
* Opaque pointer to additional thread-specific meta data
*
* This pointer is used by hybrid Linux/Genode programs to maintain
* POSIX-thread-related meta data. For non-hybrid Genode programs, it
* remains unused.
*/
Meta_data *meta_data = nullptr;
Lx_epoll_sd const _epoll;
class Epoll
{
private:
void _add (Lx_sd);
void _remove(Lx_sd);
Lx_socketpair _control { };
bool _rpc_ep_exited = false;
Lx_epoll_sd const _epoll;
struct Control_function : Interface
{
virtual void execute() = 0;
};
void _add (Lx_sd);
void _remove(Lx_sd);
/*
* Execute functor 'fn' in the context of the 'poll' method.
*/
void _exec_control(auto const &fn);
bool _rpc_ep_exited = false;
public:
struct Control_function : Interface
{
virtual void execute() = 0;
};
Epoll();
/*
* Execute functor 'fn' in the context of the 'poll' method.
*/
void _exec_control(auto const &fn);
~Epoll();
public:
/**
* Wait for incoming RPC messages
*
* \return valid socket descriptor that matches the invoked
* RPC object
*/
Lx_sd poll();
Epoll();
Native_capability alloc_rpc_cap();
~Epoll();
void free_rpc_cap(Native_capability);
/**
* Wait for incoming RPC messages
*
* \return valid socket descriptor that matches the invoked
* RPC object
*/
Lx_sd poll();
/**
* Flag RPC entrypoint as no longer in charge of dispatching
*/
void rpc_ep_exited() { _rpc_ep_exited = true; }
Native_capability alloc_rpc_cap();
} epoll { };
void free_rpc_cap(Native_capability);
/**
* Flag RPC entrypoint as no longer in charge of dispatching
*/
void rpc_ep_exited() { _rpc_ep_exited = true; }
} epoll { };
Native_thread() { }
Native_thread() { }
};
#endif /* _INCLUDE__BASE__INTERNAL__NATIVE_THREAD_H_ */

@ -392,36 +392,38 @@ Rpc_request Genode::ipc_reply_wait(Reply_capability const &last_caller,
for (;;) lx_nanosleep(&ts, 0);
}
Native_thread::Epoll &epoll = myself_ptr->native_thread().epoll;
return myself_ptr->with_native_thread([&] (Native_thread &nt) {
for (;;) {
for (;;) {
Lx_sd const selected_sd = epoll.poll();
Lx_sd const selected_sd = nt.epoll.poll();
Protocol_header &header = request_msg.header<Protocol_header>();
Message msg(header.msg_start(), sizeof(Protocol_header) + request_msg.capacity());
Protocol_header &header = request_msg.header<Protocol_header>();
Message msg(header.msg_start(), sizeof(Protocol_header) + request_msg.capacity());
msg.accept_sockets(Message::MAX_SDS_PER_MSG);
msg.accept_sockets(Message::MAX_SDS_PER_MSG);
request_msg.reset();
int const ret = lx_recvmsg(selected_sd, msg.msg(), 0x40);
request_msg.reset();
int const ret = lx_recvmsg(selected_sd, msg.msg(), 0x40);
if (ret < 0)
continue;
if (ret < 0)
continue;
if (msg.num_sockets() == 0 || !msg.socket_at_index(0).valid()) {
warning("ipc_reply_wait: failed to obtain reply socket");
continue;
if (msg.num_sockets() == 0 || !msg.socket_at_index(0).valid()) {
warning("ipc_reply_wait: failed to obtain reply socket");
continue;
}
Lx_sd const reply_socket = msg.socket_at_index(0);
/* start at offset 1 to skip the reply channel */
extract_sds_from_message(1, msg, header, request_msg);
return Rpc_request(Capability_space::import(Rpc_destination(reply_socket),
Rpc_obj_key()), selected_sd.value);
}
Lx_sd const reply_socket = msg.socket_at_index(0);
/* start at offset 1 to skip the reply channel */
extract_sds_from_message(1, msg, header, request_msg);
return Rpc_request(Capability_space::import(Rpc_destination(reply_socket),
Rpc_obj_key()), selected_sd.value);
}
}, [&] () -> Rpc_request { sleep_forever(); });
}
@ -435,16 +437,16 @@ Ipc_server::Ipc_server()
if (!Thread::myself())
return;
Native_thread &native_thread = Thread::myself()->native_thread();
Thread::myself()->with_native_thread([&] (Native_thread &nt) {
if (native_thread.is_ipc_server) {
Genode::raw(lx_getpid(), ":", lx_gettid(),
" unexpected multiple instantiation of Ipc_server by one thread");
struct Ipc_server_multiple_instance { };
throw Ipc_server_multiple_instance();
}
if (nt.is_ipc_server) {
Genode::raw(lx_getpid(), ":", lx_gettid(),
" unexpected multiple instantiation of Ipc_server by one thread");
sleep_forever();
}
native_thread.is_ipc_server = true;
nt.is_ipc_server = true;
});
}
@ -457,7 +459,6 @@ Ipc_server::~Ipc_server()
* Reset thread role to non-server such that we can enter 'sleep_forever'
* without getting a warning.
*/
Native_thread &native_thread = Thread::myself()->native_thread();
native_thread.is_ipc_server = false;
Thread::myself()->with_native_thread([&] (Native_thread &nt) {
nt.is_ipc_server = false; });
}

@ -130,8 +130,12 @@ void Native_thread::Epoll::_exec_control(FN const &fn)
* If 'myself_ptr' is nullptr, the caller is the initial thread w/o
* a valid 'Thread' object associated yet. This thread is never polling.
*/
bool const myself_is_polling = (myself_ptr != nullptr)
&& (&myself_ptr->native_thread().epoll == this);
auto myself_is_polling = [&]
{
return myself_ptr && myself_ptr->with_native_thread(
[&] (Native_thread &nt) { return (&nt.epoll == this); },
[&] { return false; });
};
/*
* If caller runs in the context of the same thread that executes 'poll' we
@ -139,7 +143,7 @@ void Native_thread::Epoll::_exec_control(FN const &fn)
* block at this time. If the RPC entrypoint has existed its dispatch
* loop, it also cannot poll anymore.
*/
if (myself_is_polling || _rpc_ep_exited) {
if (myself_is_polling() || _rpc_ep_exited) {
fn();
return;
}

@ -61,26 +61,29 @@ Native_capability Rpc_entrypoint::_alloc_rpc_cap(Pd_session& pd, Native_capabili
"cap_quota=", cap_upgrade).string());
});
}
return Thread::native_thread().epoll.alloc_rpc_cap();
return with_native_thread(
[&] (Native_thread &nt) { return nt.epoll.alloc_rpc_cap(); },
[&] { return Native_capability(); });
}
void Rpc_entrypoint::_free_rpc_cap(Pd_session& pd, Native_capability cap)
{
Native_thread::Epoll &epoll = Thread::native_thread().epoll;
with_native_thread([&] (Native_thread &nt) {
/*
* Flag RPC entrypoint as exited to prevent 'free_rpc_cap' from issuing
* a remote control request.
*/
if (_exit_handler.exit)
epoll.rpc_ep_exited();
/*
* Flag RPC entrypoint as exited to prevent 'free_rpc_cap' from issuing
* a remote control request.
*/
if (_exit_handler.exit)
nt.epoll.rpc_ep_exited();
/*
* Perform the accounting of the PDs cap quota at core, to remain
* consistent with other kernel platforms.
*/
pd.free_rpc_cap(Native_capability());
/*
* Perform the accounting of the PDs cap quota at core, to remain
* consistent with other kernel platforms.
*/
pd.free_rpc_cap(Native_capability());
epoll.free_rpc_cap(cap);
nt.epoll.free_rpc_cap(cap);
});
}

@ -83,10 +83,10 @@ void Thread::_thread_start()
lx_sigaction(LX_SIGUSR1, empty_signal_handler, false);
/* inform core about the new thread and process ID of the new thread */
{
thread->with_native_thread([&] (Native_thread &nt) {
Linux_native_cpu_client native_cpu(thread->_cpu_session->native_cpu());
native_cpu.thread_id(thread->cap(), thread->native_thread().pid, thread->native_thread().tid);
}
native_cpu.thread_id(thread->cap(), nt.pid, nt.tid);
});
/* wakeup 'start' function */
startup_lock().wakeup();
@ -100,7 +100,7 @@ void Thread::_thread_start()
}
void Thread::_init_platform_thread(size_t /* weight */, Type type)
void Thread::_init_native_thread(Stack &stack, size_t /* weight */, Type type)
{
/* if no cpu session is given, use it from the environment */
if (!_cpu_session) {
@ -110,7 +110,7 @@ void Thread::_init_platform_thread(size_t /* weight */, Type type)
/* for normal threads create an object at the CPU session */
if (type == NORMAL) {
_cpu_session->create_thread(pd_session_cap(), _stack->name().string(),
_cpu_session->create_thread(pd_session_cap(), stack.name(),
Affinity::Location(), Weight()).with_result(
[&] (Thread_capability cap) { _thread_cap = cap; },
[&] (Cpu_session::Create_thread_error) {
@ -119,12 +119,12 @@ void Thread::_init_platform_thread(size_t /* weight */, Type type)
return;
}
/* adjust initial object state for main threads */
native_thread().futex_counter = main_thread_futex_counter;
stack.native_thread().futex_counter = main_thread_futex_counter;
_thread_cap = main_thread_cap();
}
void Thread::_deinit_platform_thread()
void Thread::_deinit_native_thread(Stack &stack)
{
/*
* Kill thread until it is really really dead
@ -139,12 +139,12 @@ void Thread::_deinit_platform_thread()
* anymore.
*/
for (;;) {
Native_thread &nt = stack.native_thread();
/* destroy thread locally */
int pid = native_thread().pid;
if (pid == 0) break;
if (nt.pid == 0) break;
int ret = lx_tgkill(pid, native_thread().tid, LX_SIGCANCEL);
int ret = lx_tgkill(nt.pid, nt.tid, LX_SIGCANCEL);
if (ret < 0) break;
@ -180,8 +180,10 @@ Thread::Start_result Thread::start()
threadlib_initialized = true;
}
native_thread().tid = lx_create_thread(Thread::_thread_start, stack_top());
native_thread().pid = lx_getpid();
with_native_thread([&] (Native_thread &nt) {
nt.tid = lx_create_thread(Thread::_thread_start, stack_top());
nt.pid = lx_getpid();
});
/* wait until the 'thread_start' function got entered */
startup_lock().block();

@ -383,9 +383,10 @@ static void adopt_thread(Native_thread::Meta_data *meta_data)
/*
* Initialize thread meta data
*/
Native_thread &native_thread = meta_data->thread_base.native_thread();
native_thread.tid = lx_gettid();
native_thread.pid = lx_getpid();
meta_data->thread_base.with_native_thread([&] (Native_thread &nt) {
nt.tid = lx_gettid();
nt.pid = lx_getpid();
});
}
@ -483,10 +484,10 @@ Thread *Thread::myself()
new (global_alloc()) Thread_meta_data_adopted(thread);
/*
* Initialize 'Thread::_native_thread' to point to the default-
* Initialize 'Thread::_native_thread_ptr' to point to the default-
* constructed 'Native_thread' (part of 'Meta_data').
*/
meta_data->thread_base._native_thread = &meta_data->native_thread;
meta_data->thread_base._native_thread_ptr = &meta_data->native_thread;
adopt_thread(meta_data);
return thread;
@ -498,20 +499,20 @@ Thread::Start_result Thread::start()
/*
* Unblock thread that is supposed to slumber in 'thread_start'.
*/
native_thread().meta_data->started();
with_native_thread([&] (Native_thread &nt) {
nt.meta_data->started(); });
return Start_result::OK;
}
void Thread::join()
{
native_thread().meta_data->wait_for_join();
with_native_thread([&] (Native_thread &nt) {
nt.meta_data->wait_for_join(); });
}
Native_thread &Thread::native_thread() { return *_native_thread; }
Thread::Thread(size_t weight, const char *name, size_t /* stack size */,
Type, Cpu_session * cpu_sess, Affinity::Location)
: _cpu_session(cpu_sess), _affinity()
@ -519,7 +520,7 @@ Thread::Thread(size_t weight, const char *name, size_t /* stack size */,
Native_thread::Meta_data *meta_data =
new (global_alloc()) Thread_meta_data_created(this);
_native_thread = &meta_data->native_thread;
_native_thread_ptr = &meta_data->native_thread;
int const ret = pthread_create(&meta_data->pt, 0, thread_start, meta_data);
if (ret) {
@ -528,18 +529,21 @@ Thread::Thread(size_t weight, const char *name, size_t /* stack size */,
throw Out_of_stack_space();
}
native_thread().meta_data->wait_for_construction();
with_native_thread([&] (Native_thread &nt) {
_thread_cap = _cpu_session->create_thread(_env_ptr->pd_session_cap(), name,
Location(), Weight(weight));
_thread_cap.with_result(
[&] (Thread_capability cap) {
Linux_native_cpu_client native_cpu(_cpu_session->native_cpu());
native_cpu.thread_id(cap, native_thread().pid, native_thread().tid);
},
[&] (Cpu_session::Create_thread_error) {
error("failed to create hybrid thread"); }
);
nt.meta_data->wait_for_construction();
_thread_cap = _cpu_session->create_thread(_env_ptr->pd_session_cap(), name,
Location(), Weight(weight));
_thread_cap.with_result(
[&] (Thread_capability cap) {
Linux_native_cpu_client native_cpu(_cpu_session->native_cpu());
native_cpu.thread_id(cap, nt.pid, nt.tid);
},
[&] (Cpu_session::Create_thread_error) {
error("failed to create hybrid thread"); }
);
});
}
@ -561,22 +565,25 @@ Thread::Thread(Env &env, Name const &name, size_t stack_size)
Thread::~Thread()
{
bool const needs_join = (pthread_cancel(native_thread().meta_data->pt) == 0);
with_native_thread([&] (Native_thread &nt) {
if (needs_join) {
int const ret = pthread_join(native_thread().meta_data->pt, 0);
if (ret)
warning("pthread_join unexpectedly returned "
"with ", ret, " (errno=", errno, ")");
}
bool const needs_join = (pthread_cancel(nt.meta_data->pt) == 0);
Thread_meta_data_created *meta_data =
dynamic_cast<Thread_meta_data_created *>(native_thread().meta_data);
if (needs_join) {
int const ret = pthread_join(nt.meta_data->pt, 0);
if (ret)
warning("pthread_join unexpectedly returned "
"with ", ret, " (errno=", errno, ")");
}
if (meta_data)
destroy(global_alloc(), meta_data);
Thread_meta_data_created *meta_data =
dynamic_cast<Thread_meta_data_created *>(nt.meta_data);
_native_thread = nullptr;
if (meta_data)
destroy(global_alloc(), meta_data);
});
_native_thread_ptr = nullptr;
/* inform core about the killed thread */
_thread_cap.with_result(

@ -19,21 +19,17 @@
#ifndef _INCLUDE__NOVA__NATIVE_THREAD_H_
#define _INCLUDE__NOVA__NATIVE_THREAD_H_
#include <base/stdint.h>
#include <nova/receive_window.h>
namespace Genode { struct Native_thread; }
struct Genode::Native_thread
struct Genode::Native_thread : Noncopyable
{
static constexpr unsigned long INVALID_INDEX = ~0UL;
addr_t ec_sel { 0 }; /* selector for execution context */
addr_t exc_pt_sel { 0 }; /* base of event portal window */
addr_t initial_ip { 0 }; /* initial IP of local thread */
/* receive window for capability selectors received at the server side */
Receive_window server_rcv_window { };
addr_t ec_sel = INVALID_INDEX; /* selector for execution context */
addr_t exc_pt_sel = INVALID_INDEX; /* base of event portal window */
/*
* Designated selector to populate with the result of an IPC call
@ -49,13 +45,19 @@ struct Genode::Native_thread
*/
addr_t client_rcv_sel = INVALID_INDEX;
void reset_client_rcv_sel() { client_rcv_sel = INVALID_INDEX; }
addr_t initial_ip = 0;
/* receive window for capability selectors received at the server side */
Receive_window server_rcv_window { };
Native_capability pager_cap { };
Native_thread() : ec_sel(INVALID_INDEX),
exc_pt_sel(INVALID_INDEX),
initial_ip(0) { }
Native_thread() { }
/* ec_sel is invalid until thread gets started */
bool ec_valid() const { return ec_sel != INVALID_INDEX; }
void reset_client_rcv_sel() { client_rcv_sel = INVALID_INDEX; }
};
#endif /* _INCLUDE__NOVA__NATIVE_THREAD_H_ */

@ -569,13 +569,16 @@ void Exception_handlers::register_handler(Pager_object &obj, Mtd mtd,
void (* __attribute__((regparm(1))) func)(Pager_object &))
{
uint8_t res = !Nova::NOVA_OK;
with_pager_thread(obj.location(), platform_specific(), [&] (Pager_thread &pager_thread) {
addr_t const ec_sel = pager_thread.native_thread().ec_sel;
with_pager_thread(obj.location(), platform_specific(), [&] (Pager_thread &thread) {
thread.with_native_thread([&] (Native_thread &nt) {
addr_t const ec_sel = nt.ec_sel;
/* compiler generates instance of exception entry if not specified */
addr_t entry = func ? (addr_t)func : (addr_t)(&_handler<EV>);
res = create_portal(obj.exc_pt_sel_client() + EV,
platform_specific().core_pd_sel(), ec_sel, mtd, entry, &obj);
/* compiler generates instance of exception entry if not specified */
addr_t entry = func ? (addr_t)func : (addr_t)(&_handler<EV>);
res = create_portal(obj.exc_pt_sel_client() + EV,
platform_specific().core_pd_sel(),
ec_sel, mtd, entry, &obj);
});
});
if (res != Nova::NOVA_OK)
@ -644,13 +647,15 @@ void Pager_object::_construct_pager()
uint8_t res = !Nova::NOVA_OK;
with_pager_thread(_location, platform_specific(), [&] (Pager_thread &pager_thread) {
pager_thread.with_native_thread([&] (Native_thread &nt) {
addr_t const ec_sel = pager_thread.native_thread().ec_sel;
addr_t const ec_sel = nt.ec_sel;
/* create portal for final cleanup call used during destruction */
res = create_portal(sel_pt_cleanup(), pd_sel, ec_sel, Mtd(0),
reinterpret_cast<addr_t>(_invoke_handler),
this);
/* create portal for final cleanup call used during destruction */
res = create_portal(sel_pt_cleanup(), pd_sel, ec_sel, Mtd(0),
reinterpret_cast<addr_t>(_invoke_handler),
this);
});
});
if (res != Nova::NOVA_OK) {
error("could not create pager cleanup portal, error=", res);
@ -872,8 +877,10 @@ void Pager_object::_oom_handler(addr_t pager_dst, addr_t pager_src,
if (assert) {
error("unknown OOM case - stop core pager thread");
utcb.set_msg_word(0);
reply(myself.stack_top(), myself.native_thread().exc_pt_sel + Nova::SM_SEL_EC);
myself.with_native_thread([&] (Native_thread &nt) {
utcb.set_msg_word(0);
reply(myself.stack_top(), nt.exc_pt_sel + Nova::SM_SEL_EC);
});
}
/* be strict in case of the -strict- STOP policy - stop causing thread */
@ -892,8 +899,11 @@ void Pager_object::_oom_handler(addr_t pager_dst, addr_t pager_src,
case SRC_PD_UNKNOWN:
/* should not happen on Genode - we create and know every PD in core */
error("Unknown PD has insufficient kernel memory left - stop thread");
utcb.set_msg_word(0);
reply(myself.stack_top(), myself.native_thread().exc_pt_sel + Nova::SM_SEL_EC);
myself.with_native_thread([&] (Native_thread &nt) {
utcb.set_msg_word(0);
reply(myself.stack_top(), nt.exc_pt_sel + Nova::SM_SEL_EC);
});
break;
case SRC_CORE_PD:
/* core PD -> other PD, which has insufficient kernel resources */
@ -943,13 +953,14 @@ addr_t Pager_object::create_oom_portal()
{
uint8_t res = !Nova::NOVA_OK;
with_pager_thread(_location, platform_specific(),
[&] (Pager_thread &thread) {
with_pager_thread(_location, platform_specific(), [&] (Pager_thread &thread) {
thread.with_native_thread([&] (Native_thread &nt) {
addr_t const core_pd_sel = platform_specific().core_pd_sel();
addr_t const ec_sel = thread.native_thread().ec_sel;
addr_t const ec_sel = nt.ec_sel;
res = create_portal(sel_oom_portal(), core_pd_sel, ec_sel, Mtd(0),
reinterpret_cast<addr_t>(_oom_handler),
this);
});
});
if (res == Nova::NOVA_OK)

@ -30,51 +30,54 @@
using namespace Core;
void Thread::_init_platform_thread(size_t, Type type)
void Thread::_init_native_thread(Stack &stack, size_t, Type type)
{
Native_thread &nt = stack.native_thread();
/*
* This function is called for constructing server activations and pager
* objects. It allocates capability selectors for the thread's execution
* context and a synchronization-helper semaphore needed for 'Lock'.
*/
using namespace Nova;
if (type == MAIN)
{
if (type == MAIN) {
/* set EC selector according to NOVA spec */
native_thread().ec_sel = platform_specific().core_pd_sel() + 1;
nt.ec_sel = platform_specific().core_pd_sel() + 1;
/*
* Exception base of first thread in core is 0. We have to set
* it here so that Thread code finds the semaphore of the
* main thread.
*/
native_thread().exc_pt_sel = 0;
nt.exc_pt_sel = 0;
return;
}
native_thread().ec_sel = cap_map().insert(1);
native_thread().exc_pt_sel = cap_map().insert(NUM_INITIAL_PT_LOG2);
nt.ec_sel = cap_map().insert(1);
nt.exc_pt_sel = cap_map().insert(Nova::NUM_INITIAL_PT_LOG2);
/* create running semaphore required for locking */
addr_t rs_sel = native_thread().exc_pt_sel + SM_SEL_EC;
uint8_t res = create_sm(rs_sel, platform_specific().core_pd_sel(), 0);
if (res != NOVA_OK)
addr_t rs_sel = nt.exc_pt_sel + Nova::SM_SEL_EC;
uint8_t res = Nova::create_sm(rs_sel, platform_specific().core_pd_sel(), 0);
if (res != Nova::NOVA_OK)
error("Thread::_init_platform_thread: create_sm returned ", res);
}
void Thread::_deinit_platform_thread()
void Thread::_deinit_native_thread(Stack &stack)
{
unmap_local(Nova::Obj_crd(native_thread().ec_sel, 1));
unmap_local(Nova::Obj_crd(native_thread().exc_pt_sel, Nova::NUM_INITIAL_PT_LOG2));
Native_thread &nt = stack.native_thread();
cap_map().remove(native_thread().ec_sel, 1, false);
cap_map().remove(native_thread().exc_pt_sel, Nova::NUM_INITIAL_PT_LOG2, false);
unmap_local(Nova::Obj_crd(nt.ec_sel, 1));
unmap_local(Nova::Obj_crd(nt.exc_pt_sel, Nova::NUM_INITIAL_PT_LOG2));
cap_map().remove(nt.ec_sel, 1, false);
cap_map().remove(nt.exc_pt_sel, Nova::NUM_INITIAL_PT_LOG2, false);
/* revoke utcb */
Nova::Rights rwx(true, true, true);
addr_t utcb = reinterpret_cast<addr_t>(&_stack->utcb());
addr_t utcb = reinterpret_cast<addr_t>(&stack.utcb());
Nova::revoke(Nova::Mem_crd(utcb >> 12, 0, rwx));
}
@ -93,9 +96,11 @@ Thread::Start_result Thread::start()
/* create local EC */
enum { LOCAL_THREAD = false };
unsigned const kernel_cpu_id = platform_specific().kernel_cpu_id(_affinity);
uint8_t res = create_ec(native_thread().ec_sel,
platform_specific().core_pd_sel(), kernel_cpu_id,
(mword_t)&utcb, sp, native_thread().exc_pt_sel, LOCAL_THREAD);
uint8_t res { };
with_native_thread([&] (Native_thread &nt) {
res = create_ec(nt.ec_sel, platform_specific().core_pd_sel(), kernel_cpu_id,
(mword_t)&utcb, sp, nt.exc_pt_sel, LOCAL_THREAD); });
if (res != NOVA_OK) {
error("Thread::start: create_ec returned ", res);
return Start_result::DENIED;
@ -109,10 +114,16 @@ Thread::Start_result Thread::start()
if (i == SM_SEL_EC)
continue;
if (map_local(platform_specific().core_pd_sel(),
*reinterpret_cast<Nova::Utcb *>(Thread::myself()->utcb()),
Obj_crd(i, 0),
Obj_crd(native_thread().exc_pt_sel + i, 0))) {
bool page_fault_portal_ok = with_native_thread(
[&] (Native_thread &nt) {
return !map_local(platform_specific().core_pd_sel(),
*reinterpret_cast<Nova::Utcb *>(Thread::myself()->utcb()),
Obj_crd(i, 0),
Obj_crd(nt.exc_pt_sel + i, 0));
},
[&] { return false; });
if (!page_fault_portal_ok) {
error("Thread::start: failed to create page-fault portal");
return Start_result::DENIED;
}
@ -131,9 +142,11 @@ Thread::Start_result Thread::start()
{
uint64_t ec_time = 0;
uint8_t res = Nova::ec_time(thread.native_thread().ec_sel, ec_time);
if (res != Nova::NOVA_OK)
warning("ec_time for core thread failed res=", res);
thread.with_native_thread([&] (Native_thread &nt) {
uint8_t res = Nova::ec_time(nt.ec_sel, ec_time);
if (res != Nova::NOVA_OK)
warning("ec_time for core thread failed res=", res);
});
return { Session_label("core"), thread.name(),
Trace::Execution_time(ec_time, 0), thread._affinity };

@ -32,13 +32,22 @@
extern int main_thread_running_semaphore();
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_base)
static inline Genode::addr_t sm_sel_ec(Genode::Thread *thread_ptr)
{
Genode::addr_t sem = thread_base ?
thread_base->native_thread().exc_pt_sel + Nova::SM_SEL_EC :
main_thread_running_semaphore();
if (!thread_ptr)
return main_thread_running_semaphore();
Nova::sm_ctrl(sem, Nova::SEMAPHORE_UP);
using namespace Genode;
return thread_ptr->with_native_thread(
[&] (Native_thread &nt) { return nt.exc_pt_sel + Nova::SM_SEL_EC; },
[&] { error("attempt to synchronize invalid thread"); return 0UL; });
}
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_ptr)
{
Nova::sm_ctrl(sm_sel_ec(thread_ptr), Nova::SEMAPHORE_UP);
return true;
}
@ -48,16 +57,7 @@ static inline void thread_switch_to(Genode::Thread *) { }
static inline void thread_stop_myself(Genode::Thread *myself)
{
using namespace Genode;
using namespace Nova;
addr_t sem;
if (myself)
sem = myself->native_thread().exc_pt_sel + SM_SEL_EC;
else
sem = main_thread_running_semaphore();
sm_ctrl(sem, SEMAPHORE_DOWNZERO);
Nova::sm_ctrl(sm_sel_ec(myself), Nova::SEMAPHORE_DOWNZERO);
}
#endif /* _INCLUDE__BASE__INTERNAL__LOCK_HELPER_H_ */

@ -29,14 +29,28 @@ enum State {
enum { RESERVED_BITS = 12, COUNTER_MASK = 0xFFC };
static inline Genode::addr_t ec_sel(Genode::Thread *thread_ptr)
{
if (!thread_ptr)
return Nova::EC_SEL_THREAD;
using namespace Genode;
return thread_ptr->with_native_thread(
[&] (Native_thread &nt) { return nt.ec_sel; },
[&] { error("failed to obtain ec_sel for invalid thread"); return 0UL; });
}
template <typename T>
static inline void spinlock_lock(volatile T *lock_variable)
{
using Genode::cmpxchg;
Genode::Thread * myself = Genode::Thread::myself();
T const tid = (T)(myself ? myself->native_thread().ec_sel
: (Genode::addr_t)Nova::EC_SEL_THREAD);
T const tid = (T)ec_sel(myself);
unsigned help_counter = 0;

@ -54,11 +54,13 @@ namespace Genode {
: Rpc_client<Nova_signal_source>(static_cap_cast<Nova_signal_source>(cap))
{
/* request mapping of semaphore capability selector */
Thread * myself = Thread::myself();
auto const &exc_base = myself->native_thread().exc_pt_sel;
request_signal_sm_cap(exc_base + Nova::PT_SEL_PAGE_FAULT,
exc_base + Nova::SM_SEL_SIGNAL);
_sem = Capability_space::import(exc_base + Nova::SM_SEL_SIGNAL);
Thread &myself = *Thread::myself();
myself.with_native_thread([&] (Native_thread &nt) {
auto const exc_base = nt.exc_pt_sel;
request_signal_sm_cap(exc_base + Nova::PT_SEL_PAGE_FAULT,
exc_base + Nova::SM_SEL_SIGNAL);
_sem = Capability_space::import(exc_base + Nova::SM_SEL_SIGNAL);
});
call<Rpc_register_semaphore>(_sem);
}

@ -65,8 +65,15 @@ Rpc_exception_code Genode::ipc_call(Native_capability dst,
* Determine manually defined selector for receiving the call result.
* See the comment in 'base-nova/include/nova/native_thread.h'.
*/
addr_t const manual_rcv_sel = myself ? myself->native_thread().client_rcv_sel
: Receive_window::INVALID_INDEX;
auto manual_rcv_sel_for_myself = [&]
{
addr_t sel = Receive_window::INVALID_INDEX;
if (myself)
myself->with_native_thread([&] (Native_thread &nt) {
sel = nt.client_rcv_sel; });
return sel;
};
addr_t const manual_rcv_sel = manual_rcv_sel_for_myself();
/* if we can't setup receive window, die in order to recognize the issue */
if (!rcv_window.prepare_rcv_window(utcb, manual_rcv_sel))

@ -42,27 +42,29 @@ Untyped_capability Rpc_entrypoint::_manage(Rpc_object_base *obj)
return obj->cap();
}
Untyped_capability ec_cap;
return with_native_thread(
[&] (Native_thread &nt) {
/* _ec_sel is invalid until thread gets started */
if (native_thread().ec_sel != Native_thread::INVALID_INDEX)
ec_cap = Capability_space::import(native_thread().ec_sel);
else
ec_cap = Thread::cap();
Untyped_capability const ec_cap =
nt.ec_valid() ? Capability_space::import(nt.ec_sel) : Thread::cap();
Untyped_capability obj_cap = _alloc_rpc_cap(_pd_session, ec_cap,
(addr_t)&_activation_entry);
if (!obj_cap.valid())
return obj_cap;
Untyped_capability const obj_cap =
_alloc_rpc_cap(_pd_session, ec_cap, (addr_t)&_activation_entry);
/* add server object to object pool */
obj->cap(obj_cap);
insert(obj);
if (!obj_cap.valid())
return Untyped_capability();
/* return object capability managed by entrypoint thread */
return obj_cap;
/* add server object to object pool */
obj->cap(obj_cap);
insert(obj);
/* return object capability managed by entrypoint thread */
return obj_cap;
},
[&] { return Untyped_capability(); });
}
static void cleanup_call(Rpc_object_base *obj, Nova::Utcb * ep_utcb,
Native_capability &cap)
{
@ -129,52 +131,54 @@ void Rpc_entrypoint::_activation_entry()
Rpc_entrypoint &ep = *static_cast<Rpc_entrypoint *>(Thread::myself());
Nova::Utcb &utcb = *(Nova::Utcb *)Thread::myself()->utcb();
Receive_window &rcv_window = ep.native_thread().server_rcv_window;
rcv_window.post_ipc(utcb);
ep.with_native_thread([&] (Native_thread &nt) {
Receive_window &rcv_window = nt.server_rcv_window;
rcv_window.post_ipc(utcb);
/* handle ill-formed message */
if (utcb.msg_words() < 2) {
ep._rcv_buf.word(0) = ~0UL; /* invalid opcode */
} else {
copy_utcb_to_msgbuf(utcb, rcv_window, ep._rcv_buf);
}
/* handle ill-formed message */
if (utcb.msg_words() < 2) {
ep._rcv_buf.word(0) = ~0UL; /* invalid opcode */
} else {
copy_utcb_to_msgbuf(utcb, rcv_window, ep._rcv_buf);
}
Ipc_unmarshaller unmarshaller(ep._rcv_buf);
Ipc_unmarshaller unmarshaller(ep._rcv_buf);
Rpc_opcode opcode(0);
unmarshaller.extract(opcode);
Rpc_opcode opcode(0);
unmarshaller.extract(opcode);
/* default return value */
Rpc_exception_code exc = Rpc_exception_code(Rpc_exception_code::INVALID_OBJECT);
/* default return value */
Rpc_exception_code exc = Rpc_exception_code(Rpc_exception_code::INVALID_OBJECT);
/* in case of a portal cleanup call we are done here - just reply */
if (ep._cap.local_name() == (long)id_pt) {
if (!rcv_window.prepare_rcv_window(utcb))
/* in case of a portal cleanup call we are done here - just reply */
if (ep._cap.local_name() == (long)id_pt) {
if (!rcv_window.prepare_rcv_window(utcb))
warning("out of capability selectors for handling server requests");
ep._rcv_buf.reset();
reply(utcb, exc, ep._snd_buf);
}
/* atomically lookup and lock referenced object */
auto lambda = [&] (Rpc_object_base *obj)
{
if (!obj) {
error("could not look up server object, return from call id_pt=", id_pt);
return;
}
/* dispatch request */
ep._snd_buf.reset();
exc = obj->dispatch(opcode, unmarshaller, ep._snd_buf);
};
ep.apply(id_pt, lambda);
if (!rcv_window.prepare_rcv_window(*(Nova::Utcb *)ep.utcb()))
warning("out of capability selectors for handling server requests");
ep._rcv_buf.reset();
reply(utcb, exc, ep._snd_buf);
}
/* atomically lookup and lock referenced object */
auto lambda = [&] (Rpc_object_base *obj)
{
if (!obj) {
error("could not look up server object, return from call id_pt=", id_pt);
return;
}
/* dispatch request */
ep._snd_buf.reset();
exc = obj->dispatch(opcode, unmarshaller, ep._snd_buf);
};
ep.apply(id_pt, lambda);
if (!rcv_window.prepare_rcv_window(*(Nova::Utcb *)ep.utcb()))
warning("out of capability selectors for handling server requests");
ep._rcv_buf.reset();
reply(utcb, exc, ep._snd_buf);
});
}
@ -202,28 +206,33 @@ Rpc_entrypoint::Rpc_entrypoint(Pd_session *pd_session, size_t stack_size,
_pd_session(*pd_session)
{
/* set magic value evaluated by thread_nova.cc to start a local thread */
if (native_thread().ec_sel == Native_thread::INVALID_INDEX) {
native_thread().ec_sel = Native_thread::INVALID_INDEX - 1;
native_thread().initial_ip = (addr_t)&_activation_entry;
}
with_native_thread([&] (Native_thread &nt) {
if (nt.ec_valid())
return;
nt.ec_sel = Native_thread::INVALID_INDEX - 1;
nt.initial_ip = (addr_t)&_activation_entry;
});
/* required to create a 'local' EC */
Thread::start();
/* create cleanup portal */
_cap = _alloc_rpc_cap(_pd_session,
Capability_space::import(native_thread().ec_sel),
(addr_t)_activation_entry);
with_native_thread([&] (Native_thread &nt) {
_cap = _alloc_rpc_cap(_pd_session, Capability_space::import(nt.ec_sel),
(addr_t)_activation_entry); });
if (!_cap.valid()) {
error("failed to allocate RPC cap for new entrypoint");
return;
}
Receive_window &rcv_window = Thread::native_thread().server_rcv_window;
with_native_thread([&] (Native_thread &nt) {
Receive_window &rcv_window = nt.server_rcv_window;
/* prepare portal receive window of new thread */
if (!rcv_window.prepare_rcv_window(*(Nova::Utcb *)&_stack->utcb()))
error("failed to prepare receive window for RPC entrypoint");
/* prepare portal receive window of new thread */
if (!rcv_window.prepare_rcv_window(*(Nova::Utcb *)&_stack->utcb()))
error("failed to prepare receive window for RPC entrypoint");
});
}

@ -55,8 +55,6 @@ static Thread_capability main_thread_cap(Thread_capability main_cap = { })
*/
void Thread::_thread_start()
{
using namespace Genode;
/* catch any exception at this point and try to print an error message */
try {
Thread::myself()->entry();
@ -83,24 +81,23 @@ void Thread::_thread_start()
** Thread base **
*****************/
void Thread::_init_platform_thread(size_t weight, Type type)
void Thread::_init_native_thread(Stack &stack, size_t weight, Type type)
{
using namespace Nova;
Native_thread &nt = stack.native_thread();
/*
* Allocate capability selectors for the thread's execution context,
* running semaphore and exception handler portals.
*/
native_thread().ec_sel = Native_thread::INVALID_INDEX;
/* for main threads the member initialization differs */
if (type == MAIN) {
_thread_cap = main_thread_cap();
native_thread().exc_pt_sel = 0;
native_thread().ec_sel = Nova::EC_SEL_THREAD;
nt.exc_pt_sel = 0;
nt.ec_sel = Nova::EC_SEL_THREAD;
request_native_ec_cap(PT_SEL_PAGE_FAULT, native_thread().ec_sel);
request_native_ec_cap(Nova::PT_SEL_PAGE_FAULT, nt.ec_sel);
return;
}
@ -113,12 +110,12 @@ void Thread::_init_platform_thread(size_t weight, Type type)
* 'Cpu_session::kill_thread()' and is not able to revoke the UTCB
* afterwards.
*/
Rights rwx(true, true, true);
addr_t utcb = reinterpret_cast<addr_t>(&_stack->utcb());
revoke(Mem_crd(utcb >> 12, 0, rwx));
Nova::Rights rwx(true, true, true);
addr_t utcb = reinterpret_cast<addr_t>(&stack.utcb());
Nova::revoke(Nova::Mem_crd(utcb >> 12, 0, rwx));
native_thread().exc_pt_sel = cap_map().insert(NUM_INITIAL_PT_LOG2);
if (native_thread().exc_pt_sel == Native_thread::INVALID_INDEX) {
nt.exc_pt_sel = cap_map().insert(Nova::NUM_INITIAL_PT_LOG2);
if (nt.exc_pt_sel == Native_thread::INVALID_INDEX) {
error("failed allocate exception-portal selector for new thread");
return;
}
@ -134,88 +131,87 @@ void Thread::_init_platform_thread(size_t weight, Type type)
}
void Thread::_deinit_platform_thread()
void Thread::_deinit_native_thread(Stack &stack)
{
using namespace Nova;
Native_thread &nt = stack.native_thread();
if (native_thread().ec_sel != Native_thread::INVALID_INDEX) {
revoke(Obj_crd(native_thread().ec_sel, 0));
}
if (nt.ec_valid())
Nova::revoke(Nova::Obj_crd(nt.ec_sel, 0));
/* de-announce thread */
_thread_cap.with_result(
[&] (Thread_capability cap) { _cpu_session->kill_thread(cap); },
[&] (Cpu_session::Create_thread_error) { });
cap_map().remove(native_thread().exc_pt_sel, NUM_INITIAL_PT_LOG2);
cap_map().remove(nt.exc_pt_sel, Nova::NUM_INITIAL_PT_LOG2);
}
Thread::Start_result Thread::start()
{
if (native_thread().ec_sel < Native_thread::INVALID_INDEX - 1) {
error("Thread::start failed due to invalid exception portal selector");
return Start_result::DENIED;
}
return with_native_thread([&] (Native_thread &nt) {
if (_thread_cap.failed())
return Start_result::DENIED;
if (nt.ec_sel < Native_thread::INVALID_INDEX - 1) {
error("Thread::start failed due to invalid exception portal selector");
return Start_result::DENIED;
}
/*
* Default: create global thread - ec.sel == INVALID_INDEX
* create local thread - ec.sel == INVALID_INDEX - 1
*/
bool global = native_thread().ec_sel == Native_thread::INVALID_INDEX;
if (_thread_cap.failed())
return Start_result::DENIED;
using namespace Genode;
/*
* Default: create global thread - ec.sel == INVALID_INDEX
* create local thread - ec.sel == INVALID_INDEX - 1
*/
bool global = nt.ec_sel == Native_thread::INVALID_INDEX;
/* create EC at core */
/* create EC at core */
try {
Cpu_session::Native_cpu::Thread_type thread_type;
try {
Cpu_session::Native_cpu::Thread_type thread_type;
if (global)
thread_type = Cpu_session::Native_cpu::Thread_type::GLOBAL;
else
thread_type = Cpu_session::Native_cpu::Thread_type::LOCAL;
Cpu_session::Native_cpu::Exception_base exception_base { nt.exc_pt_sel };
Nova_native_cpu_client native_cpu(_cpu_session->native_cpu());
native_cpu.thread_type(cap(), thread_type, exception_base);
} catch (...) {
error("Thread::start failed to set thread type");
return Start_result::DENIED;
}
/* local thread have no start instruction pointer - set via portal entry */
addr_t thread_ip = global ? reinterpret_cast<addr_t>(_thread_start) : nt.initial_ip;
Cpu_thread_client cpu_thread(cap());
cpu_thread.start(thread_ip, _stack->top());
/* request native EC thread cap */
nt.ec_sel = nt.exc_pt_sel + Nova::EC_SEL_THREAD;
/*
* Requested ec cap that is used for recall and
* creation of portals (Native_pd::alloc_rpc_cap).
*/
request_native_ec_cap(nt.exc_pt_sel + Nova::PT_SEL_PAGE_FAULT,
nt.ec_sel);
/* default: we don't accept any mappings or translations */
Nova::Utcb &utcb = *(Nova::Utcb *)Thread::utcb();
utcb.crd_rcv = Nova::Obj_crd();
utcb.crd_xlt = Nova::Obj_crd();
if (global)
thread_type = Cpu_session::Native_cpu::Thread_type::GLOBAL;
else
thread_type = Cpu_session::Native_cpu::Thread_type::LOCAL;
/* request creation of SC to let thread run*/
cpu_thread.resume();
Cpu_session::Native_cpu::Exception_base exception_base { native_thread().exc_pt_sel };
return Start_result::OK;
Nova_native_cpu_client native_cpu(_cpu_session->native_cpu());
native_cpu.thread_type(cap(), thread_type, exception_base);
} catch (...) {
error("Thread::start failed to set thread type");
return Start_result::DENIED;
}
/* local thread have no start instruction pointer - set via portal entry */
addr_t thread_ip = global ? reinterpret_cast<addr_t>(_thread_start) : native_thread().initial_ip;
Cpu_thread_client cpu_thread(cap());
cpu_thread.start(thread_ip, _stack->top());
/* request native EC thread cap */
native_thread().ec_sel = native_thread().exc_pt_sel + Nova::EC_SEL_THREAD;
/*
* Requested ec cap that is used for recall and
* creation of portals (Native_pd::alloc_rpc_cap).
*/
request_native_ec_cap(native_thread().exc_pt_sel + Nova::PT_SEL_PAGE_FAULT,
native_thread().ec_sel);
using namespace Nova;
/* default: we don't accept any mappings or translations */
Utcb * utcb_obj = reinterpret_cast<Utcb *>(utcb());
utcb_obj->crd_rcv = Obj_crd();
utcb_obj->crd_xlt = Obj_crd();
if (global)
/* request creation of SC to let thread run*/
cpu_thread.resume();
return Start_result::OK;
}, [&] { return Start_result::DENIED; });
}

@ -655,12 +655,13 @@ void Nova_vcpu::with_state(auto const &fn)
static void nova_reply(Thread &myself, Nova::Utcb &utcb, auto &&... args)
{
Receive_window &rcv_window = myself.native_thread().server_rcv_window;
myself.with_native_thread([&] (Native_thread &nt) {
/* reset receive window to values expected by RPC server code */
rcv_window.prepare_rcv_window(utcb);
/* reset receive window to values expected by RPC server code */
nt.server_rcv_window.prepare_rcv_window(utcb);
Nova::reply(myself.stack_top(), args...);
Nova::reply(myself.stack_top(), args...);
});
}
@ -697,19 +698,23 @@ Signal_context_capability Nova_vcpu::_create_exit_handler(Pd_session &pd,
uint16_t exit_reason,
Nova::Mtd mtd)
{
Thread *tep = reinterpret_cast<Thread *>(&handler.rpc_ep());
Thread *ep = reinterpret_cast<Thread *>(&handler.rpc_ep());
Native_capability thread_cap = Capability_space::import(tep->native_thread().ec_sel);
return ep->with_native_thread([&] (Native_thread &nt) {
Nova_native_pd_client native_pd { pd.native_pd() };
Native_capability thread_cap = Capability_space::import(nt.ec_sel);
Native_capability vm_exit_cap =
native_pd.alloc_rpc_cap(thread_cap, (addr_t)Nova_vcpu::_exit_entry, mtd.value());
Nova_native_pd_client native_pd { pd.native_pd() };
Badge const badge { vcpu_id, exit_reason };
native_pd.imprint_rpc_cap(vm_exit_cap, badge.value());
Native_capability vm_exit_cap =
native_pd.alloc_rpc_cap(thread_cap, (addr_t)Nova_vcpu::_exit_entry, mtd.value());
return reinterpret_cap_cast<Signal_context>(vm_exit_cap);
Badge const badge { vcpu_id, exit_reason };
native_pd.imprint_rpc_cap(vm_exit_cap, badge.value());
return reinterpret_cap_cast<Signal_context>(vm_exit_cap);
}, [&] { return Signal_context_capability(); });
}

@ -227,8 +227,9 @@ void test_revoke(Genode::Env &env)
* as used before by copy_session_cap
*/
Genode::Thread * myself = Genode::Thread::myself();
request_native_ec_cap(myself->native_thread().exc_pt_sel + Nova::PT_SEL_PAGE_FAULT,
copy_session_cap.local_name());
myself->with_native_thread([&] (Native_thread &nt) {
request_native_ec_cap(nt.exc_pt_sel + Nova::PT_SEL_PAGE_FAULT,
copy_session_cap.local_name()); });
/* check whether the requested cap before is valid and placed well */
crd_ses = Nova::Obj_crd(copy_session_cap.local_name(), 0);
@ -319,33 +320,37 @@ void test_pat(Genode::Env &env)
touch_read_write(reinterpret_cast<unsigned char *>( memory + offset));
}
Nova::Rights const all(true, true, true);
/*
* Establish memory mapping with evilly wrong mapping attributes
*/
Nova_native_pd_client native_pd { env.pd().native_pd() };
Thread * thread = reinterpret_cast<Genode::Thread *>(&ep);
Native_capability const thread_cap
= Capability_space::import(thread->native_thread().ec_sel);
Untyped_capability const pt =
native_pd.alloc_rpc_cap(thread_cap, (addr_t)portal_entry, 0 /* MTD */);
thread->with_native_thread([&] (Native_thread &nt) {
Nova::Rights const all(true, true, true);
Nova::Mem_crd const rcv_crd(memory_remap >> PAGE_4K, DS_ORDER, all);
Nova::Mem_crd const snd_crd(memory_wc >> PAGE_4K, DS_ORDER, all);
Nova::Crd const old_crd = utcb.crd_rcv;
Native_capability const thread_cap = Capability_space::import(nt.ec_sel);
utcb.crd_rcv = rcv_crd;
utcb.set_msg_word(1);
utcb.msg()[0] = snd_crd.value();
Untyped_capability const pt =
native_pd.alloc_rpc_cap(thread_cap, (addr_t)portal_entry, 0 /* MTD */);
uint8_t const res = Nova::call(pt.local_name());
utcb.crd_rcv = old_crd;
Nova::Mem_crd const rcv_crd(memory_remap >> PAGE_4K, DS_ORDER, all);
Nova::Mem_crd const snd_crd(memory_wc >> PAGE_4K, DS_ORDER, all);
Nova::Crd const old_crd = utcb.crd_rcv;
if (res != Nova::NOVA_OK) {
Genode::error("establishing memory failed ", res);
failed++;
}
utcb.crd_rcv = rcv_crd;
utcb.set_msg_word(1);
utcb.msg()[0] = snd_crd.value();
uint8_t const res = Nova::call(pt.local_name());
utcb.crd_rcv = old_crd;
if (res != Nova::NOVA_OK) {
Genode::error("establishing memory failed ", res);
failed++;
}
});
/* sanity check - touch re-mapped area */
for (auto offset = 0; offset < DS_SIZE; offset += (1 << PAGE_4K))
@ -488,20 +493,23 @@ class Pager : private Genode::Thread {
touch_read(reinterpret_cast<unsigned char *>(_ds_mem));
/* request creation of a 'local' EC */
Thread::native_thread().ec_sel = Native_thread::INVALID_INDEX - 1;
Thread::start();
with_native_thread([&] (Native_thread &nt) {
Genode::warning("pager: created");
nt.ec_sel = Native_thread::INVALID_INDEX - 1;
Native_capability thread_cap =
Capability_space::import(Thread::native_thread().ec_sel);
Thread::start();
Genode::Nova_native_pd_client native_pd(env.pd().native_pd());
Nova::Mtd mtd (Nova::Mtd::QUAL | Nova::Mtd::EIP | Nova::Mtd::ESP);
Genode::addr_t entry = reinterpret_cast<Genode::addr_t>(page_fault);
Genode::warning("pager: created");
_call_to_map = native_pd.alloc_rpc_cap(thread_cap, entry,
mtd.value());
Native_capability thread_cap = Capability_space::import(nt.ec_sel);
Genode::Nova_native_pd_client native_pd(env.pd().native_pd());
Nova::Mtd mtd (Nova::Mtd::QUAL | Nova::Mtd::EIP | Nova::Mtd::ESP);
Genode::addr_t entry = reinterpret_cast<Genode::addr_t>(page_fault);
_call_to_map = native_pd.alloc_rpc_cap(thread_cap, entry,
mtd.value());
});
}
Native_capability call_to_map() { return _call_to_map; }
@ -700,11 +708,14 @@ Main::Main(Env &env) : env(env)
};
addr_t sel_pd = cap_map().insert();
addr_t sel_ec = myself->native_thread().ec_sel;
addr_t sel_ec = Native_thread::INVALID_INDEX;
addr_t sel_cap = cap_map().insert();
addr_t handler = 0UL;
uint8_t res = 0;
myself->with_native_thread([&] (Native_thread &nt) {
sel_ec = nt.ec_sel; });
Nova::Mtd mtd(Nova::Mtd::ALL);
if (sel_cap == ~0UL || sel_ec == ~0UL || sel_cap == ~0UL) {
@ -721,9 +732,10 @@ Main::Main(Env &env) : env(env)
/* changing the badge of one of the portal must fail */
for (unsigned i = 0; i < (1U << Nova::NUM_INITIAL_PT_LOG2); i++) {
addr_t sel_exc = myself->native_thread().exc_pt_sel + i;
res = Nova::pt_ctrl(sel_exc, 0xbadbad);
check(res, "pt_ctrl ", i);
myself->with_native_thread([&] (Native_thread &nt) {
res = Nova::pt_ctrl(nt.exc_pt_sel + i, 0xbadbad);
check(res, "pt_ctrl ", i);
});
}
/* test PAT kernel feature */

@ -104,7 +104,9 @@ class Timer::Device
static Sel init_signal_sem(Thread &thread)
{
auto const exc_base = thread.native_thread().exc_pt_sel;
addr_t exc_base = Native_thread::INVALID_INDEX;
thread.with_native_thread([&] (Native_thread &nt) {
exc_base = nt.exc_pt_sel; });
request_signal_sm_cap(exc_base + Nova::PT_SEL_PAGE_FAULT,
exc_base + Nova::SM_SEL_SIGNAL);

@ -144,7 +144,9 @@ void Ipc_pager::acknowledge_wakeup()
Untyped_capability Pager_entrypoint::_pager_object_cap(unsigned long badge)
{
return Capability_space::import(native_thread().l4id, Rpc_obj_key(badge));
return with_native_thread(
[&] (Native_thread &nt) { return Capability_space::import(nt.l4id, Rpc_obj_key(badge)); },
[&] { return Untyped_capability(); });
}

@ -35,18 +35,19 @@ void Thread::_thread_start()
Thread::Start_result Thread::start()
{
/* create and start platform thread */
native_thread().pt = new (Core::platform_specific().thread_slab())
Core::Platform_thread(Core::platform_specific().core_pd(), _stack->name().string());
native_thread().pt->start((void *)_thread_start, stack_top());
with_native_thread([&] (Native_thread &nt) {
nt.pt = new (Core::platform_specific().thread_slab())
Core::Platform_thread(Core::platform_specific().core_pd(),
_stack->name().string());
nt.pt->start((void *)_thread_start, stack_top());
});
return Start_result::OK;
}
void Thread::_deinit_platform_thread()
void Thread::_deinit_native_thread(Stack &stack)
{
/* destruct platform thread */
destroy(Core::platform_specific().thread_slab(), native_thread().pt);
destroy(Core::platform_specific().thread_slab(), stack.native_thread().pt);
}

@ -33,6 +33,18 @@ static inline void thread_yield() { Okl4::L4_Yield(); }
extern Okl4::L4_ThreadId_t main_thread_tid;
static inline Okl4::L4_ThreadId_t okl4_tid(Genode::Thread *thread_ptr)
{
if (!thread_ptr)
return main_thread_tid;
return thread_ptr->with_native_thread(
[&] (Genode::Native_thread &nt) { return nt.l4id; },
[&] { return Okl4::L4_ThreadId_t { }; });
}
/**
* Custom ExchangeRegisters wrapper for waking up a thread
*
@ -42,20 +54,17 @@ extern Okl4::L4_ThreadId_t main_thread_tid;
*
* \return true if the thread was in blocking state
*/
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_base)
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_ptr)
{
using namespace Okl4;
L4_Word_t dummy;
L4_ThreadId_t dummy_id;
L4_Word_t dummy;
L4_ThreadId_t dummy_id;
L4_ThreadState_t state;
Okl4::L4_ThreadId_t tid = thread_base ?
thread_base->native_thread().l4id :
main_thread_tid;
L4_ExchangeRegisters(tid, L4_ExReg_Resume + L4_ExReg_AbortIPC, 0, 0, 0,
0, L4_nilthread, &state.raw, &dummy, &dummy, &dummy,
L4_ExchangeRegisters(okl4_tid(thread_ptr),
L4_ExReg_Resume + L4_ExReg_AbortIPC, 0, 0, 0, 0,
L4_nilthread, &state.raw, &dummy, &dummy, &dummy,
&dummy, &dummy_id);
return L4_ThreadWasHalted(state);
@ -65,24 +74,18 @@ static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_base)
/**
* Yield CPU time to the specified thread
*/
static inline void thread_switch_to(Genode::Thread *thread_base)
static inline void thread_switch_to(Genode::Thread *thread_ptr)
{
Okl4::L4_ThreadId_t tid = thread_base ?
thread_base->native_thread().l4id :
main_thread_tid;
Okl4::L4_ThreadSwitch(tid);
Okl4::L4_ThreadSwitch(okl4_tid(thread_ptr));
}
/**
* Unconditionally block the calling thread
*/
static inline void thread_stop_myself(Genode::Thread *myself)
static inline void thread_stop_myself(Genode::Thread *thread_ptr)
{
Okl4::L4_ThreadId_t tid = myself ?
myself->native_thread().l4id :
main_thread_tid;
Okl4::L4_Stop(tid);
Okl4::L4_Stop(okl4_tid(thread_ptr));
}
#endif /* _INCLUDE__BASE__INTERNAL__LOCK_HELPER_H_ */

@ -15,6 +15,7 @@
#define _INCLUDE__BASE__INTERNAL__NATIVE_THREAD_H_
/* Genode includes */
#include <util/noncopyable.h>
#include <base/stdint.h>
/* base-internal includes */
@ -22,22 +23,24 @@
namespace Core { class Platform_thread; }
namespace Genode { struct Native_thread; }
struct Genode::Native_thread
struct Genode::Native_thread : Noncopyable
{
Okl4::L4_ThreadId_t l4id;
Okl4::L4_ThreadId_t l4id { };
/**
* Only used in core
*
* For 'Thread' objects created within core, 'pt' points to
* the physical thread object, which is going to be destroyed
* on destruction of the 'Thread'.
* For 'Thread' objects created within core, 'pt' points to the physical
* thread object, which is going to be destroyed on destruction of the
* 'Thread'.
*/
Core::Platform_thread *pt;
struct { Core::Platform_thread *pt = nullptr; };
Native_thread() { }
};
#endif /* _INCLUDE__BASE__INTERNAL__NATIVE_THREAD_H_ */

@ -17,10 +17,10 @@
#include <base/env.h>
/* base-internal includes */
#include <base/internal/native_thread.h>
#include <base/internal/native_utcb.h>
#include <base/internal/globals.h>
#include <base/internal/okl4.h>
#include <base/internal/stack.h>
using namespace Genode;
@ -81,16 +81,17 @@ void Genode::prepare_init_main_thread()
void Thread::_thread_bootstrap()
{
native_thread().l4id.raw = Okl4::copy_uregister_to_utcb();
with_native_thread([&] (Native_thread &nt) {
nt.l4id.raw = Okl4::copy_uregister_to_utcb(); });
}
void Thread::_init_platform_thread(size_t, Type type)
void Thread::_init_native_thread(Stack &stack, size_t, Type type)
{
if (type == NORMAL)
return;
native_thread().l4id.raw = main_thread_tid.raw;
stack.native_thread().l4id.raw = main_thread_tid.raw;
_thread_cap = main_thread_cap();
}

@ -130,7 +130,9 @@ void Ipc_pager::acknowledge_wakeup()
Untyped_capability Pager_entrypoint::_pager_object_cap(unsigned long badge)
{
return Capability_space::import(native_thread().l4id, Rpc_obj_key(badge));
return with_native_thread(
[&] (Native_thread &nt) { return Capability_space::import(nt.l4id, Rpc_obj_key(badge)); },
[&] { return Untyped_capability(); });
}

@ -36,21 +36,22 @@ void Thread::_thread_start()
Thread::Start_result Thread::start()
{
/* create and start platform thread */
native_thread().pt = new (platform().core_mem_alloc())
Platform_thread(platform_specific().core_pd(), _stack->name().string());
return with_native_thread(
[&] (Native_thread &nt) {
nt.pt = new (platform().core_mem_alloc())
Platform_thread(platform_specific().core_pd(), _stack->name().string());
native_thread().pt->pager(platform_specific().core_pager());
native_thread().l4id = native_thread().pt->native_thread_id();
nt.pt->pager(platform_specific().core_pager());
nt.l4id = nt.pt->native_thread_id();
native_thread().pt->start((void *)_thread_start, stack_top());
return Start_result::OK;
nt.pt->start((void *)_thread_start, stack_top());
return Start_result::OK;
},
[&] { return Start_result::DENIED; });
}
void Thread::_deinit_platform_thread()
void Thread::_deinit_native_thread(Stack &stack)
{
/* destruct platform thread */
destroy(platform().core_mem_alloc(), native_thread().pt);
destroy(platform().core_mem_alloc(), stack.native_thread().pt);
}

@ -33,6 +33,17 @@ extern Pistachio::L4_ThreadId_t main_thread_tid;
static inline void thread_yield() { Pistachio::L4_Yield(); }
static inline Pistachio::L4_ThreadId_t pistachio_tid(Genode::Thread *thread_ptr)
{
if (!thread_ptr)
return main_thread_tid;
return thread_ptr->with_native_thread(
[&] (Genode::Native_thread &nt) { return nt.l4id; },
[&] { return Pistachio::L4_ThreadId_t { }; });
}
/**
* Custom ExchangeRegisters wrapper for waking up a thread
*
@ -42,21 +53,17 @@ static inline void thread_yield() { Pistachio::L4_Yield(); }
*
* \return true if the thread was in blocking state
*/
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_base)
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_ptr)
{
using namespace Pistachio;
L4_Word_t dummy;
L4_ThreadId_t dummy_id;
L4_Word_t dummy;
L4_ThreadId_t dummy_id;
L4_ThreadState_t state;
Pistachio::L4_ThreadId_t tid = thread_base ?
thread_base->native_thread().l4id :
main_thread_tid;
enum { RESUME = 1 << 8, CANCEL_IPC = 3 << 1 };
L4_ExchangeRegisters(tid, RESUME | CANCEL_IPC, 0, 0, 0,
0, L4_nilthread, &state.raw, &dummy, &dummy, &dummy,
L4_ExchangeRegisters(pistachio_tid(thread_ptr), RESUME | CANCEL_IPC, 0, 0, 0, 0,
L4_nilthread, &state.raw, &dummy, &dummy, &dummy,
&dummy, &dummy_id);
return L4_ThreadWasHalted(state);
@ -66,24 +73,18 @@ static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_base)
/**
* Yield CPU time to the specified thread
*/
static inline void thread_switch_to(Genode::Thread *thread_base)
static inline void thread_switch_to(Genode::Thread *thread_ptr)
{
Pistachio::L4_ThreadId_t tid = thread_base ?
thread_base->native_thread().l4id :
main_thread_tid;
Pistachio::L4_ThreadSwitch(tid);
Pistachio::L4_ThreadSwitch(pistachio_tid(thread_ptr));
}
/**
* Unconditionally block the calling thread
*/
static inline void thread_stop_myself(Genode::Thread *myself)
static inline void thread_stop_myself(Genode::Thread *thread_ptr)
{
Pistachio::L4_ThreadId_t tid = myself ?
myself->native_thread().l4id :
main_thread_tid;
Pistachio::L4_Stop(tid);
Pistachio::L4_Stop(pistachio_tid(thread_ptr));
}
#endif /* _INCLUDE__BASE__INTERNAL__LOCK_HELPER_H_ */

@ -15,6 +15,7 @@
#define _INCLUDE__BASE__INTERNAL__NATIVE_THREAD_H_
/* Genode includes */
#include <util/noncopyable.h>
#include <base/stdint.h>
/* base-internal includes */
@ -25,9 +26,9 @@ namespace Genode { struct Native_thread; }
namespace Core { class Platform_thread; }
struct Genode::Native_thread
struct Genode::Native_thread : Noncopyable
{
Pistachio::L4_ThreadId_t l4id;
Pistachio::L4_ThreadId_t l4id { };
/**
* Only used in core
@ -36,7 +37,9 @@ struct Genode::Native_thread
* the physical thread object, which is going to be destroyed
* on destruction of the 'Thread'.
*/
Core::Platform_thread *pt;
struct { Core::Platform_thread *pt = nullptr; };
Native_thread() { }
};
#endif /* _INCLUDE__BASE__INTERNAL__NATIVE_THREAD_H_ */

@ -16,7 +16,7 @@
#include <base/env.h>
/* base-internal includes */
#include <base/internal/native_thread.h>
#include <base/internal/stack.h>
#include <base/internal/globals.h>
#include <base/internal/pistachio.h>
@ -49,16 +49,17 @@ void Genode::prepare_init_main_thread()
void Genode::Thread::_thread_bootstrap()
{
native_thread().l4id = Pistachio::L4_Myself();
with_native_thread([&] (Native_thread &nt) {
nt.l4id = Pistachio::L4_Myself(); });
}
void Genode::Thread::_init_platform_thread(size_t, Type type)
void Genode::Thread::_init_native_thread(Stack &stack, size_t, Type type)
{
if (type == NORMAL)
return;
native_thread().l4id = main_thread_tid;
stack.native_thread().l4id = main_thread_tid;
_thread_cap = main_thread_cap();
}

@ -123,16 +123,19 @@ Capability_space::create_rpc_obj_cap(Native_capability ep_cap,
Native_capability Capability_space::create_ep_cap(Thread &ep_thread)
{
Cap_sel const ep_sel(ep_thread.native_thread().ep_sel);
return ep_thread.with_native_thread(
[&] (Native_thread &nt) {
Cap_sel const ep_sel(nt.attr.ep_sel);
/* entrypoint capabilities are not allocated from a PD session */
Pd_session const *pd_session = nullptr;
/* entrypoint capabilities are not allocated from a PD session */
Pd_session const *pd_session = nullptr;
Native_capability::Data &data =
local_capability_space().create_capability(ep_sel, pd_session,
Rpc_obj_key());
return Native_capability(&data);
Native_capability::Data &data =
local_capability_space().create_capability(ep_sel, pd_session,
Rpc_obj_key());
return Native_capability(&data);
},
[&] { return Native_capability(); });
}

@ -65,19 +65,19 @@ void Ipc_pager::reply_and_wait_for_fault()
{
seL4_Word badge = Rpc_obj_key::INVALID;
seL4_MessageInfo_t page_fault_msg_info;
seL4_MessageInfo_t page_fault_msg_info { };
if (_badge) {
Thread::myself()->with_native_thread([&] (Native_thread &nt) {
seL4_MessageInfo_t const reply_msg = seL4_MessageInfo_new(0, 0, 0, 0);
if (_badge) {
seL4_MessageInfo_t const reply_msg = seL4_MessageInfo_new(0, 0, 0, 0);
page_fault_msg_info =
seL4_ReplyRecv(Thread::myself()->native_thread().ep_sel, reply_msg, &badge);
page_fault_msg_info = seL4_ReplyRecv(nt.attr.ep_sel, reply_msg, &badge);
} else {
page_fault_msg_info =
seL4_Recv(Thread::myself()->native_thread().ep_sel, &badge);
}
} else {
page_fault_msg_info = seL4_Recv(nt.attr.ep_sel, &badge);
}
});
Fault_info const fault_info(page_fault_msg_info);

@ -30,13 +30,15 @@
using namespace Core;
void Thread::_init_platform_thread(size_t, Type type)
void Thread::_init_native_thread(Stack &stack, size_t, Type type)
{
Utcb_virt const utcb_virt { (addr_t)&_stack->utcb() };
Native_thread &nt = stack.native_thread();
Utcb_virt const utcb_virt { (addr_t)&stack.utcb() };
if (type == MAIN) {
native_thread().tcb_sel = seL4_CapInitThreadTCB;
native_thread().lock_sel = INITIAL_SEL_LOCK;
nt.attr.tcb_sel = seL4_CapInitThreadTCB;
nt.attr.lock_sel = INITIAL_SEL_LOCK;
return;
}
@ -49,15 +51,15 @@ void Thread::_init_platform_thread(size_t, Type type)
"local=%", Hex(utcb_virt.addr));
}
native_thread().tcb_sel = thread_info.tcb_sel.value();
native_thread().ep_sel = thread_info.ep_sel.value();
native_thread().lock_sel = thread_info.lock_sel.value();
nt.attr.tcb_sel = thread_info.tcb_sel.value();
nt.attr.ep_sel = thread_info.ep_sel.value();
nt.attr.lock_sel = thread_info.lock_sel.value();
Platform &platform = platform_specific();
seL4_CNode_CapData guard = seL4_CNode_CapData_new(0, CONFIG_WORD_SIZE - 32);
seL4_CNode_CapData no_cap_data = { { 0 } };
int const ret = seL4_TCB_SetSpace(native_thread().tcb_sel, 0,
int const ret = seL4_TCB_SetSpace(nt.attr.tcb_sel, 0,
platform.top_cnode().sel().value(),
guard.words[0],
seL4_CapInitThreadPD, no_cap_data.words[0]);
@ -69,24 +71,24 @@ void Thread::_init_platform_thread(size_t, Type type)
platform.core_cnode().mint(platform.core_cnode(), unbadged_sel, lock_sel);
native_thread().lock_sel = lock_sel.value();
nt.attr.lock_sel = lock_sel.value();
}
void Thread::_deinit_platform_thread()
void Thread::_deinit_native_thread(Stack &stack)
{
addr_t const utcb_virt_addr = (addr_t)&_stack->utcb();
addr_t const utcb_virt_addr = (addr_t)&stack.utcb();
bool ret = unmap_local(utcb_virt_addr, 1);
ASSERT(ret);
int res = seL4_CNode_Delete(seL4_CapInitThreadCNode,
native_thread().lock_sel, 32);
stack.native_thread().attr.lock_sel, 32);
if (res)
error(__PRETTY_FUNCTION__, ": seL4_CNode_Delete (",
Hex(native_thread().lock_sel), ") returned ", res);
Hex(stack.native_thread().attr.lock_sel), ") returned ", res);
platform_specific().core_sel_alloc().free(Cap_sel(native_thread().lock_sel));
platform_specific().core_sel_alloc().free(Cap_sel(stack.native_thread().attr.lock_sel));
}
@ -99,13 +101,7 @@ void Thread::_thread_start()
}
Thread::Start_result Thread::start()
{
/* write ipcbuffer address to utcb*/
utcb()->ipcbuffer(Native_utcb::Virt { addr_t(utcb()) });
start_sel4_thread(Cap_sel(native_thread().tcb_sel), (addr_t)&_thread_start,
(addr_t)stack_top(), _affinity.xpos(), addr_t(utcb()));
namespace {
struct Core_trace_source : public Core::Trace::Source::Info_accessor,
private Core::Trace::Control,
@ -123,14 +119,15 @@ Thread::Start_result Thread::start()
seL4_IPCBuffer &ipc_buffer = *reinterpret_cast<seL4_IPCBuffer *>(myself.utcb());
uint64_t const * const buf = reinterpret_cast<uint64_t *>(ipc_buffer.msg);
seL4_BenchmarkGetThreadUtilisation(_thread.native_thread().tcb_sel);
_thread.with_native_thread([&] (Native_thread &nt) {
seL4_BenchmarkGetThreadUtilisation(nt.attr.tcb_sel); });
uint64_t const thread_time = buf[BENCHMARK_TCB_UTILISATION];
return { Session_label("core"), _thread.name(),
Trace::Execution_time(thread_time, 0), _thread._affinity };
Genode::Trace::Execution_time(thread_time, 0), _thread.affinity() };
}
Core_trace_source(Core::Trace::Source_registry &registry, Thread &t)
:
Core::Trace::Control(),
@ -139,9 +136,26 @@ Thread::Start_result Thread::start()
registry.insert(this);
}
};
}
new (platform().core_mem_alloc())
Core_trace_source(Core::Trace::sources(), *this);
Thread::Start_result Thread::start()
{
if (!_stack)
return Start_result::DENIED;
Stack &stack = *_stack;
/* write ipcbuffer address to utcb*/
utcb()->ipcbuffer(Native_utcb::Virt { addr_t(utcb()) });
start_sel4_thread(Cap_sel(stack.native_thread().attr.tcb_sel),
(addr_t)&_thread_start, stack.top(),
_affinity.xpos(), addr_t(utcb()));
try {
new (platform().core_mem_alloc())
Core_trace_source(Core::Trace::sources(), *this);
} catch (...) { }
return Start_result::OK;
}

@ -32,7 +32,8 @@ static inline void kernel_debugger_panic(char const *msg)
{
kernel_debugger_outstring(msg);
kernel_debugger_outstring("\n");
seL4_TCB_Suspend(Genode::Thread::myself()->native_thread().tcb_sel);
Genode::Thread::myself()->with_native_thread([&] (Genode::Native_thread &nt) {
seL4_TCB_Suspend(nt.attr.tcb_sel); });
}
#endif /* _INCLUDE__BASE__INTERNAL__KERNEL_DEBUGGER_H_ */

@ -31,28 +31,28 @@ static inline void thread_switch_to(Genode::Thread *)
}
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread)
static inline unsigned sel4_lock_sel(Genode::Thread *thread_ptr)
{
unsigned lock_sel = Genode::INITIAL_SEL_LOCK; /* main thread */
if (!thread_ptr)
return Genode::INITIAL_SEL_LOCK; /* main thread */
if (thread)
lock_sel = thread->native_thread().lock_sel;
return thread_ptr->with_native_thread(
[&] (Genode::Native_thread &nt) { return nt.attr.lock_sel; },
[&] () -> unsigned { return Genode::INITIAL_SEL_LOCK; });
}
seL4_Signal(lock_sel);
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_ptr)
{
seL4_Signal(sel4_lock_sel(thread_ptr));
return true;
}
static inline void thread_stop_myself(Genode::Thread *myself)
static inline void thread_stop_myself(Genode::Thread *myself_ptr)
{
unsigned lock_sel = Genode::INITIAL_SEL_LOCK; /* main thread */
if (myself)
lock_sel = myself->native_thread().lock_sel;
seL4_Word sender = ~0U;
seL4_Wait(lock_sel, &sender);
seL4_Wait(sel4_lock_sel(myself_ptr), &sender);
}
#endif /* _INCLUDE__BASE__INTERNAL__LOCK_HELPER_H_ */

@ -14,16 +14,17 @@
#ifndef _INCLUDE__BASE__INTERNAL__NATIVE_THREAD_H_
#define _INCLUDE__BASE__INTERNAL__NATIVE_THREAD_H_
#include <util/noncopyable.h>
#include <base/stdint.h>
namespace Genode { struct Native_thread; }
struct Genode::Native_thread
struct Genode::Native_thread : Noncopyable
{
unsigned tcb_sel = 0;
unsigned ep_sel = 0;
unsigned rcv_sel = 0;
unsigned lock_sel = 0;
struct Attr { unsigned tcb_sel, ep_sel, rcv_sel, lock_sel; } attr { };
Native_thread() { }
};
#endif /* _INCLUDE__BASE__INTERNAL__NATIVE_THREAD_H_ */

@ -95,11 +95,15 @@ namespace {
Native_capability Capability_space::create_ep_cap(Thread &ep_thread)
{
Cap_sel const ep_sel = Cap_sel(ep_thread.native_thread().ep_sel);
return ep_thread.with_native_thread(
[&] (Native_thread &nt) {
Cap_sel const ep_sel = Cap_sel(nt.attr.ep_sel);
Native_capability::Data *data =
&local_capability_space().create_capability(ep_sel, Rpc_obj_key());
return Native_capability(data);
Native_capability::Data *data =
&local_capability_space().create_capability(ep_sel, Rpc_obj_key());
return Native_capability(data);
},
[&] { return Native_capability(); });
}

@ -39,10 +39,17 @@ enum {
};
static unsigned &main_rcv_sel()
{
static unsigned _main_rcv_sel = Capability_space::alloc_rcv_sel();
return _main_rcv_sel;
}
/**
* Return reference to receive selector of the calling thread
* Call 'fn' with a reference to the receive selector of the calling thread
*/
static unsigned &rcv_sel()
static void with_rcv_sel_ref(auto const &fn)
{
/*
* When the function is called at the very early initialization phase, we
@ -50,12 +57,21 @@ static unsigned &rcv_sel()
* Thread object of the main thread does not exist yet. During this
* phase, we return a reference to the 'main_rcv_sel' variable.
*/
if (Thread::myself()) {
return Thread::myself()->native_thread().rcv_sel;
}
Thread * const myself_ptr = Thread::myself();
if (!myself_ptr)
fn(main_rcv_sel());
else
myself_ptr->with_native_thread(
[&] (Native_thread &nt) { fn(nt.attr.rcv_sel); },
[&] { ASSERT(false); /* thread w/o stack cannot execute */ });
}
static unsigned main_rcv_sel = Capability_space::alloc_rcv_sel();
return main_rcv_sel;
static void allocate_and_define_rcv_sel()
{
with_rcv_sel_ref([&] (unsigned &rcv_sel) {
if (!rcv_sel)
rcv_sel = Capability_space::alloc_rcv_sel(); });
}
@ -253,34 +269,37 @@ static void decode_seL4_message(seL4_MessageInfo_t const &msg_info,
Native_capability arg_cap = Capability_space::lookup(rpc_obj_key);
if (arg_cap.valid()) {
with_rcv_sel_ref([&] (unsigned &rcv_sel_ref) {
/*
* Discard the received selector and keep using the already
* present one.
*
* XXX We'd need to find out if both the received and the
* looked-up selector refer to the same endpoint.
* Unfortunaltely, seL4 lacks such a comparison operation.
*/
if (arg_cap.valid()) {
Capability_space::reset_sel(rcv_sel());
/*
* Discard the received selector and keep using the already
* present one.
*
* XXX We'd need to find out if both the received and the
* looked-up selector refer to the same endpoint.
* Unfortunaltely, seL4 lacks such a comparison operation.
*/
dst_msg.insert(arg_cap);
Capability_space::reset_sel(rcv_sel_ref);
} else {
dst_msg.insert(arg_cap);
Capability_space::Ipc_cap_data const
ipc_cap_data(rpc_obj_key, rcv_sel());
} else {
dst_msg.insert(Capability_space::import(ipc_cap_data));
Capability_space::Ipc_cap_data const
ipc_cap_data(rpc_obj_key, rcv_sel_ref);
/*
* Since we keep using the received selector, we need to
* allocate a fresh one for the next incoming delegation.
*/
rcv_sel() = Capability_space::alloc_rcv_sel();
}
dst_msg.insert(Capability_space::import(ipc_cap_data));
/*
* Since we keep using the received selector, we need to
* allocate a fresh one for the next incoming delegation.
*/
rcv_sel_ref = Capability_space::alloc_rcv_sel();
}
});
}
curr_sel4_cap_idx++;
}
@ -300,9 +319,7 @@ Rpc_exception_code Genode::ipc_call(Native_capability dst,
kernel_debugger_panic("IPC destination is invalid");
}
/* allocate and define receive selector */
if (!rcv_sel())
rcv_sel() = Capability_space::alloc_rcv_sel();
allocate_and_define_rcv_sel();
rcv_msg.reset();
@ -330,9 +347,7 @@ Rpc_exception_code Genode::ipc_call(Native_capability dst,
void Genode::ipc_reply(Native_capability, Rpc_exception_code exc,
Msgbuf_base &snd_msg)
{
/* allocate and define receive selector */
if (!rcv_sel())
rcv_sel() = Capability_space::alloc_rcv_sel();
allocate_and_define_rcv_sel();
/**
* Do not use Genode primitives after this point until the return which may
@ -352,29 +367,30 @@ Genode::Rpc_request Genode::ipc_reply_wait(Reply_capability const &,
Msgbuf_base &reply_msg,
Msgbuf_base &request_msg)
{
/* allocate and define receive selector */
if (!rcv_sel())
rcv_sel() = Capability_space::alloc_rcv_sel();
allocate_and_define_rcv_sel();
seL4_CPtr const dest = Thread::myself()->native_thread().ep_sel;
seL4_Word badge = 0;
if (exc.value == Rpc_exception_code::INVALID_OBJECT)
reply_msg.reset();
Thread::myself()->with_native_thread([&] (Native_thread &nt) {
request_msg.reset();
seL4_CPtr const dest = nt.attr.ep_sel;
/**
* Do not use Genode primitives after this point until the return which may
* alter the content of the IPCBuffer, e.g. Lock or RPC.
*/
if (exc.value == Rpc_exception_code::INVALID_OBJECT)
reply_msg.reset();
seL4_MessageInfo_t const reply_msg_info = new_seL4_message(reply_msg);
seL4_SetMR(MR_IDX_EXC_CODE, exc.value);
seL4_MessageInfo_t const req = seL4_ReplyRecv(dest, reply_msg_info, &badge);
request_msg.reset();
decode_seL4_message(req, request_msg);
/**
* Do not use Genode primitives after this point until the return which may
* alter the content of the IPCBuffer, e.g. Lock or RPC.
*/
seL4_MessageInfo_t const reply_msg_info = new_seL4_message(reply_msg);
seL4_SetMR(MR_IDX_EXC_CODE, exc.value);
seL4_MessageInfo_t const req = seL4_ReplyRecv(dest, reply_msg_info, &badge);
decode_seL4_message(req, request_msg);
});
return Rpc_request(Native_capability(), badge);
}

@ -31,12 +31,14 @@ void Genode::prepare_init_main_thread() { }
** Thread **
************/
void Genode::Thread::_thread_bootstrap()
void Genode::Thread::_thread_bootstrap()
{
if (native_thread().ep_sel == 0) {
native_thread().ep_sel = (unsigned)_stack->utcb().ep_sel();
native_thread().lock_sel = (unsigned)_stack->utcb().lock_sel();
}
with_native_thread([&] (Native_thread &nt) {
if (nt.attr.ep_sel == 0) {
nt.attr.ep_sel = (unsigned)_stack->utcb().ep_sel();
nt.attr.lock_sel = (unsigned)_stack->utcb().lock_sel();
}
});
}

@ -13,22 +13,20 @@
/* Genode includes */
#include <base/thread.h>
#include <base/internal/native_thread.h>
#include <base/internal/stack.h>
#include <base/internal/capability_space_sel4.h>
using namespace Genode;
void Thread::_init_platform_thread(size_t, Type type)
void Thread::_init_native_thread(Stack &stack, size_t, Type type)
{
/**
/*
* Reset to default values. The default values trigger initial allocations
* and associations the thread, like IPCbuffer in ipc.cc.
*/
native_thread() = Native_thread();
stack.native_thread().attr = { };
if (type == MAIN) {
native_thread().lock_sel = INITIAL_SEL_LOCK;
return;
}
if (type == MAIN)
stack.native_thread().attr.lock_sel = INITIAL_SEL_LOCK;
}

@ -84,7 +84,7 @@ class Genode::Thread
/**
* Hook for platform-specific destructor supplements
*/
void _deinit_platform_thread();
void _deinit_native_thread(Stack &);
/*
* Noncopyable
@ -125,7 +125,7 @@ class Genode::Thread
/**
* Pointer to kernel-specific meta data
*/
Native_thread *_native_thread = nullptr;
Native_thread *_native_thread_ptr = nullptr;
/**
* Blockade used for synchronizing the finalization of the thread
@ -163,7 +163,7 @@ class Genode::Thread
* \param weight weighting regarding the CPU session quota
* \param type enables selection of special initialization
*/
void _init_platform_thread(size_t weight, Type type);
void _init_native_thread(Stack &, size_t weight, Type type);
void _init_cpu_session_and_trace_control();
@ -330,9 +330,26 @@ class Genode::Thread
}
/**
* Return kernel-specific thread meta data
* Call 'fn' with kernel-specific 'Native_thread &' as argument,
* or 'invalid_fn' if the thread has not been successfully constructed
*/
Native_thread &native_thread();
auto with_native_thread(auto const &fn,
auto const &invalid_fn) const -> decltype(invalid_fn())
{
if (_native_thread_ptr) {
Native_thread &native_thread = *_native_thread_ptr;
return fn(native_thread);
}
return invalid_fn();
}
/**
* Conditionally call 'fn' with kernel-specific 'Native_thread &'
*/
void with_native_thread(auto const &fn) const
{
with_native_thread(fn, [&] { });
}
/**
* Return top of primary stack

@ -201,11 +201,6 @@ void Thread::free_secondary_stack(void* stack_addr)
}
Native_thread &Thread::native_thread() {
return _stack->native_thread(); }
void *Thread::stack_top() const { return (void *)_stack->top(); }
@ -250,7 +245,8 @@ Thread::Thread(size_t weight, const char *name, size_t stack_size,
_trace_control(nullptr),
_stack(_alloc_stack(stack_size, name, type == MAIN))
{
_init_platform_thread(weight, type);
_native_thread_ptr = &_stack->native_thread();
_init_native_thread(*_stack, weight, type);
}
@ -308,7 +304,8 @@ Thread::~Thread()
sleep_forever();
}
_deinit_platform_thread();
_deinit_native_thread(*_stack);
_free_stack(_stack);
cxx_free_tls(this);

@ -64,7 +64,7 @@ void Thread::_thread_start()
** Thread **
************/
void Thread::_deinit_platform_thread()
void Thread::_deinit_native_thread(Stack &)
{
if (!_cpu_session) {
error("Thread::_cpu_session unexpectedly not defined");
@ -81,8 +81,13 @@ Thread::Start_result Thread::start()
{
_init_cpu_session_and_trace_control();
if (!_stack)
return Start_result::DENIED;
Stack &stack = *_stack;
/* create thread at core */
addr_t const utcb = (addr_t)&_stack->utcb();
addr_t const utcb = (addr_t)&stack.utcb();
_thread_cap = _cpu_session->create_thread(pd_session_cap(), name(), _affinity,
Weight(), utcb);
@ -90,7 +95,7 @@ Thread::Start_result Thread::start()
[&] (Thread_capability cap) {
/* start execution at initial instruction pointer and stack pointer */
Cpu_thread_client(cap).start((addr_t)_thread_start, _stack->top());
Cpu_thread_client(cap).start((addr_t)_thread_start, stack.top());
return Start_result::OK;
},
[&] (Cpu_session::Create_thread_error) { return Start_result::DENIED; });

@ -79,7 +79,9 @@ class Vmm::Vcpu_dispatcher : public T
using namespace Genode;
/* request creation of a 'local' EC */
T::native_thread().ec_sel = Native_thread::INVALID_INDEX - 1;
T::with_native_thread([&] (Native_thread &nt) {
nt.ec_sel = Native_thread::INVALID_INDEX - 1; });
T::start();
}
@ -95,25 +97,32 @@ class Vmm::Vcpu_dispatcher : public T
*/
void (*entry)() = &_portal_entry<EV, DISPATCHER, FUNC>;
/* create the portal at the desired selector index (EV) */
Native_capability thread_cap =
Capability_space::import(T::native_thread().ec_sel);
Untyped_capability handler { };
Untyped_capability handler =
retry<Genode::Out_of_ram>(
[&] () {
/* manually define selector used for RPC result */
Thread::myself()->native_thread().client_rcv_sel = exc_base + EV;
return _native_pd.alloc_rpc_cap(thread_cap, (addr_t)entry,
mtd.value());
},
[&] () {
Thread::myself()->native_thread().reset_client_rcv_sel();
_env.parent().upgrade(Parent::Env::pd(), "ram_quota=16K");
});
T::with_native_thread([&] (Native_thread &nt) {
/* revert selector allocation to automatic mode of operation */
Thread::myself()->native_thread().reset_client_rcv_sel();
/* create the portal at the desired selector index (EV) */
Native_capability thread_cap =
Capability_space::import(nt.ec_sel);
Thread::myself()->with_native_thread([&] (Native_thread &myself_nt) {
handler = retry<Genode::Out_of_ram>(
[&] () {
/* manually define selector used for RPC result */
myself_nt.client_rcv_sel = exc_base + EV;
return _native_pd.alloc_rpc_cap(thread_cap, (addr_t)entry,
mtd.value());
},
[&] () {
myself_nt.reset_client_rcv_sel();
_env.parent().upgrade(Parent::Env::pd(), "ram_quota=16K");
});
/* revert selector allocation to automatic mode of operation */
myself_nt.reset_client_rcv_sel();
});
});
return handler.valid() && (exc_base + EV == (addr_t)handler.local_name());
}

@ -112,9 +112,9 @@ class Vmm::Vcpu_other_pd : public Vmm::Vcpu_thread
* Translate vcpu_vm thread cap via current executing thread,
* which is used to lookup current PD to delegate VM-exit portals.
*/
addr_t const current = Thread::myself()->native_thread().exc_pt_sel
+ Nova::PT_SEL_PAGE_FAULT;
translate_remote_pager(current, vcpu_vm.local_name());
Thread::myself()->with_native_thread([&] (Native_thread &nt) {
addr_t const current = nt.exc_pt_sel + Nova::PT_SEL_PAGE_FAULT;
translate_remote_pager(current, vcpu_vm.local_name()); });
/* start vCPU in separate PD */
cpu_thread.start(0, 0);

@ -725,10 +725,14 @@ void genode_update_tsc(void (*update_func)(void), Genode::uint64_t update_us)
Trace::Timestamp wakeup_absolute = Trace::timestamp();
/* initialize first time in context of running thread */
auto const &exc_base = Thread::myself()->native_thread().exc_pt_sel;
request_signal_sm_cap(exc_base + Nova::PT_SEL_PAGE_FAULT,
exc_base + Nova::SM_SEL_SIGNAL);
Genode::addr_t const sem = exc_base + SM_SEL_SIGNAL;
Genode::addr_t const sem = Thread::myself()->with_native_thread(
[&] (Native_thread &nt) {
auto const &exc_base = nt.exc_pt_sel;
request_signal_sm_cap(exc_base + Nova::PT_SEL_PAGE_FAULT,
exc_base + Nova::SM_SEL_SIGNAL);
return exc_base + SM_SEL_SIGNAL;
},
[&] () -> Genode::addr_t { return Native_thread::INVALID_INDEX; });
for (;;) {
update_func();