base: scoped access of Native_thread

With the planned removal of Thread:: exceptions, we need to consider that a
'Thread' object may exist without a valid 'Stack' and therefore without
a valid 'Native_thread', which is hosted as part of the 'Stack'.

This patch reworks the code that accesses the 'Native_thread' to use the
new 'Thread::with_native_thread' interface. Within the local scope,
the native thread is referred to as 'nt'.
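
A minimal sketch of the resulting accessor, inferred from the call sites in
the diffs below (the verbatim declaration in the 'Thread' interface differs;
the helper types and member names shown here are illustrative only):

	struct Native_thread { unsigned tid = 0; unsigned pid = 0; };

	struct Stack
	{
		Native_thread _nt { };
		Native_thread &native_thread() { return _nt; }
	};

	struct Thread
	{
		Stack *_stack_ptr = nullptr;   /* stays nullptr without a valid stack */

		/* run 'fn' with the valid 'Native_thread', or 'denied_fn' without one */
		auto with_native_thread(auto const &fn, auto const &denied_fn)
		{
			return _stack_ptr ? fn(_stack_ptr->native_thread()) : denied_fn();
		}
	};

The decisive property is that the functor taking 'Native_thread &' can only
ever run while a valid 'Stack' exists.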

The '_init_platform_thread' and '_deinit_platform_thread' methods have been
replaced by '_init_native_thread' and '_deinit_native_thread', which take a
'Stack &' argument.
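
In sketch form, the new hook signatures as inferred from the per-platform
implementations further down (parameter names are illustrative):

	void Thread::_init_native_thread  (Stack &stack, size_t weight, Type type);
	void Thread::_deinit_native_thread(Stack &stack);

Passing the 'Stack &' explicitly documents the precondition that both hooks
run only while a stack, and thereby a 'Native_thread', exists.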

As a safety precaution, 'Native_thread' objects can no longer be copied.
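
For illustration, the idiom behind this change, sketched with a hand-rolled
'Noncopyable' base (Genode ships its own utility, which may differ in detail):

	struct Noncopyable
	{
		Noncopyable() = default;
		Noncopyable(Noncopyable const &) = delete;
		Noncopyable &operator = (Noncopyable const &) = delete;
	};

	struct Native_thread : Noncopyable { /* members as before */ };

	/*
	 * Native_thread a { };
	 * Native_thread b = a;   // rejected by the compiler: copy ctor is deleted
	 */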

Issue #5245
Norman Feske
2025-03-28 22:57:34 +01:00
parent d241baec61
commit aa9ff3894c
65 changed files with 1113 additions and 866 deletions

@@ -27,11 +27,14 @@ void Genode::init_rpc_cap_alloc(Parent &) { }
Native_capability Rpc_entrypoint::_alloc_rpc_cap(Pd_session &, Native_capability,
addr_t)
{
return Thread::native_thread().epoll.alloc_rpc_cap();
return with_native_thread(
[&] (Native_thread &nt) { return nt.epoll.alloc_rpc_cap(); },
[&] { return Native_capability(); });
}
void Rpc_entrypoint::_free_rpc_cap(Pd_session &, Native_capability cap)
{
Thread::native_thread().epoll.free_rpc_cap(cap);
with_native_thread([&] (Native_thread &nt) {
nt.epoll.free_rpc_cap(cap); });
}

@@ -58,15 +58,19 @@ void Thread::_thread_start()
}
void Thread::_init_platform_thread(size_t, Type) { }
void Thread::_init_native_thread(Stack &, size_t, Type) { }
void Thread::_deinit_platform_thread() { }
void Thread::_deinit_native_thread(Stack &) { }
Thread::Start_result Thread::start()
{
native_thread().tid = lx_create_thread(Thread::_thread_start, stack_top());
native_thread().pid = lx_getpid();
return Start_result::OK;
return with_native_thread(
[&] (Native_thread &nt) {
nt.tid = lx_create_thread(Thread::_thread_start, stack_top());
nt.pid = lx_getpid();
return Start_result::OK;
},
[&] { return Start_result::DENIED; });
}

@@ -37,12 +37,23 @@ static inline void thread_yield()
}
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_base)
static inline int *futex_counter_ptr(Genode::Thread *thread_ptr)
{
const int *futex_counter_ptr = thread_base ?
&thread_base->native_thread().futex_counter :
&main_thread_futex_counter;
return lx_futex(futex_counter_ptr, LX_FUTEX_WAKE, 1);
if (!thread_ptr)
return &main_thread_futex_counter;
return thread_ptr->with_native_thread(
[&] (Genode::Native_thread &nt) { return &nt.futex_counter; },
[&] {
Genode::error("attempt to access futex of invalid thread");
return (int *)nullptr;
});
}
static inline bool thread_check_stopped_and_restart(Genode::Thread *thread_ptr)
{
return lx_futex(futex_counter_ptr(thread_ptr), LX_FUTEX_WAKE, 1);
}
@@ -56,10 +67,7 @@ static inline void thread_stop_myself(Genode::Thread *myself)
* 'thread_check_stopped_and_restart()' function will get called
* repeatedly until this thread has actually executed the syscall.
*/
const int *futex_counter_ptr = myself ?
&myself->native_thread().futex_counter :
&main_thread_futex_counter;
lx_futex(futex_counter_ptr, LX_FUTEX_WAIT, 0);
lx_futex(futex_counter_ptr(myself), LX_FUTEX_WAIT, 0);
}
#endif /* _INCLUDE__BASE__INTERNAL__LOCK_HELPER_H_ */

@@ -22,92 +22,82 @@
namespace Genode { struct Native_thread; }
class Genode::Native_thread
struct Genode::Native_thread : Noncopyable
{
private:
/*
* Unfortunately, both - PID and TID - are needed for lx_tgkill()
*/
unsigned int tid = 0; /* Native thread ID type as returned by the
'clone' system call */
unsigned int pid = 0; /* process ID (resp. thread-group ID) */
/*
* Noncopyable
*/
Native_thread(Native_thread const &);
Native_thread &operator = (Native_thread const &);
bool is_ipc_server = false;
public:
/**
* Natively aligned memory location used in the lock implementation
*/
int futex_counter __attribute__((aligned(sizeof(Genode::addr_t)))) = 0;
/*
* Unfortunately, both - PID and TID - are needed for lx_tgkill()
*/
unsigned int tid = 0; /* Native thread ID type as returned by the
'clone' system call */
unsigned int pid = 0; /* process ID (resp. thread-group ID) */
struct Meta_data;
bool is_ipc_server = false;
/**
* Opaque pointer to additional thread-specific meta data
*
* This pointer is used by hybrid Linux/Genode programs to maintain
* POSIX-thread-related meta data. For non-hybrid Genode programs, it
* remains unused.
*/
struct { Meta_data *meta_data = nullptr; };
/**
* Natively aligned memory location used in the lock implementation
*/
int futex_counter __attribute__((aligned(sizeof(Genode::addr_t)))) = 0;
class Epoll
{
private:
struct Meta_data;
Lx_socketpair _control { };
/**
* Opaque pointer to additional thread-specific meta data
*
* This pointer is used by hybrid Linux/Genode programs to maintain
* POSIX-thread-related meta data. For non-hybrid Genode programs, it
* remains unused.
*/
Meta_data *meta_data = nullptr;
Lx_epoll_sd const _epoll;
class Epoll
{
private:
void _add (Lx_sd);
void _remove(Lx_sd);
Lx_socketpair _control { };
bool _rpc_ep_exited = false;
Lx_epoll_sd const _epoll;
struct Control_function : Interface
{
virtual void execute() = 0;
};
void _add (Lx_sd);
void _remove(Lx_sd);
/*
* Execute functor 'fn' in the context of the 'poll' method.
*/
void _exec_control(auto const &fn);
bool _rpc_ep_exited = false;
public:
struct Control_function : Interface
{
virtual void execute() = 0;
};
Epoll();
/*
* Execute functor 'fn' in the context of the 'poll' method.
*/
void _exec_control(auto const &fn);
~Epoll();
public:
/**
* Wait for incoming RPC messages
*
* \return valid socket descriptor that matches the invoked
* RPC object
*/
Lx_sd poll();
Epoll();
Native_capability alloc_rpc_cap();
~Epoll();
void free_rpc_cap(Native_capability);
/**
* Wait for incoming RPC messages
*
* \return valid socket descriptor that matches the invoked
* RPC object
*/
Lx_sd poll();
/**
* Flag RPC entrypoint as no longer in charge of dispatching
*/
void rpc_ep_exited() { _rpc_ep_exited = true; }
Native_capability alloc_rpc_cap();
} epoll { };
void free_rpc_cap(Native_capability);
/**
* Flag RPC entrypoint as no longer in charge of dispatching
*/
void rpc_ep_exited() { _rpc_ep_exited = true; }
} epoll { };
Native_thread() { }
Native_thread() { }
};
#endif /* _INCLUDE__BASE__INTERNAL__NATIVE_THREAD_H_ */

@@ -392,36 +392,38 @@ Rpc_request Genode::ipc_reply_wait(Reply_capability const &last_caller,
for (;;) lx_nanosleep(&ts, 0);
}
Native_thread::Epoll &epoll = myself_ptr->native_thread().epoll;
return myself_ptr->with_native_thread([&] (Native_thread &nt) {
for (;;) {
for (;;) {
Lx_sd const selected_sd = epoll.poll();
Lx_sd const selected_sd = nt.epoll.poll();
Protocol_header &header = request_msg.header<Protocol_header>();
Message msg(header.msg_start(), sizeof(Protocol_header) + request_msg.capacity());
Protocol_header &header = request_msg.header<Protocol_header>();
Message msg(header.msg_start(), sizeof(Protocol_header) + request_msg.capacity());
msg.accept_sockets(Message::MAX_SDS_PER_MSG);
msg.accept_sockets(Message::MAX_SDS_PER_MSG);
request_msg.reset();
int const ret = lx_recvmsg(selected_sd, msg.msg(), 0x40);
request_msg.reset();
int const ret = lx_recvmsg(selected_sd, msg.msg(), 0x40);
if (ret < 0)
continue;
if (ret < 0)
continue;
if (msg.num_sockets() == 0 || !msg.socket_at_index(0).valid()) {
warning("ipc_reply_wait: failed to obtain reply socket");
continue;
if (msg.num_sockets() == 0 || !msg.socket_at_index(0).valid()) {
warning("ipc_reply_wait: failed to obtain reply socket");
continue;
}
Lx_sd const reply_socket = msg.socket_at_index(0);
/* start at offset 1 to skip the reply channel */
extract_sds_from_message(1, msg, header, request_msg);
return Rpc_request(Capability_space::import(Rpc_destination(reply_socket),
Rpc_obj_key()), selected_sd.value);
}
Lx_sd const reply_socket = msg.socket_at_index(0);
/* start at offset 1 to skip the reply channel */
extract_sds_from_message(1, msg, header, request_msg);
return Rpc_request(Capability_space::import(Rpc_destination(reply_socket),
Rpc_obj_key()), selected_sd.value);
}
}, [&] () -> Rpc_request { sleep_forever(); });
}
@@ -435,16 +437,16 @@ Ipc_server::Ipc_server()
if (!Thread::myself())
return;
Native_thread &native_thread = Thread::myself()->native_thread();
Thread::myself()->with_native_thread([&] (Native_thread &nt) {
if (native_thread.is_ipc_server) {
Genode::raw(lx_getpid(), ":", lx_gettid(),
" unexpected multiple instantiation of Ipc_server by one thread");
struct Ipc_server_multiple_instance { };
throw Ipc_server_multiple_instance();
}
if (nt.is_ipc_server) {
Genode::raw(lx_getpid(), ":", lx_gettid(),
" unexpected multiple instantiation of Ipc_server by one thread");
sleep_forever();
}
native_thread.is_ipc_server = true;
nt.is_ipc_server = true;
});
}
@@ -457,7 +459,6 @@ Ipc_server::~Ipc_server()
* Reset thread role to non-server such that we can enter 'sleep_forever'
* without getting a warning.
*/
Native_thread &native_thread = Thread::myself()->native_thread();
native_thread.is_ipc_server = false;
Thread::myself()->with_native_thread([&] (Native_thread &nt) {
nt.is_ipc_server = false; });
}

@@ -130,8 +130,12 @@ void Native_thread::Epoll::_exec_control(FN const &fn)
* If 'myself_ptr' is nullptr, the caller is the initial thread w/o
* a valid 'Thread' object associated yet. This thread is never polling.
*/
bool const myself_is_polling = (myself_ptr != nullptr)
&& (&myself_ptr->native_thread().epoll == this);
auto myself_is_polling = [&]
{
return myself_ptr && myself_ptr->with_native_thread(
[&] (Native_thread &nt) { return (&nt.epoll == this); },
[&] { return false; });
};
/*
* If caller runs in the context of the same thread that executes 'poll' we
@@ -139,7 +143,7 @@ void Native_thread::Epoll::_exec_control(FN const &fn)
* block at this time. If the RPC entrypoint has exited its dispatch
* loop, it also cannot poll anymore.
*/
if (myself_is_polling || _rpc_ep_exited) {
if (myself_is_polling() || _rpc_ep_exited) {
fn();
return;
}

@@ -61,26 +61,29 @@ Native_capability Rpc_entrypoint::_alloc_rpc_cap(Pd_session& pd, Native_capabili
"cap_quota=", cap_upgrade).string());
});
}
return Thread::native_thread().epoll.alloc_rpc_cap();
return with_native_thread(
[&] (Native_thread &nt) { return nt.epoll.alloc_rpc_cap(); },
[&] { return Native_capability(); });
}
void Rpc_entrypoint::_free_rpc_cap(Pd_session& pd, Native_capability cap)
{
Native_thread::Epoll &epoll = Thread::native_thread().epoll;
with_native_thread([&] (Native_thread &nt) {
/*
* Flag RPC entrypoint as exited to prevent 'free_rpc_cap' from issuing
* a remote control request.
*/
if (_exit_handler.exit)
epoll.rpc_ep_exited();
/*
* Flag RPC entrypoint as exited to prevent 'free_rpc_cap' from issuing
* a remote control request.
*/
if (_exit_handler.exit)
nt.epoll.rpc_ep_exited();
/*
* Perform the accounting of the PDs cap quota at core, to remain
* consistent with other kernel platforms.
*/
pd.free_rpc_cap(Native_capability());
/*
* Perform the accounting of the PDs cap quota at core, to remain
* consistent with other kernel platforms.
*/
pd.free_rpc_cap(Native_capability());
epoll.free_rpc_cap(cap);
nt.epoll.free_rpc_cap(cap);
});
}

@@ -83,10 +83,10 @@ void Thread::_thread_start()
lx_sigaction(LX_SIGUSR1, empty_signal_handler, false);
/* inform core about the new thread and process ID of the new thread */
{
thread->with_native_thread([&] (Native_thread &nt) {
Linux_native_cpu_client native_cpu(thread->_cpu_session->native_cpu());
native_cpu.thread_id(thread->cap(), thread->native_thread().pid, thread->native_thread().tid);
}
native_cpu.thread_id(thread->cap(), nt.pid, nt.tid);
});
/* wakeup 'start' function */
startup_lock().wakeup();
@@ -100,7 +100,7 @@ void Thread::_thread_start()
}
void Thread::_init_platform_thread(size_t /* weight */, Type type)
void Thread::_init_native_thread(Stack &stack, size_t /* weight */, Type type)
{
/* if no cpu session is given, use it from the environment */
if (!_cpu_session) {
@@ -110,7 +110,7 @@ void Thread::_init_platform_thread(size_t /* weight */, Type type)
/* for normal threads create an object at the CPU session */
if (type == NORMAL) {
_cpu_session->create_thread(pd_session_cap(), _stack->name().string(),
_cpu_session->create_thread(pd_session_cap(), stack.name(),
Affinity::Location(), Weight()).with_result(
[&] (Thread_capability cap) { _thread_cap = cap; },
[&] (Cpu_session::Create_thread_error) {
@@ -119,12 +119,12 @@ void Thread::_init_platform_thread(size_t /* weight */, Type type)
return;
}
/* adjust initial object state for main threads */
native_thread().futex_counter = main_thread_futex_counter;
stack.native_thread().futex_counter = main_thread_futex_counter;
_thread_cap = main_thread_cap();
}
void Thread::_deinit_platform_thread()
void Thread::_deinit_native_thread(Stack &stack)
{
/*
* Kill thread until it is really really dead
@@ -139,12 +139,12 @@ void Thread::_deinit_platform_thread()
* anymore.
*/
for (;;) {
Native_thread &nt = stack.native_thread();
/* destroy thread locally */
int pid = native_thread().pid;
if (pid == 0) break;
if (nt.pid == 0) break;
int ret = lx_tgkill(pid, native_thread().tid, LX_SIGCANCEL);
int ret = lx_tgkill(nt.pid, nt.tid, LX_SIGCANCEL);
if (ret < 0) break;
@@ -180,8 +180,10 @@ Thread::Start_result Thread::start()
threadlib_initialized = true;
}
native_thread().tid = lx_create_thread(Thread::_thread_start, stack_top());
native_thread().pid = lx_getpid();
with_native_thread([&] (Native_thread &nt) {
nt.tid = lx_create_thread(Thread::_thread_start, stack_top());
nt.pid = lx_getpid();
});
/* wait until the 'thread_start' function got entered */
startup_lock().block();

@@ -383,9 +383,10 @@ static void adopt_thread(Native_thread::Meta_data *meta_data)
/*
* Initialize thread meta data
*/
Native_thread &native_thread = meta_data->thread_base.native_thread();
native_thread.tid = lx_gettid();
native_thread.pid = lx_getpid();
meta_data->thread_base.with_native_thread([&] (Native_thread &nt) {
nt.tid = lx_gettid();
nt.pid = lx_getpid();
});
}
@@ -483,10 +484,10 @@ Thread *Thread::myself()
new (global_alloc()) Thread_meta_data_adopted(thread);
/*
* Initialize 'Thread::_native_thread' to point to the default-
* Initialize 'Thread::_native_thread_ptr' to point to the default-
* constructed 'Native_thread' (part of 'Meta_data').
*/
meta_data->thread_base._native_thread = &meta_data->native_thread;
meta_data->thread_base._native_thread_ptr = &meta_data->native_thread;
adopt_thread(meta_data);
return thread;
@@ -498,20 +499,20 @@ Thread::Start_result Thread::start()
/*
* Unblock thread that is supposed to slumber in 'thread_start'.
*/
native_thread().meta_data->started();
with_native_thread([&] (Native_thread &nt) {
nt.meta_data->started(); });
return Start_result::OK;
}
void Thread::join()
{
native_thread().meta_data->wait_for_join();
with_native_thread([&] (Native_thread &nt) {
nt.meta_data->wait_for_join(); });
}
Native_thread &Thread::native_thread() { return *_native_thread; }
Thread::Thread(size_t weight, const char *name, size_t /* stack size */,
Type, Cpu_session * cpu_sess, Affinity::Location)
: _cpu_session(cpu_sess), _affinity()
@@ -519,7 +520,7 @@ Thread::Thread(size_t weight, const char *name, size_t /* stack size */,
Native_thread::Meta_data *meta_data =
new (global_alloc()) Thread_meta_data_created(this);
_native_thread = &meta_data->native_thread;
_native_thread_ptr = &meta_data->native_thread;
int const ret = pthread_create(&meta_data->pt, 0, thread_start, meta_data);
if (ret) {
@@ -528,18 +529,21 @@ Thread::Thread(size_t weight, const char *name, size_t /* stack size */,
throw Out_of_stack_space();
}
native_thread().meta_data->wait_for_construction();
with_native_thread([&] (Native_thread &nt) {
_thread_cap = _cpu_session->create_thread(_env_ptr->pd_session_cap(), name,
Location(), Weight(weight));
_thread_cap.with_result(
[&] (Thread_capability cap) {
Linux_native_cpu_client native_cpu(_cpu_session->native_cpu());
native_cpu.thread_id(cap, native_thread().pid, native_thread().tid);
},
[&] (Cpu_session::Create_thread_error) {
error("failed to create hybrid thread"); }
);
nt.meta_data->wait_for_construction();
_thread_cap = _cpu_session->create_thread(_env_ptr->pd_session_cap(), name,
Location(), Weight(weight));
_thread_cap.with_result(
[&] (Thread_capability cap) {
Linux_native_cpu_client native_cpu(_cpu_session->native_cpu());
native_cpu.thread_id(cap, nt.pid, nt.tid);
},
[&] (Cpu_session::Create_thread_error) {
error("failed to create hybrid thread"); }
);
});
}
@@ -561,22 +565,25 @@ Thread::Thread(Env &env, Name const &name, size_t stack_size)
Thread::~Thread()
{
bool const needs_join = (pthread_cancel(native_thread().meta_data->pt) == 0);
with_native_thread([&] (Native_thread &nt) {
if (needs_join) {
int const ret = pthread_join(native_thread().meta_data->pt, 0);
if (ret)
warning("pthread_join unexpectedly returned "
"with ", ret, " (errno=", errno, ")");
}
bool const needs_join = (pthread_cancel(nt.meta_data->pt) == 0);
Thread_meta_data_created *meta_data =
dynamic_cast<Thread_meta_data_created *>(native_thread().meta_data);
if (needs_join) {
int const ret = pthread_join(nt.meta_data->pt, 0);
if (ret)
warning("pthread_join unexpectedly returned "
"with ", ret, " (errno=", errno, ")");
}
if (meta_data)
destroy(global_alloc(), meta_data);
Thread_meta_data_created *meta_data =
dynamic_cast<Thread_meta_data_created *>(nt.meta_data);
_native_thread = nullptr;
if (meta_data)
destroy(global_alloc(), meta_data);
});
_native_thread_ptr = nullptr;
/* inform core about the killed thread */
_thread_cap.with_result(