hw: generalize IPC-helping to a common mechanism

* Remove helping from the Ipc_node and Thread classes
* Implement helping as a mechanism of Scheduler::Context

Ref 
Authored by Stefan Kalkowski on 2024-08-07 11:34:02 +02:00, committed by Christian Helmuth
commit 025043cdcf (parent dc37c396cf)
10 changed files with 98 additions and 113 deletions
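
The gist of the new mechanism, condensed into a standalone sketch: helping turns a
scheduler context into a donor whose CPU share is spent on behalf of another context,
transitively, until the donation is revoked. The sketch below is illustrative only; it
uses std::list in place of Genode's intrusive list utilities and omits all actual
scheduling state:

    #include <cassert>
    #include <list>

    struct Context
    {
        std::list<Context*>  _helpers     { };
        Context             *_destination { nullptr };

        /* donate this context's CPU share to 'c' until helping_finished() */
        void help(Context &c)
        {
            _destination = &c;
            c._helpers.push_back(this);
        }

        /* revoke the donation and leave the destination's helper list */
        void helping_finished()
        {
            if (!_destination) return;
            _destination->_helpers.remove(this);
            _destination = nullptr;
        }

        /* resolve the (possibly transitive) chain of donations */
        Context &helping_destination() {
            return _destination ? _destination->helping_destination() : *this; }

        /* sever all helping links in both directions */
        ~Context()
        {
            helping_finished();
            while (!_helpers.empty())
                _helpers.front()->helping_finished();
        }
    };

    int main()
    {
        Context client, proxy, server;
        client.help(proxy);   /* client calls proxy ...         */
        proxy.help(server);   /* ... which in turn calls server */

        /* the client's share is spent on whoever ends the chain */
        assert(&client.helping_destination() == &server);
    }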

@@ -31,10 +31,10 @@ using namespace Kernel;
  ** Cpu_job **
  *************/

-void Cpu_job::_activate_own_share() { _cpu->schedule(this); }
+void Cpu_job::_activate() { _cpu->schedule(this); }


-void Cpu_job::_deactivate_own_share()
+void Cpu_job::_deactivate()
 {
     assert(_cpu->id() == Cpu::executing_id());
     _cpu->scheduler().unready(*this);
@@ -124,7 +124,7 @@ Cpu::Idle_thread::Idle_thread(Board::Address_space_id_allocator &addr_space_id_a
 void Cpu::schedule(Job * const job)
 {
-    _scheduler.ready(job->context());
+    _scheduler.ready(*static_cast<Scheduler::Context*>(job));

     if (_id != executing_id() && _scheduler.need_to_schedule())
         trigger_ip_interrupt();
 }

@@ -88,10 +88,7 @@ class Kernel::Cpu : public Core::Cpu, private Irq::Pool,
             Halt_job() : Job (0, 0) { }

             void exception(Kernel::Cpu &) override { }
             void proceed(Kernel::Cpu &) override;
-
-            Kernel::Cpu_job* helping_destination() override { return this; }
-
         } _halt_job { };

         enum State { RUN, HALT, SUSPEND };
@@ -161,7 +158,7 @@ class Kernel::Cpu : public Core::Cpu, private Irq::Pool,
          * Returns the currently active job
          */
         Job & scheduled_job() {
-            return *static_cast<Job *>(&_scheduler.current())->helping_destination(); }
+            return static_cast<Job&>(_scheduler.current().helping_destination()); }

         unsigned id() const { return _id; }
         Scheduler &scheduler() { return _scheduler; }

@@ -56,12 +56,12 @@ class Kernel::Cpu_job : private Scheduler::Context
         /**
          * Activate our own CPU-share
          */
-        void _activate_own_share();
+        void _activate();

         /**
          * Deactivate our own CPU-share
          */
-        void _deactivate_own_share();
+        void _deactivate();

         /**
          * Yield the currently scheduled CPU share of this context
@@ -73,6 +73,11 @@ class Kernel::Cpu_job : private Scheduler::Context
          */
         bool _helping_possible(Cpu_job const &j) const { return j._cpu == _cpu; }

+        using Context::ready;
+        using Context::helping_finished;
+
+        void help(Cpu_job &job) { Context::help(job); }
+
     public:

         using Context = Scheduler::Context;
@@ -88,11 +93,6 @@ class Kernel::Cpu_job : private Scheduler::Context
          */
         virtual void proceed(Cpu & cpu) = 0;

-        /**
-         * Return which job currently uses our CPU-share
-         */
-        virtual Cpu_job * helping_destination() = 0;
-
         /**
          * Construct a job with scheduling priority 'p' and time quota 'q'
          */
@@ -113,11 +113,6 @@ class Kernel::Cpu_job : private Scheduler::Context
          */
         void quota(unsigned const q);

-        /**
-         * Return wether our CPU-share is currently active
-         */
-        bool own_share_active() { return Context::ready(); }
-
         /**
          * Update total execution time
          */
@@ -134,8 +129,6 @@ class Kernel::Cpu_job : private Scheduler::Context
          ***************/

         void cpu(Cpu &cpu) { _cpu = &cpu; }
-
-        Context &context() { return *this; }
 };

 #endif /* _CORE__KERNEL__CPU_CONTEXT_H_ */
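
A note on the interface shape above: 'Cpu_job' continues to inherit 'Scheduler::Context'
privately and now selectively re-exports only the helping-related operations, so
scheduler-internal state stays hidden from the rest of the kernel. A reduced sketch of
the pattern, with trivial stubs standing in for the real scheduler state:

    class Context
    {
        private:

            Context *_destination { nullptr };

        public:

            /* stubs, just enough to make the pattern compile */
            bool ready() const      { return _destination == nullptr; }
            void help(Context &c)   { _destination = &c; }
            void helping_finished() { _destination = nullptr; }
    };

    class Cpu_job : private Context
    {
        public:

            using Context::ready;             /* expose the readiness query    */
            using Context::helping_finished;  /* expose helping-chain teardown */

            /* narrow help() so that only other jobs can become destinations */
            void help(Cpu_job &job) { Context::help(job); }
    };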

@@ -57,19 +57,13 @@ void Ipc_node::_cancel_send()
 }


-bool Ipc_node::_helping() const
-{
-    return _out.state == Out::SEND_HELPING && _out.node;
-}
-
-
 bool Ipc_node::ready_to_send() const
 {
     return _out.state == Out::READY && !_in.waiting();
 }


-void Ipc_node::send(Ipc_node &node, bool help)
+void Ipc_node::send(Ipc_node &node)
 {
     node._in.queue.enqueue(_queue_item);
@@ -78,13 +72,7 @@ void Ipc_node::send(Ipc_node &node, bool help)
         node._thread.ipc_await_request_succeeded();
     }

     _out.node  = &node;
-    _out.state = help ? Out::SEND_HELPING : Out::SEND;
-}
-
-
-Thread &Ipc_node::helping_destination()
-{
-    return _helping() ? _out.node->helping_destination() : _thread;
+    _out.state = Out::SEND;
 }

@@ -50,14 +50,14 @@ class Kernel::Ipc_node
         struct Out
         {
-            enum State { READY, SEND, SEND_HELPING, DESTRUCT };
+            enum State { READY, SEND, DESTRUCT };

             State state { READY };
             Ipc_node *node { nullptr };

             bool sending() const
             {
-                return state == SEND_HELPING || state == SEND;
+                return state == SEND;
             }
         };
@@ -76,11 +76,6 @@ class Kernel::Ipc_node
          */
         void _cancel_send();

-        /**
-         * Return wether this IPC node is helping another one
-         */
-        bool _helping() const;
-
         /**
          * Noncopyable
          */
@@ -102,28 +97,8 @@ class Kernel::Ipc_node
          * Send a message and wait for the according reply
          *
          * \param node  targeted IPC node
-         * \param help  wether the request implies a helping relationship
          */
-        void send(Ipc_node &node, bool help);
-
-        /**
-         * Return final destination of the helping-chain
-         * this IPC node is part of, or its own thread otherwise
-         */
-        Thread &helping_destination();
-
-        /**
-         * Call 'fn' of type 'void (Ipc_node *)' for each helper
-         */
-        void for_each_helper(auto const &fn)
-        {
-            _in.queue.for_each([fn] (Queue_item &item) {
-                Ipc_node &node { item.object() };
-                if (node._helping())
-                    fn(node._thread);
-            });
-        }
+        void send(Ipc_node &node);

         /**
          * Return whether this IPC node is ready to wait for messages

@@ -19,6 +19,38 @@
 using namespace Kernel;


+void Scheduler::Context::help(Scheduler::Context &c)
+{
+    _destination = &c;
+    c._helper_list.insert(&_helper_le);
+}
+
+
+void Scheduler::Context::helping_finished()
+{
+    if (!_destination)
+        return;
+
+    _destination->_helper_list.remove(&_helper_le);
+    _destination = nullptr;
+}
+
+
+Scheduler::Context& Scheduler::Context::helping_destination()
+{
+    return (_destination) ? _destination->helping_destination() : *this;
+}
+
+
+Scheduler::Context::~Context()
+{
+    helping_finished();
+
+    for (Context::List_element *h = _helper_list.first(); h; h = h->next())
+        h->object()->helping_finished();
+}
+
+
 void Scheduler::_consumed(unsigned const time)
 {
     if (_super_period_left > time) {
@@ -149,7 +181,10 @@ void Scheduler::update(time_t time)
 void Scheduler::ready(Context &c)
 {
-    assert(!c.ready() && &c != &_idle);
+    assert(&c != &_idle);
+
+    if (c.ready())
+        return;

     c._ready = true;
@@ -170,23 +205,33 @@ void Scheduler::ready(Context &c)
     _slack_list.insert_head(&c._slack_le);

     if (!keep_current && _state == UP_TO_DATE) _state = OUT_OF_DATE;
+
+    for (Context::List_element *helper = c._helper_list.first();
+         helper; helper = helper->next())
+        if (!helper->object()->ready()) ready(*helper->object());
 }


 void Scheduler::unready(Context &c)
 {
-    assert(c.ready() && &c != &_idle);
+    assert(&c != &_idle);
+
+    if (!c.ready())
+        return;

     if (&c == _current && _state == UP_TO_DATE) _state = OUT_OF_DATE;

     c._ready = false;
     _slack_list.remove(&c._slack_le);

-    if (!c._quota)
-        return;
+    if (c._quota) {
+        _rpl[c._priority].remove(&c._priotized_le);
+        _upl[c._priority].insert_tail(&c._priotized_le);
+    }

-    _rpl[c._priority].remove(&c._priotized_le);
-    _upl[c._priority].insert_tail(&c._priotized_le);
+    for (Context::List_element *helper = c._helper_list.first();
+         helper; helper = helper->next())
+        if (helper->object()->ready()) unready(*helper->object());
 }

@@ -65,6 +65,7 @@ class Kernel::Scheduler
                 friend class Scheduler_test::Context;

                 using List_element = Genode::List_element<Context>;
+                using List         = Genode::List<List_element>;

                 unsigned _priority;
                 unsigned _quota;
@@ -74,10 +75,20 @@
                 List_element _slack_le { this };
                 unsigned _slack_time_left { 0 };

+                List_element  _helper_le   { this };
+                List          _helper_list { };
+                Context      *_destination { nullptr };
+
                 bool _ready { false };

                 void _reset() { _priotized_time_left = _quota; }

+                /**
+                 * Noncopyable
+                 */
+                Context(const Context&) = delete;
+                Context& operator=(const Context&) = delete;
+
             public:

                 Context(Priority const priority,
@@ -85,9 +96,14 @@
                 :
                     _priority(priority.value),
                     _quota(quota) { }

+                ~Context();
+
                 bool ready() const { return _ready; }
                 void quota(unsigned const q) { _quota = q; }

+                void help(Context &c);
+                void helping_finished();
+                Context& helping_destination();
         };

     private:

@@ -239,7 +239,8 @@ void Thread::ipc_send_request_succeeded()
     assert(_state == AWAITS_IPC);
     user_arg_0(0);
     _state = ACTIVE;
-    if (!Cpu_job::own_share_active()) { _activate_used_shares(); }
+    _activate();
+    helping_finished();
 }
@@ -248,7 +249,8 @@
     assert(_state == AWAITS_IPC);
     user_arg_0(-1);
     _state = ACTIVE;
-    if (!Cpu_job::own_share_active()) { _activate_used_shares(); }
+    _activate();
+    helping_finished();
 }
@@ -268,32 +270,16 @@ void Thread::ipc_await_request_failed()
 }


-void Thread::_deactivate_used_shares()
-{
-    Cpu_job::_deactivate_own_share();
-    _ipc_node.for_each_helper([&] (Thread &thread) {
-        thread._deactivate_used_shares(); });
-}
-
-
-void Thread::_activate_used_shares()
-{
-    Cpu_job::_activate_own_share();
-    _ipc_node.for_each_helper([&] (Thread &thread) {
-        thread._activate_used_shares(); });
-}
-
-
 void Thread::_become_active()
 {
-    if (_state != ACTIVE && !_paused) { _activate_used_shares(); }
+    if (_state != ACTIVE && !_paused) Cpu_job::_activate();
     _state = ACTIVE;
 }


 void Thread::_become_inactive(State const s)
 {
-    if (_state == ACTIVE && !_paused) { _deactivate_used_shares(); }
+    if (_state == ACTIVE && !_paused) Cpu_job::_deactivate();
     _state = s;
 }
@@ -301,10 +287,6 @@ void Thread::_become_inactive(State const s)
 void Thread::_die() { _become_inactive(DEAD); }


-Cpu_job * Thread::helping_destination() {
-    return &_ipc_node.helping_destination(); }
-
-
 size_t Thread::_core_to_kernel_quota(size_t const quota) const
 {
     using Genode::Cpu_session;
@@ -367,8 +349,8 @@ void Thread::_call_start_thread()
 void Thread::_call_pause_thread()
 {
     Thread &thread = *reinterpret_cast<Thread*>(user_arg_1());

-    if (thread._state == ACTIVE && !thread._paused) {
-        thread._deactivate_used_shares(); }
+    if (thread._state == ACTIVE && !thread._paused)
+        thread._deactivate();

     thread._paused = true;
 }
@@ -377,8 +359,8 @@ void Thread::_call_pause_thread()
 void Thread::_call_resume_thread()
 {
     Thread &thread = *reinterpret_cast<Thread*>(user_arg_1());

-    if (thread._state == ACTIVE && thread._paused) {
-        thread._activate_used_shares(); }
+    if (thread._state == ACTIVE && thread._paused)
+        thread._activate();

     thread._paused = false;
 }
@@ -572,11 +554,12 @@ void Thread::_call_send_request_msg()
             return;
         }
         _ipc_capid = oir ? oir->capid() : cap_id_invalid();
-        _ipc_node.send(dst->_ipc_node, help);
+        _ipc_node.send(dst->_ipc_node);
     }

     _state = AWAITS_IPC;
-    if (!help || !dst->own_share_active()) { _deactivate_used_shares(); }
+    if (help) Cpu_job::help(*dst);
+    if (!help || !dst->ready()) _deactivate();
 }
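
Putting the thread-side pieces together, a helping IPC call now proceeds roughly as
follows; this is a paraphrase of the hunks above in comment form, not literal kernel
code:

    /* caller, in _call_send_request_msg() */
    _ipc_node.send(dst->_ipc_node);     /* enqueue message at the callee      */
    _state = AWAITS_IPC;
    if (help) Cpu_job::help(*dst);      /* donate share: enter the callee's
                                           helper list, set the destination   */
    if (!help || !dst->ready())
        _deactivate();                  /* block unless the donated share can
                                           be spent on a ready callee         */

    /* scheduler: when the caller's share is selected, scheduled_job()
       resolves helping_destination(), so the callee runs on that share */

    /* caller, in ipc_send_request_succeeded() or ..._failed() */
    _state = ACTIVE;
    _activate();                        /* may be redundant if the caller
                                           stayed ready while helping         */
    helping_finished();                 /* leave the callee's helper list     */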

@@ -216,16 +216,6 @@ class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout
          */
         void _become_inactive(State const s);

-        /**
-         * Activate our CPU-share and those of our helpers
-         */
-        void _activate_used_shares();
-
-        /**
-         * Deactivate our CPU-share and those of our helpers
-         */
-        void _deactivate_used_shares();
-
         /**
          * Suspend unrecoverably from execution
          */
@@ -464,7 +454,6 @@ class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout
         void exception(Cpu & cpu) override;
         void proceed(Cpu & cpu) override;
-        Cpu_job * helping_destination() override;

     /*************

@@ -66,7 +66,7 @@ class Kernel::Vm : private Kernel::Object, public Cpu_job
         void _pause_vcpu()
         {
             if (_scheduled != INACTIVE)
-                Cpu_job::_deactivate_own_share();
+                Cpu_job::_deactivate();

             _scheduled = INACTIVE;
         }
@@ -135,7 +135,7 @@ class Kernel::Vm : private Kernel::Object, public Cpu_job
         void run()
         {
             _sync_from_vmm();
-            if (_scheduled != ACTIVE) Cpu_job::_activate_own_share();
+            if (_scheduled != ACTIVE) Cpu_job::_activate();
             _scheduled = ACTIVE;
         }
@@ -152,7 +152,6 @@ class Kernel::Vm : private Kernel::Object, public Cpu_job
         void exception(Cpu & cpu) override;
         void proceed(Cpu & cpu) override;
-        Cpu_job * helping_destination() override { return this; }
 };

 #endif /* _CORE__KERNEL__VM_H_ */