hw: sanitize cpu context

* Rename Kernel::Cpu_job to Kernel::Cpu_context (alias Kernel::Cpu::Context)
* Set the initial Cpu affinity of a Cpu::Context at construction time
* Move cpu affinity argument from kernel syscall create_thread to start_thread
* Ensure that the Cpu pointer is always valid

Fix genodelabs/genode#5319
Stefan Kalkowski 2024-08-12 11:23:12 +02:00 committed by Christian Helmuth
parent e275787119
commit f97c8cacde
19 changed files with 271 additions and 291 deletions
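
In essence, the commit replaces the nullable 'Cpu *_cpu' member with a pointer that is initialized from a constructor argument and only handed out by reference. A condensed sketch of the resulting class shape, abridged from the cpu_context.h hunks below:

class Kernel::Cpu_context : private Scheduler::Context
{
	private:

		Cpu *_cpu_ptr;   /* set at construction time, never null afterwards */

	protected:

		Cpu &_cpu() const { return *_cpu_ptr; }

	public:

		/* the CPU affinity is now stated at construction time */
		Cpu_context(Cpu &cpu, Priority const priority, unsigned const quota);

		/* re-link the context to another CPU later on */
		void affinity(Cpu &cpu);

		/* exception handling and resumption no longer take a Cpu argument */
		virtual void exception() = 0;
		virtual void proceed()   = 0;
};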

View File

@ -137,10 +137,9 @@ namespace Kernel {
* \retval 0 succeeded
* \retval !=0 failed
*/
inline int start_thread(Thread & thread, unsigned const cpu_id,
Pd & pd, Native_utcb & utcb)
inline int start_thread(Thread & thread, Pd & pd, Native_utcb & utcb)
{
return (int)call(call_id_start_thread(), (Call_arg)&thread, cpu_id,
return (int)call(call_id_start_thread(), (Call_arg)&thread,
(Call_arg)&pd, (Call_arg)&utcb);
}
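
Caller-side, the CPU id thus moves from thread start to thread creation. A hypothetical core-side helper illustrating the reordered bindings (the helper itself is not part of this commit; error handling is omitted):

static int create_and_start(Core::Kernel_object<Kernel::Thread> &thread,
                            unsigned const cpu_id, unsigned const priority,
                            Genode::size_t const quota, char const *label,
                            Kernel::Pd &pd, Genode::Native_utcb &utcb)
{
	/* the CPU id is now an argument of thread creation ... */
	Kernel::Thread::syscall_create(thread, cpu_id, priority, quota, label);

	/* ... and no longer of starting the thread */
	return Kernel::start_thread(*thread, pd, utcb);
}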

View File

@ -27,35 +27,35 @@
using namespace Kernel;
/*************
** Cpu_job **
*************/
/*****************
** Cpu_context **
*****************/
void Cpu_job::_activate() { _cpu->schedule(this); }
void Cpu_context::_activate() { _cpu().schedule(*this); }
void Cpu_job::_deactivate()
void Cpu_context::_deactivate()
{
assert(_cpu->id() == Cpu::executing_id());
_cpu->scheduler().unready(*this);
assert(_cpu().id() == Cpu::executing_id());
_cpu().scheduler().unready(*this);
}
void Cpu_job::_yield()
void Cpu_context::_yield()
{
assert(_cpu->id() == Cpu::executing_id());
_cpu->scheduler().yield();
assert(_cpu().id() == Cpu::executing_id());
_cpu().scheduler().yield();
}
void Cpu_job::_interrupt(Irq::Pool &user_irq_pool, unsigned const /* cpu_id */)
void Cpu_context::_interrupt(Irq::Pool &user_irq_pool)
{
/* let the IRQ controller take a pending IRQ for handling, if any */
unsigned irq_id;
if (_cpu->pic().take_request(irq_id))
if (_cpu().pic().take_request(irq_id))
/* let the CPU of this job handle the IRQ if it is a CPU-local one */
if (!_cpu->handle_if_cpu_local_interrupt(irq_id)) {
/* let the CPU of this context handle the IRQ if it is a CPU-local one */
if (!_cpu().handle_if_cpu_local_interrupt(irq_id)) {
/* it isn't a CPU-local IRQ, so it must be a user IRQ */
User_irq * irq = User_irq::object(user_irq_pool, irq_id);
@ -64,38 +64,37 @@ void Cpu_job::_interrupt(Irq::Pool &user_irq_pool, unsigned const /* cpu_id */)
}
/* let the IRQ controller finish the currently taken IRQ */
_cpu->pic().finish_request();
_cpu().pic().finish_request();
}
void Cpu_job::affinity(Cpu &cpu)
void Cpu_context::affinity(Cpu &cpu)
{
_cpu = &cpu;
_cpu->scheduler().insert(*this);
_cpu().scheduler().remove(*this);
_cpu_ptr = &cpu;
_cpu().scheduler().insert(*this);
}
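
Because '_cpu_ptr' is valid from construction on, re-assigning the affinity is a plain re-link: leave the current CPU's scheduler, swap the pointer, join the new one. A hypothetical migration call (assuming a 'cpu_pool' as used elsewhere in the kernel):

Kernel::Cpu &target = cpu_pool.cpu(1);  /* hypothetical target CPU */
context.affinity(target);               /* removes from the old scheduler, inserts into the new one */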
void Cpu_job::quota(unsigned const q)
void Cpu_context::quota(unsigned const q)
{
if (_cpu)
_cpu->scheduler().quota(*this, q);
else
Context::quota(q);
_cpu().scheduler().quota(*this, q);
}
Cpu_job::Cpu_job(Priority const p, unsigned const q)
Cpu_context::Cpu_context(Cpu &cpu,
Priority const priority,
unsigned const quota)
:
Context(p, q), _cpu(0)
{ }
Cpu_job::~Cpu_job()
Context(priority, quota), _cpu_ptr(&cpu)
{
if (!_cpu)
return;
_cpu().scheduler().insert(*this);
}
_cpu->scheduler().remove(*this);
Cpu_context::~Cpu_context()
{
_cpu().scheduler().remove(*this);
}
@ -112,19 +111,17 @@ Cpu::Idle_thread::Idle_thread(Board::Address_space_id_allocator &addr_space_id_a
Cpu &cpu,
Pd &core_pd)
:
Thread { addr_space_id_alloc, user_irq_pool, cpu_pool, core_pd,
Priority::min(), 0, "idle", Thread::IDLE }
Thread { addr_space_id_alloc, user_irq_pool, cpu_pool, cpu,
core_pd, Priority::min(), 0, "idle", Thread::IDLE }
{
regs->ip = (addr_t)&idle_thread_main;
affinity(cpu);
Thread::_pd = &core_pd;
}
void Cpu::schedule(Job * const job)
void Cpu::schedule(Context &context)
{
_scheduler.ready(*static_cast<Scheduler::Context*>(job));
_scheduler.ready(static_cast<Scheduler::Context&>(context));
if (_id != executing_id() && _scheduler.need_to_schedule())
trigger_ip_interrupt();
}
@ -142,26 +139,26 @@ bool Cpu::handle_if_cpu_local_interrupt(unsigned const irq_id)
}
Cpu_job & Cpu::schedule()
Cpu::Context & Cpu::handle_exception_and_schedule()
{
/* update scheduler */
Job & old_job = scheduled_job();
old_job.exception(*this);
Context &context = current_context();
context.exception();
if (_state == SUSPEND || _state == HALT)
return _halt_job;
/* update schedule if necessary */
if (_scheduler.need_to_schedule()) {
_timer.process_timeouts();
_scheduler.update(_timer.time());
time_t t = _scheduler.current_time_left();
_timer.set_timeout(&_timeout, t);
time_t duration = _timer.schedule_timeout();
old_job.update_execution_time(duration);
context.update_execution_time(duration);
}
/* return new job */
return scheduled_job();
/* return current context */
return current_context();
}

View File

@ -39,9 +39,11 @@ namespace Kernel {
class Kernel::Cpu : public Core::Cpu, private Irq::Pool,
public Genode::List<Cpu>::Element
{
private:
public:
using Job = Cpu_job;
using Context = Cpu_context;
private:
/**
* Inter-processor-interrupt object of the cpu
@ -83,13 +85,14 @@ class Kernel::Cpu : public Core::Cpu, private Irq::Pool,
Pd &core_pd);
};
struct Halt_job : Job
struct Halt_job : Cpu_context
{
Halt_job() : Job (0, 0) { }
Halt_job(Cpu &cpu)
: Cpu_context(cpu, 0, 0) { }
void exception(Kernel::Cpu &) override { }
void proceed(Kernel::Cpu &) override;
} _halt_job { };
void exception() override { }
void proceed() override;
} _halt_job { *this };
enum State { RUN, HALT, SUSPEND };
@ -140,14 +143,14 @@ class Kernel::Cpu : public Core::Cpu, private Irq::Pool,
bool handle_if_cpu_local_interrupt(unsigned const irq_id);
/**
* Schedule 'job' at this CPU
* Schedule 'context' at this CPU
*/
void schedule(Job * const job);
void schedule(Context& context);
/**
* Return the job that should be executed at next
* Return the context that should be executed next
*/
Cpu_job& schedule();
Context& handle_exception_and_schedule();
Board::Pic & pic() { return _pic; }
Timer & timer() { return _timer; }
@ -155,10 +158,10 @@ class Kernel::Cpu : public Core::Cpu, private Irq::Pool,
addr_t stack_start();
/**
* Returns the currently active job
* Returns the currently scheduled context
*/
Job & scheduled_job() {
return static_cast<Job&>(_scheduler.current().helping_destination()); }
Context & current_context() {
return static_cast<Context&>(_scheduler.current().helping_destination()); }
unsigned id() const { return _id; }
Scheduler &scheduler() { return _scheduler; }

View File

@ -22,45 +22,38 @@
namespace Kernel {
class Cpu;
/**
* Context of a job (thread, VM, idle) that shall be executed by a CPU
*/
class Cpu_job;
class Cpu_context;
}
class Kernel::Cpu_job : private Scheduler::Context
/**
* Context (thread, vcpu) that shall be executed by a CPU
*/
class Kernel::Cpu_context : private Scheduler::Context
{
private:
friend class Cpu; /* static_cast from 'Scheduler::Context' to 'Cpu_job' */
friend class Cpu;
time_t _execution_time { 0 };
Cpu *_cpu_ptr;
/*
* Noncopyable
*/
Cpu_job(Cpu_job const &);
Cpu_job &operator = (Cpu_job const &);
Cpu_context(Cpu_context const &);
Cpu_context &operator = (Cpu_context const &);
protected:
Cpu * _cpu;
Cpu &_cpu() const { return *_cpu_ptr; }
/**
* Handle interrupt exception that occurred during execution on CPU 'id'
* Handle interrupt exception
*/
void _interrupt(Irq::Pool &user_irq_pool, unsigned const id);
void _interrupt(Irq::Pool &user_irq_pool);
/**
* Activate our own CPU-share
*/
void _activate();
/**
* Deactivate our own CPU-share
*/
void _deactivate();
/**
@ -69,47 +62,34 @@ class Kernel::Cpu_job : private Scheduler::Context
void _yield();
/**
* Return whether we are allowed to help job 'j' with our CPU-share
* Return whether helping context 'j' is possible scheduling-wise
*/
bool _helping_possible(Cpu_job const &j) const { return j._cpu == _cpu; }
bool _helping_possible(Cpu_context const &j) const {
return j._cpu_ptr == _cpu_ptr; }
void _help(Cpu_context &context) { Context::help(context); }
using Context::ready;
using Context::helping_finished;
void help(Cpu_job &job) { Context::help(job); }
public:
using Context = Scheduler::Context;
using Priority = Scheduler::Priority;
/**
* Handle exception that occurred during execution on CPU 'id'
*/
virtual void exception(Cpu & cpu) = 0;
Cpu_context(Cpu &cpu,
Priority const priority,
unsigned const quota);
virtual ~Cpu_context();
/**
* Continue execution on CPU 'id'
*/
virtual void proceed(Cpu & cpu) = 0;
/**
* Construct a job with scheduling priority 'p' and time quota 'q'
*/
Cpu_job(Priority const p, unsigned const q);
/**
* Destructor
*/
virtual ~Cpu_job();
/**
* Link job to CPU 'cpu'
* Link context to CPU 'cpu'
*/
void affinity(Cpu &cpu);
/**
* Set CPU quota of the job to 'q'
* Set CPU quota of the context to 'q'
*/
void quota(unsigned const q);
@ -123,12 +103,15 @@ class Kernel::Cpu_job : private Scheduler::Context
*/
time_t execution_time() const { return _execution_time; }
/**
* Handle exception that occurred during execution of this context
*/
virtual void exception() = 0;
/***************
** Accessors **
***************/
void cpu(Cpu &cpu) { _cpu = &cpu; }
/**
* Continue execution of this context
*/
virtual void proceed() = 0;
};
#endif /* _CORE__KERNEL__CPU_CONTEXT_H_ */
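
To make the reworked pure-virtual interface concrete, here is a minimal hypothetical subclass in the style of Kernel::Thread and Kernel::Vm (illustrative only, not part of the commit):

class Demo_context : public Kernel::Cpu_context
{
	public:

		Demo_context(Kernel::Cpu &cpu)
		: Kernel::Cpu_context(cpu, Kernel::Scheduler::Priority::min(), 0) { }

		/* called after the CPU trapped into the kernel while running this context */
		void exception() override { }

		/* called to resume execution; the CPU is reachable via _cpu() */
		void proceed() override { }
};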

View File

@ -11,8 +11,8 @@
* under the terms of the GNU Affero General Public License version 3.
*/
#ifndef _CORE__KERNEL__SMP_H_
#define _CORE__KERNEL__SMP_H_
#ifndef _CORE__KERNEL__INTER_PROCESSOR_WORK_H_
#define _CORE__KERNEL__INTER_PROCESSOR_WORK_H_
#include <util/interface.h>
@ -32,11 +32,11 @@ class Kernel::Inter_processor_work : Genode::Interface
{
public:
virtual void execute(Cpu &) = 0;
virtual void execute(Cpu & cpu) = 0;
protected:
Genode::List_element<Inter_processor_work> _le { this };
};
#endif /* _CORE__KERNEL__SMP_H_ */
#endif /* _CORE__KERNEL__INTER_PROCESSOR_WORK_H_ */
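
Cross-CPU operations such as Thread::Destroy below implement this interface. A hypothetical work item might look as follows (illustrative sketch, not part of the commit):

struct Ping_work : Kernel::Inter_processor_work
{
	/* executed on the remote CPU once it handles the IPI */
	void execute(Kernel::Cpu &cpu) override
	{
		Genode::raw("IPI handled on CPU ", cpu.id());
	}
};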

View File

@ -63,16 +63,16 @@ Kernel::Main *Kernel::Main::_instance;
void Kernel::Main::_handle_kernel_entry()
{
Cpu &cpu = _cpu_pool.cpu(Cpu::executing_id());
Cpu_job * new_job;
Cpu::Context * context;
{
Lock::Guard guard(_data_lock);
new_job = &cpu.schedule();
context =
&_cpu_pool.cpu(Cpu::executing_id()).handle_exception_and_schedule();
}
new_job->proceed(cpu);
context->proceed();
}

View File

@ -169,7 +169,7 @@ Thread::Destroy::Destroy(Thread & caller, Core::Kernel_object<Thread> & to_delet
:
caller(caller), thread_to_destroy(to_delete)
{
thread_to_destroy->_cpu->work_list().insert(&_le);
thread_to_destroy->_cpu().work_list().insert(&_le);
caller._become_inactive(AWAITS_RESTART);
}
@ -177,7 +177,7 @@ Thread::Destroy::Destroy(Thread & caller, Core::Kernel_object<Thread> & to_delet
void
Thread::Destroy::execute(Cpu &)
{
thread_to_destroy->_cpu->work_list().remove(&_le);
thread_to_destroy->_cpu().work_list().remove(&_le);
thread_to_destroy.destruct();
caller._restart();
}
@ -272,14 +272,14 @@ void Thread::ipc_await_request_failed()
void Thread::_become_active()
{
if (_state != ACTIVE && !_paused) Cpu_job::_activate();
if (_state != ACTIVE && !_paused) Cpu_context::_activate();
_state = ACTIVE;
}
void Thread::_become_inactive(State const s)
{
if (_state == ACTIVE && !_paused) Cpu_job::_deactivate();
if (_state == ACTIVE && !_paused) Cpu_context::_deactivate();
_state = s;
}
@ -293,7 +293,7 @@ size_t Thread::_core_to_kernel_quota(size_t const quota) const
/* we assert at timer construction that cpu_quota_us in ticks fits size_t */
size_t const ticks = (size_t)
_cpu->timer().us_to_ticks(Kernel::cpu_quota_us);
_cpu().timer().us_to_ticks(Kernel::cpu_quota_us);
return Cpu_session::quota_lim_downscale(quota, ticks);
}
@ -301,24 +301,20 @@ size_t Thread::_core_to_kernel_quota(size_t const quota) const
void Thread::_call_thread_quota()
{
Thread * const thread = (Thread *)user_arg_1();
thread->Cpu_job::quota((unsigned)(_core_to_kernel_quota(user_arg_2())));
thread->Cpu_context::quota((unsigned)(_core_to_kernel_quota(user_arg_2())));
}
void Thread::_call_start_thread()
{
/* lookup CPU */
Cpu & cpu = _cpu_pool.cpu((unsigned)user_arg_2());
user_arg_0(0);
Thread &thread = *(Thread*)user_arg_1();
assert(thread._state == AWAITS_START);
thread.affinity(cpu);
/* join protection domain */
thread._pd = (Pd *) user_arg_3();
switch (thread._ipc_init(*(Native_utcb *)user_arg_4(), *this)) {
thread._pd = (Pd *) user_arg_2();
switch (thread._ipc_init(*(Native_utcb *)user_arg_3(), *this)) {
case Ipc_alloc_result::OK:
break;
case Ipc_alloc_result::EXHAUSTED:
@ -338,7 +334,8 @@ void Thread::_call_start_thread()
* semantic changes, and additional core threads are started
* across cpu cores.
*/
if (thread._pd == &_core_pd && cpu.id() != _cpu_pool.primary_cpu().id())
if (thread._pd == &_core_pd &&
thread._cpu().id() != _cpu_pool.primary_cpu().id())
Genode::raw("Error: do not start core threads"
" on CPU cores other than the boot cpu");
@ -433,7 +430,7 @@ void Thread::_cancel_blocking()
void Thread::_call_yield_thread()
{
Cpu_job::_yield();
Cpu_context::_yield();
}
@ -443,12 +440,11 @@ void Thread::_call_delete_thread()
*(Core::Kernel_object<Thread>*)user_arg_1();
/**
* Delete a thread immediately if it has no cpu assigned yet,
* or it is assigned to this cpu, or the assigned cpu did not schedule it.
* Delete a thread immediately if it is assigned to this cpu,
* or the assigned cpu did not schedule it.
*/
if (!to_delete->_cpu ||
(to_delete->_cpu->id() == Cpu::executing_id() ||
&to_delete->_cpu->scheduled_job() != &*to_delete)) {
if (to_delete->_cpu().id() == Cpu::executing_id() ||
&to_delete->_cpu().current_context() != &*to_delete) {
_call_delete<Thread>();
return;
}
@ -457,7 +453,7 @@ void Thread::_call_delete_thread()
* Construct a cross-cpu work item and send an IPI
*/
_destroy.construct(*this, to_delete);
to_delete->_cpu->trigger_ip_interrupt();
to_delete->_cpu().trigger_ip_interrupt();
}
@ -466,8 +462,8 @@ void Thread::_call_delete_pd()
Core::Kernel_object<Pd> & pd =
*(Core::Kernel_object<Pd>*)user_arg_1();
if (_cpu->active(pd->mmu_regs))
_cpu->switch_to(_core_pd.mmu_regs);
if (_cpu().active(pd->mmu_regs))
_cpu().switch_to(_core_pd.mmu_regs);
_call_delete<Pd>();
}
@ -499,7 +495,7 @@ void Thread::_call_await_request_msg()
void Thread::_call_timeout()
{
Timer & t = _cpu->timer();
Timer & t = _cpu().timer();
_timeout_sigid = (Kernel::capid_t)user_arg_2();
t.set_timeout(this, t.us_to_ticks(user_arg_1()));
}
@ -507,13 +503,13 @@ void Thread::_call_timeout()
void Thread::_call_timeout_max_us()
{
user_ret_time(_cpu->timer().timeout_max_us());
user_ret_time(_cpu().timer().timeout_max_us());
}
void Thread::_call_time()
{
Timer & t = _cpu->timer();
Timer & t = _cpu().timer();
user_ret_time(t.ticks_to_us(t.time()));
}
@ -540,7 +536,7 @@ void Thread::_call_send_request_msg()
_become_inactive(DEAD);
return;
}
bool const help = Cpu_job::_helping_possible(*dst);
bool const help = Cpu_context::_helping_possible(*dst);
oir = oir->find(dst->pd());
if (!_ipc_node.ready_to_send()) {
@ -558,7 +554,7 @@ void Thread::_call_send_request_msg()
}
_state = AWAITS_IPC;
if (help) Cpu_job::help(*dst);
if (help) Cpu_context::_help(*dst);
if (!help || !dst->ready()) _deactivate();
}
@ -727,7 +723,7 @@ void Thread::_call_new_irq()
(Genode::Irq_session::Polarity) (user_arg_3() & 0b11);
_call_new<User_irq>((unsigned)user_arg_2(), trigger, polarity, *c,
_cpu->pic(), _user_irq_pool);
_cpu().pic(), _user_irq_pool);
}
@ -869,13 +865,15 @@ void Thread::_call()
switch (call_id) {
case call_id_new_thread():
_call_new<Thread>(_addr_space_id_alloc, _user_irq_pool, _cpu_pool,
_core_pd, (unsigned) user_arg_2(),
(unsigned) _core_to_kernel_quota(user_arg_3()),
(char const *) user_arg_4(), USER);
_cpu_pool.cpu((unsigned)user_arg_2()),
_core_pd, (unsigned) user_arg_3(),
(unsigned) _core_to_kernel_quota(user_arg_4()),
(char const *) user_arg_5(), USER);
return;
case call_id_new_core_thread():
_call_new<Thread>(_addr_space_id_alloc, _user_irq_pool, _cpu_pool,
_core_pd, (char const *) user_arg_2());
_cpu_pool.cpu((unsigned)user_arg_2()),
_core_pd, (char const *) user_arg_3());
return;
case call_id_thread_quota(): _call_thread_quota(); return;
case call_id_delete_thread(): _call_delete_thread(); return;
@ -972,6 +970,7 @@ void Thread::_exception()
Thread::Thread(Board::Address_space_id_allocator &addr_space_id_alloc,
Irq::Pool &user_irq_pool,
Cpu_pool &cpu_pool,
Cpu &cpu,
Pd &core_pd,
unsigned const priority,
unsigned const quota,
@ -979,7 +978,7 @@ Thread::Thread(Board::Address_space_id_allocator &addr_space_id_alloc,
Type type)
:
Kernel::Object { *this },
Cpu_job { priority, quota },
Cpu_context { cpu, priority, quota },
_addr_space_id_alloc { addr_space_id_alloc },
_user_irq_pool { user_irq_pool },
_cpu_pool { cpu_pool },
@ -1016,8 +1015,8 @@ Core_main_thread(Board::Address_space_id_allocator &addr_space_id_alloc,
Cpu_pool &cpu_pool,
Pd &core_pd)
:
Core_object<Thread>(
core_pd, addr_space_id_alloc, user_irq_pool, cpu_pool, core_pd, "core")
Core_object<Thread>(core_pd, addr_space_id_alloc, user_irq_pool, cpu_pool,
cpu_pool.primary_cpu(), core_pd, "core")
{
using namespace Core;
@ -1033,7 +1032,6 @@ Core_main_thread(Board::Address_space_id_allocator &addr_space_id_alloc,
regs->sp = (addr_t)&__initial_stack_base[0] + DEFAULT_STACK_SIZE;
regs->ip = (addr_t)&_core_start;
affinity(_cpu_pool.primary_cpu());
_utcb = &_utcb_instance;
Thread::_pd = &core_pd;
_become_active();

View File

@ -53,7 +53,7 @@ struct Kernel::Thread_fault
/**
* Kernel back-end for userland execution-contexts
*/
class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout
class Kernel::Thread : private Kernel::Object, public Cpu_context, private Timeout
{
public:
@ -335,6 +335,7 @@ class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout
Thread(Board::Address_space_id_allocator &addr_space_id_alloc,
Irq::Pool &user_irq_pool,
Cpu_pool &cpu_pool,
Cpu &cpu,
Pd &core_pd,
unsigned const priority,
unsigned const quota,
@ -349,11 +350,12 @@ class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout
Thread(Board::Address_space_id_allocator &addr_space_id_alloc,
Irq::Pool &user_irq_pool,
Cpu_pool &cpu_pool,
Cpu &cpu,
Pd &core_pd,
char const *const label)
:
Thread(addr_space_id_alloc, user_irq_pool, cpu_pool, core_pd,
Scheduler::Priority::min(), 0, label, CORE)
Thread(addr_space_id_alloc, user_irq_pool, cpu_pool, cpu,
core_pd, Scheduler::Priority::min(), 0, label, CORE)
{ }
~Thread();
@ -390,13 +392,14 @@ class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout
* \retval capability id of the new kernel object
*/
static capid_t syscall_create(Core::Kernel_object<Thread> &t,
unsigned const cpu_id,
unsigned const priority,
size_t const quota,
char const * const label)
{
return (capid_t)call(call_id_new_thread(), (Call_arg)&t,
(Call_arg)priority, (Call_arg)quota,
(Call_arg)label);
(Call_arg)cpu_id, (Call_arg)priority,
(Call_arg)quota, (Call_arg)label);
}
/**
@ -408,10 +411,11 @@ class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout
* \retval capability id of the new kernel object
*/
static capid_t syscall_create(Core::Kernel_object<Thread> &t,
unsigned const cpu_id,
char const * const label)
{
return (capid_t)call(call_id_new_core_thread(), (Call_arg)&t,
(Call_arg)label);
(Call_arg)cpu_id, (Call_arg)label);
}
/**
@ -448,12 +452,12 @@ class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout
void signal_receive_signal(void * const base, size_t const size);
/*************
** Cpu_job **
*************/
/*****************
** Cpu_context **
*****************/
void exception(Cpu & cpu) override;
void proceed(Cpu & cpu) override;
void exception() override;
void proceed() override;
/*************

View File

@ -31,7 +31,7 @@ namespace Kernel {
}
class Kernel::Vm : private Kernel::Object, public Cpu_job
class Kernel::Vm : private Kernel::Object, public Cpu_context
{
public:
@ -66,7 +66,7 @@ class Kernel::Vm : private Kernel::Object, public Cpu_job
void _pause_vcpu()
{
if (_scheduled != INACTIVE)
Cpu_job::_deactivate();
Cpu_context::_deactivate();
_scheduled = INACTIVE;
}
@ -135,7 +135,7 @@ class Kernel::Vm : private Kernel::Object, public Cpu_job
void run()
{
_sync_from_vmm();
if (_scheduled != ACTIVE) Cpu_job::_activate();
if (_scheduled != ACTIVE) Cpu_context::_activate();
_scheduled = ACTIVE;
}
@ -146,12 +146,12 @@ class Kernel::Vm : private Kernel::Object, public Cpu_job
}
/*************
** Cpu_job **
*************/
/*****************
** Cpu_context **
*****************/
void exception(Cpu & cpu) override;
void proceed(Cpu & cpu) override;
void exception() override;
void proceed() override;
};
#endif /* _CORE__KERNEL__VM_H_ */

View File

@ -93,7 +93,7 @@ Platform_thread::Platform_thread(Label const &label, Native_utcb &utcb)
_utcb((addr_t)&utcb),
_main_thread(false),
_location(Affinity::Location()),
_kobj(_kobj.CALLED_FROM_CORE, _label.string())
_kobj(_kobj.CALLED_FROM_CORE, _location.xpos(), _label.string())
{ }
@ -115,7 +115,8 @@ Platform_thread::Platform_thread(Platform_pd &pd,
_quota((unsigned)quota),
_main_thread(!pd.has_any_thread),
_location(location),
_kobj(_kobj.CALLED_FROM_CORE, _priority, _quota, _label.string())
_kobj(_kobj.CALLED_FROM_CORE, _location.xpos(),
_priority, _quota, _label.string())
{
_address_space = pd.weak_ptr();
pd.has_any_thread = true;
@ -171,9 +172,6 @@ void Platform_thread::start(void * const ip, void * const sp)
_kobj->regs->ip = reinterpret_cast<addr_t>(ip);
_kobj->regs->sp = reinterpret_cast<addr_t>(sp);
/* start executing new thread */
unsigned const cpu = _location.xpos();
Native_utcb &utcb = *Thread::myself()->utcb();
/* reset capability counter */
@ -183,7 +181,9 @@ void Platform_thread::start(void * const ip, void * const sp)
utcb.cap_add(Capability_space::capid(_pd.parent()));
utcb.cap_add(Capability_space::capid(_utcb._ds));
}
Kernel::start_thread(*_kobj, cpu, _pd.kernel_pd(), *(Native_utcb*)_utcb.core_addr);
Kernel::start_thread(*_kobj, _pd.kernel_pd(),
*(Native_utcb*)_utcb.core_addr);
}

View File

@ -23,32 +23,35 @@
using namespace Kernel;
extern "C" void kernel_to_user_context_switch(Cpu::Context*, Cpu::Fpu_context*);
extern "C" void kernel_to_user_context_switch(Core::Cpu::Context*,
Core::Cpu::Fpu_context*);
void Thread::_call_suspend() { }
void Thread::exception(Cpu & cpu)
void Thread::exception()
{
using Ctx = Core::Cpu::Context;
switch (regs->cpu_exception) {
case Cpu::Context::SUPERVISOR_CALL:
case Ctx::SUPERVISOR_CALL:
_call();
return;
case Cpu::Context::PREFETCH_ABORT:
case Cpu::Context::DATA_ABORT:
case Ctx::PREFETCH_ABORT:
case Ctx::DATA_ABORT:
_mmu_exception();
return;
case Cpu::Context::INTERRUPT_REQUEST:
case Cpu::Context::FAST_INTERRUPT_REQUEST:
_interrupt(_user_irq_pool, cpu.id());
case Ctx::INTERRUPT_REQUEST:
case Ctx::FAST_INTERRUPT_REQUEST:
_interrupt(_user_irq_pool);
return;
case Cpu::Context::UNDEFINED_INSTRUCTION:
case Ctx::UNDEFINED_INSTRUCTION:
Genode::raw(*this, ": undefined instruction at ip=",
Genode::Hex(regs->ip));
_die();
return;
case Cpu::Context::RESET:
case Ctx::RESET:
return;
default:
Genode::raw(*this, ": triggered an unknown exception ",
@ -71,17 +74,17 @@ void Kernel::Thread::Tlb_invalidation::execute(Cpu &) { }
void Thread::Flush_and_stop_cpu::execute(Cpu &) { }
void Cpu::Halt_job::proceed(Kernel::Cpu &) { }
void Cpu::Halt_job::proceed() { }
void Thread::proceed(Cpu & cpu)
void Thread::proceed()
{
if (!cpu.active(pd().mmu_regs) && type() != CORE)
cpu.switch_to(pd().mmu_regs);
if (!_cpu().active(pd().mmu_regs) && type() != CORE)
_cpu().switch_to(pd().mmu_regs);
regs->cpu_exception = cpu.stack_start();
kernel_to_user_context_switch((static_cast<Cpu::Context*>(&*regs)),
(static_cast<Cpu::Fpu_context*>(&*regs)));
regs->cpu_exception = _cpu().stack_start();
kernel_to_user_context_switch((static_cast<Core::Cpu::Context*>(&*regs)),
(static_cast<Core::Cpu::Fpu_context*>(&*regs)));
}

View File

@ -28,14 +28,13 @@ Vm::Vm(Irq::Pool & user_irq_pool,
Identity & id)
:
Kernel::Object { *this },
Cpu_job(Scheduler::Priority::min(), 0),
Cpu_context(cpu, Scheduler::Priority::min(), 0),
_user_irq_pool(user_irq_pool),
_state(data),
_context(context),
_id(id),
_vcpu_context(cpu)
{
affinity(cpu);
/* once constructed, exit with a startup exception */
pause();
_state.cpu_exception = Genode::VCPU_EXCEPTION_STARTUP;
@ -46,12 +45,12 @@ Vm::Vm(Irq::Pool & user_irq_pool,
Vm::~Vm() {}
void Vm::exception(Cpu & cpu)
void Vm::exception()
{
switch(_state.cpu_exception) {
case Genode::Cpu_state::INTERRUPT_REQUEST: [[fallthrough]];
case Genode::Cpu_state::FAST_INTERRUPT_REQUEST:
_interrupt(_user_irq_pool, cpu.id());
_interrupt(_user_irq_pool);
return;
case Genode::Cpu_state::DATA_ABORT:
_state.dfar = Cpu::Dfar::read();
@ -69,19 +68,19 @@ bool secure_irq(unsigned const i);
extern "C" void monitor_mode_enter_normal_world(Genode::Vcpu_state&, void*);
void Vm::proceed(Cpu & cpu)
void Vm::proceed()
{
unsigned const irq = _state.irq_injection;
if (irq) {
if (cpu.pic().secure(irq)) {
if (_cpu().pic().secure(irq)) {
Genode::raw("Refuse to inject secure IRQ into VM");
} else {
cpu.pic().trigger(irq);
_cpu().pic().trigger(irq);
_state.irq_injection = 0;
}
}
monitor_mode_enter_normal_world(_state, (void*) cpu.stack_start());
monitor_mode_enter_normal_world(_state, (void*) _cpu().stack_start());
}

View File

@ -101,7 +101,7 @@ void Board::Vcpu_context::Vm_irq::handle(Vm & vm, unsigned irq) {
void Board::Vcpu_context::Vm_irq::occurred()
{
Vm *vm = dynamic_cast<Vm*>(&_cpu.scheduled_job());
Vm *vm = dynamic_cast<Vm*>(&_cpu.current_context());
if (!vm) Genode::raw("VM interrupt while VM is not running!");
else handle(*vm, _irq_nr);
}
@ -140,14 +140,13 @@ Kernel::Vm::Vm(Irq::Pool & user_irq_pool,
Identity & id)
:
Kernel::Object { *this },
Cpu_job(Scheduler::Priority::min(), 0),
Cpu_context(cpu, Scheduler::Priority::min(), 0),
_user_irq_pool(user_irq_pool),
_state(data),
_context(context),
_id(id),
_vcpu_context(cpu)
{
affinity(cpu);
/* once constructed, exit with a startup exception */
pause();
_state.cpu_exception = Genode::VCPU_EXCEPTION_STARTUP;
@ -164,29 +163,29 @@ Kernel::Vm::~Vm()
}
void Kernel::Vm::exception(Cpu & cpu)
void Kernel::Vm::exception()
{
switch(_state.cpu_exception) {
case Genode::Cpu_state::INTERRUPT_REQUEST:
case Genode::Cpu_state::FAST_INTERRUPT_REQUEST:
_interrupt(_user_irq_pool, cpu.id());
_interrupt(_user_irq_pool);
break;
default:
pause();
_context.submit(1);
}
if (cpu.pic().ack_virtual_irq(_vcpu_context.pic))
if (_cpu().pic().ack_virtual_irq(_vcpu_context.pic))
inject_irq(Board::VT_MAINTAINANCE_IRQ);
_vcpu_context.vtimer_irq.disable();
}
void Kernel::Vm::proceed(Cpu & cpu)
void Kernel::Vm::proceed()
{
if (_state.timer.irq) _vcpu_context.vtimer_irq.enable();
cpu.pic().insert_virtual_irq(_vcpu_context.pic, _state.irqs.virtual_irq);
_cpu().pic().insert_virtual_irq(_vcpu_context.pic, _state.irqs.virtual_irq);
/*
* the following values have to be enforced by the hypervisor
@ -202,7 +201,7 @@ void Kernel::Vm::proceed(Cpu & cpu)
_state.esr_el2 = Cpu::Hstr::init();
_state.hpfar_el2 = Cpu::Hcr::init();
Hypervisor::switch_world(_state, host_context(cpu));
Hypervisor::switch_world(_state, host_context(_cpu()));
}

View File

@ -27,7 +27,7 @@ using namespace Kernel;
void Thread::_call_suspend() { }
void Thread::exception(Cpu & cpu)
void Thread::exception()
{
switch (regs->exception_type) {
case Cpu::RESET: return;
@ -35,7 +35,7 @@ void Thread::exception(Cpu & cpu)
case Cpu::IRQ_LEVEL_EL1: [[fallthrough]];
case Cpu::FIQ_LEVEL_EL0: [[fallthrough]];
case Cpu::FIQ_LEVEL_EL1:
_interrupt(_user_irq_pool, cpu.id());
_interrupt(_user_irq_pool);
return;
case Cpu::SYNC_LEVEL_EL0: [[fallthrough]];
case Cpu::SYNC_LEVEL_EL1:
@ -94,51 +94,51 @@ void Kernel::Thread::Tlb_invalidation::execute(Cpu &) { }
void Thread::Flush_and_stop_cpu::execute(Cpu &) { }
void Cpu::Halt_job::proceed(Kernel::Cpu &) { }
void Cpu::Halt_job::proceed() { }
bool Kernel::Pd::invalidate_tlb(Cpu & cpu, addr_t addr, size_t size)
{
using namespace Genode;
/* only apply to the active cpu */
if (cpu.id() != Cpu::executing_id())
return false;
/**
* The kernel part of the address space is mapped as global,
* therefore we have to invalidate it differently
*/
if (addr >= Hw::Mm::supervisor_exception_vector().base) {
for (addr_t end = addr+size; addr < end; addr += get_page_size())
asm volatile ("tlbi vaae1is, %0" :: "r" (addr >> 12));
return false;
}
/**
* Too big mappings would result in long-running invalidation loops,
* so just invalidate the whole tlb for the ASID instead.
*/
if (size > 8 * get_page_size()) {
asm volatile ("tlbi aside1is, %0"
:: "r" ((uint64_t)mmu_regs.id() << 48));
return false;
}
for (addr_t end = addr+size; addr < end; addr += get_page_size())
asm volatile ("tlbi vae1is, %0"
:: "r" (addr >> 12 | (uint64_t)mmu_regs.id() << 48));
return false;
}
void Thread::proceed(Cpu & cpu)
{
if (!cpu.active(pd().mmu_regs) && type() != CORE)
cpu.switch_to(pd().mmu_regs);
void Thread::proceed()
{
if (!_cpu().active(pd().mmu_regs) && type() != CORE)
_cpu().switch_to(pd().mmu_regs);
kernel_to_user_context_switch((static_cast<Cpu::Context*>(&*regs)),
(void*)cpu.stack_start());
kernel_to_user_context_switch((static_cast<Core::Cpu::Context*>(&*regs)),
(void*)_cpu().stack_start());
}

View File

@ -76,7 +76,7 @@ void Board::Vcpu_context::Vm_irq::handle(Vm & vm, unsigned irq) {
void Board::Vcpu_context::Vm_irq::occurred()
{
Vm *vm = dynamic_cast<Vm*>(&_cpu.scheduled_job());
Vm *vm = dynamic_cast<Vm*>(&_cpu.current_context());
if (!vm) Genode::raw("VM interrupt while VM is not running!");
else handle(*vm, _irq_nr);
}
@ -115,15 +115,13 @@ Vm::Vm(Irq::Pool & user_irq_pool,
Identity & id)
:
Kernel::Object { *this },
Cpu_job(Scheduler::Priority::min(), 0),
Cpu_context(cpu, Scheduler::Priority::min(), 0),
_user_irq_pool(user_irq_pool),
_state(data),
_context(context),
_id(id),
_vcpu_context(cpu)
{
affinity(cpu);
_state.id_aa64isar0_el1 = Cpu::Id_aa64isar0_el1::read();
_state.id_aa64isar1_el1 = Cpu::Id_aa64isar1_el1::read();
_state.id_aa64mmfr0_el1 = Cpu::Id_aa64mmfr0_el1::read();
@ -167,14 +165,14 @@ Vm::~Vm()
}
void Vm::exception(Cpu & cpu)
void Vm::exception()
{
switch (_state.exception_type) {
case Cpu::IRQ_LEVEL_EL0: [[fallthrough]];
case Cpu::IRQ_LEVEL_EL1: [[fallthrough]];
case Cpu::FIQ_LEVEL_EL0: [[fallthrough]];
case Cpu::FIQ_LEVEL_EL1:
_interrupt(_user_irq_pool, cpu.id());
_interrupt(_user_irq_pool);
break;
case Cpu::SYNC_LEVEL_EL0: [[fallthrough]];
case Cpu::SYNC_LEVEL_EL1: [[fallthrough]];
@ -188,17 +186,17 @@ void Vm::exception(Cpu & cpu)
" not implemented!");
};
if (cpu.pic().ack_virtual_irq(_vcpu_context.pic))
if (_cpu().pic().ack_virtual_irq(_vcpu_context.pic))
inject_irq(Board::VT_MAINTAINANCE_IRQ);
_vcpu_context.vtimer_irq.disable();
}
void Vm::proceed(Cpu & cpu)
void Vm::proceed()
{
if (_state.timer.irq) _vcpu_context.vtimer_irq.enable();
cpu.pic().insert_virtual_irq(_vcpu_context.pic, _state.irqs.virtual_irq);
_cpu().pic().insert_virtual_irq(_vcpu_context.pic, _state.irqs.virtual_irq);
/*
* the following values have to be enforced by the hypervisor
@ -208,7 +206,7 @@ void Vm::proceed(Cpu & cpu)
Cpu::Vttbr_el2::Asid::set(vttbr_el2, _id.id);
addr_t guest = Hw::Mm::el2_addr(&_state);
addr_t pic = Hw::Mm::el2_addr(&_vcpu_context.pic);
addr_t host = Hw::Mm::el2_addr(&host_context(cpu));
addr_t host = Hw::Mm::el2_addr(&host_context(_cpu()));
Hypervisor::switch_world(guest, host, pic, vttbr_el2);
}

View File

@ -25,21 +25,21 @@ void Thread::Tlb_invalidation::execute(Cpu &) { }
void Thread::Flush_and_stop_cpu::execute(Cpu &) { }
void Cpu::Halt_job::proceed(Kernel::Cpu &) { }
void Cpu::Halt_job::proceed() { }
void Thread::exception(Cpu & cpu)
void Thread::exception()
{
using Context = Core::Cpu::Context;
using Stval = Core::Cpu::Stval;
if (regs->is_irq()) {
/* cpu-local timer interrupt */
if (regs->irq() == cpu.timer().interrupt_id()) {
cpu.handle_if_cpu_local_interrupt(cpu.timer().interrupt_id());
if (regs->irq() == _cpu().timer().interrupt_id()) {
_cpu().handle_if_cpu_local_interrupt(_cpu().timer().interrupt_id());
} else {
/* interrupt controller */
_interrupt(_user_irq_pool, 0);
_interrupt(_user_irq_pool);
}
return;
}
@ -113,7 +113,7 @@ void Kernel::Thread::_call_cache_line_size()
}
void Kernel::Thread::proceed(Cpu & cpu)
void Kernel::Thread::proceed()
{
/*
* The sstatus register defines to which privilege level
@ -123,8 +123,8 @@ void Kernel::Thread::proceed(Cpu & cpu)
Cpu::Sstatus::Spp::set(v, (type() == USER) ? 0 : 1);
Cpu::Sstatus::write(v);
if (!cpu.active(pd().mmu_regs) && type() != CORE)
cpu.switch_to(_pd->mmu_regs);
if (!_cpu().active(pd().mmu_regs) && type() != CORE)
_cpu().switch_to(_pd->mmu_regs);
asm volatile("csrw sscratch, %1 \n"
"mv x31, %0 \n"

View File

@ -55,9 +55,9 @@ void Kernel::Thread::Flush_and_stop_cpu::execute(Cpu &cpu)
}
void Kernel::Cpu::Halt_job::Halt_job::proceed(Kernel::Cpu &cpu)
void Kernel::Cpu::Halt_job::Halt_job::proceed()
{
switch (cpu.state()) {
switch (_cpu().state()) {
case HALT:
while (true) {
asm volatile ("hlt"); }
@ -83,7 +83,7 @@ void Kernel::Cpu::Halt_job::Halt_job::proceed(Kernel::Cpu &cpu)
/* adhere to ACPI specification */
asm volatile ("wbinvd" : : : "memory");
fadt.suspend(cpu.suspend.typ_a, cpu.suspend.typ_b);
fadt.suspend(_cpu().suspend.typ_a, _cpu().suspend.typ_b);
Genode::raw("kernel: unexpected resume");
});
@ -143,7 +143,7 @@ void Kernel::Thread::_call_suspend()
/* single core CPU case */
if (cpu_count == 1) {
/* current CPU triggers final ACPI suspend outside kernel lock */
_cpu->next_state_suspend();
_cpu().next_state_suspend();
return;
}
@ -176,12 +176,12 @@ void Kernel::Thread::_call_cache_line_size()
}
void Kernel::Thread::proceed(Cpu & cpu)
void Kernel::Thread::proceed()
{
if (!cpu.active(pd().mmu_regs) && type() != CORE)
cpu.switch_to(pd().mmu_regs);
if (!_cpu().active(pd().mmu_regs) && type() != CORE)
_cpu().switch_to(pd().mmu_regs);
cpu.switch_to(*regs);
_cpu().switch_to(*regs);
asm volatile("fxrstor (%1) \n"
"mov %0, %%rsp \n"

View File

@ -20,7 +20,7 @@
using namespace Kernel;
void Thread::exception(Cpu & cpu)
void Thread::exception()
{
using Genode::Cpu_state;
@ -45,7 +45,7 @@ void Thread::exception(Cpu & cpu)
if (regs->trapno >= Cpu_state::INTERRUPTS_START &&
regs->trapno <= Cpu_state::INTERRUPTS_END) {
_interrupt(_user_irq_pool, cpu.id());
_interrupt(_user_irq_pool);
return;
}

View File

@ -41,15 +41,12 @@ Vm::Vm(Irq::Pool & user_irq_pool,
Identity & id)
:
Kernel::Object { *this },
Cpu_job(Scheduler::Priority::min(), 0),
Cpu_context(cpu, Scheduler::Priority::min(), 0),
_user_irq_pool(user_irq_pool),
_state(*data.vcpu_state),
_context(context),
_id(id),
_vcpu_context(id.id, data)
{
affinity(cpu);
}
_vcpu_context(id.id, data) { }
Vm::~Vm()
@ -57,10 +54,10 @@ Vm::~Vm()
}
void Vm::proceed(Cpu & cpu)
void Vm::proceed()
{
using namespace Board;
cpu.switch_to(*_vcpu_context.regs);
_cpu().switch_to(*_vcpu_context.regs);
if (_vcpu_context.exit_reason == EXIT_INIT) {
_vcpu_context.regs->trapno = TRAP_VMSKIP;
@ -83,7 +80,7 @@ void Vm::proceed(Cpu & cpu)
}
void Vm::exception(Cpu & cpu)
void Vm::exception()
{
using namespace Board;
@ -121,18 +118,18 @@ void Vm::exception(Cpu & cpu)
* it needs to handle an exit.
*/
if (_vcpu_context.exit_reason == EXIT_PAUSED)
_interrupt(_user_irq_pool, cpu.id());
_interrupt(_user_irq_pool);
else
pause = true;
break;
case Cpu_state::INTERRUPTS_START ... Cpu_state::INTERRUPTS_END:
_interrupt(_user_irq_pool, cpu.id());
_interrupt(_user_irq_pool);
break;
case TRAP_VMSKIP:
/* vCPU is running for the first time */
_vcpu_context.initialize(cpu,
_vcpu_context.initialize(_cpu(),
reinterpret_cast<addr_t>(_id.table));
_vcpu_context.tsc_aux_host = cpu.id();
_vcpu_context.tsc_aux_host = _cpu().id();
/*
* We set the artificial startup exit code, stop the
* vCPU thread and ask the VMM to handle it.