base-hw: unset deleted PD values in MMU

When a PD gets deleted, check whether it is still active on the current
CPU resp. its MMU. If so, switch to core/kernel's PD to prevent invalid
page tables or address-space IDs from remaining in use. Moreover,
whenever we switch to an idle thread, we switch to kernel/core's PD too.
Thereby, we prevent vanished PDs from staying active on CPUs other than
the one where the core entrypoint that performs the PD deletion is
running.

This whole scheme is only valid under the assumption that core has only
one entrypoint, running on one CPU.

Fix genodelabs/genode#4527
Stefan Kalkowski 2022-06-14 11:32:14 +02:00 committed by Christian Helmuth
parent 5cfb9e3e9d
commit d6592ca2cb
15 changed files with 110 additions and 49 deletions
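In essence, the patch splits the former combined context/MMU switch into an explicit query ("is this MMU context live on this CPU?") plus an unconditional switch, and uses the pair during PD deletion. A minimal sketch of the scheme, not literal Genode code; delete_pd, core_mmu_context, and destroy are placeholder names:

    /* hypothetical, simplified rendering of the deletion path */
    void delete_pd(Cpu &cpu, Pd &pd, Mmu_context &core_mmu_context)
    {
        /* the doomed PD's page tables/ASID may be live on this CPU ... */
        if (cpu.active(pd.mmu_regs))
            /* ... so retreat to core's always-valid MMU context first */
            cpu.switch_to(core_mmu_context);

        destroy(pd); /* now no MMU register on this CPU references the PD */
    }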

View File

@@ -329,6 +329,23 @@ void Thread::_call_start_thread()
     /* join protection domain */
     thread._pd = (Pd *) user_arg_3();
     thread._ipc_init(*(Native_utcb *)user_arg_4(), *this);
+
+    /*
+     * Sanity check core threads!
+     *
+     * Currently, the model assumes that there is only one core
+     * entrypoint, which serves requests, and which can destroy
+     * threads and pds. If this changes, we have to inform all
+     * cpus about pd destructions to remove their page-tables
+     * from the hardware in case that a core-thread running with
+     * that same pd is currently active. Therefore, warn if the
+     * semantic changes, and additional core threads are started
+     * across cpu cores.
+     */
+    if (thread._pd == &_core_pd && cpu.id() != _cpu_pool.primary_cpu().id())
+        Genode::raw("Error: do not start core threads"
+                    " on CPU cores different than boot cpu");
+
     thread._become_active();
 }
@@ -447,6 +464,18 @@ void Thread::_call_delete_thread()
 }
 
 
+void Thread::_call_delete_pd()
+{
+    Genode::Kernel_object<Pd> & pd =
+        *(Genode::Kernel_object<Pd>*)user_arg_1();
+
+    if (_cpu->active(pd->mmu_regs))
+        _cpu->switch_to(_core_pd.mmu_regs);
+
+    _call_delete<Pd>();
+}
+
+
 void Thread::_call_await_request_msg()
 {
     if (_ipc_node.can_await_request()) {
@@ -822,7 +851,7 @@ void Thread::_call()
             *(Genode::Platform_pd *) user_arg_3(),
             _addr_space_id_alloc);
         return;
-    case call_id_delete_pd():           _call_delete<Pd>(); return;
+    case call_id_delete_pd():           _call_delete_pd(); return;
     case call_id_new_signal_receiver(): _call_new<Signal_receiver>(); return;
     case call_id_new_signal_context():
         _call_new<Signal_context>(*(Signal_receiver*) user_arg_2(), user_arg_3());
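With the syscall dispatcher routed to _call_delete_pd(), each architecture backend below supplies the same two-function MMU interface. A condensed outline of that contract, with simplified signatures rather than the literal headers:

    bool active(Mmu_context &ctx);    /* is the MMU currently configured for ctx? */
    void switch_to(Mmu_context &ctx); /* install ctx unconditionally */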


@@ -234,6 +234,7 @@ class Kernel::Thread : private Kernel::Object, public Cpu_job, private Timeout
         void _call_restart_thread();
         void _call_yield_thread();
         void _call_delete_thread();
+        void _call_delete_pd();
         void _call_await_request_msg();
         void _call_send_request_msg();
         void _call_send_reply_msg();


@@ -87,24 +87,25 @@ void Arm_cpu::mmu_fault_status(Fsr::access_t fsr, Thread_fault & fault)
 }
 
 
-void Arm_cpu::switch_to(Arm_cpu::Context&, Arm_cpu::Mmu_context & o)
+bool Arm_cpu::active(Arm_cpu::Mmu_context & ctx)
 {
-    if (o.cidr == 0) return;
+    return (Cidr::read() == ctx.cidr);
+}
 
-    Cidr::access_t cidr = Cidr::read();
-    if (cidr != o.cidr) {
-
-        /**
-         * First switch to global mappings only to prevent
-         * that wrong branch predicts result due to ASID
-         * and Page-Table not being in sync (see ARM RM B 3.10.4)
-         */
-        Cidr::write(0);
-        Cpu::synchronization_barrier();
-        Ttbr0::write(o.ttbr0);
-        Cpu::synchronization_barrier();
-        Cidr::write(o.cidr);
-        Cpu::synchronization_barrier();
-    }
+
+void Arm_cpu::switch_to(Arm_cpu::Mmu_context & ctx)
+{
+    /**
+     * First switch to global mappings only to prevent
+     * that wrong branch predicts result due to ASID
+     * and Page-Table not being in sync (see ARM RM B 3.10.4)
+     */
+    Cidr::write(0);
+    Cpu::synchronization_barrier();
+    Ttbr0::write(ctx.ttbr0);
+    Cpu::synchronization_barrier();
+    Cidr::write(ctx.cidr);
+    Cpu::synchronization_barrier();
 }
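A remark on the intermediate ASID: between updating TTBR0 and CONTEXTIDR, the ASID and the page table would briefly disagree, so speculative walks could pair the new ASID with the old table or vice versa. Routing the switch through ASID 0, which base-hw reserves for the global kernel mappings (note the old `o.cidr == 0` shortcut above), closes that window. The same sequence as in the diff, with explanatory comments added:

    Cidr::write(0);                  /* reserved ASID: global mappings only */
    Cpu::synchronization_barrier();  /* make the ASID change visible */
    Ttbr0::write(ctx.ttbr0);         /* swap tables while the safe ASID is active */
    Cpu::synchronization_barrier();
    Cidr::write(ctx.cidr);           /* activate the new ASID */
    Cpu::synchronization_barrier();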


@@ -104,7 +104,8 @@ struct Genode::Arm_cpu : public Hw::Arm_cpu
         else Tlbiall::write(0);
     }
 
-    void switch_to(Context&, Mmu_context & o);
+    bool active(Mmu_context &);
+    void switch_to(Mmu_context &);
 
     static void mmu_fault(Context & c, Kernel::Thread_fault & fault);
     static void mmu_fault_status(Fsr::access_t fsr,


@@ -67,7 +67,8 @@ void Kernel::Thread::Tlb_invalidation::execute() { };
 
 void Thread::proceed(Cpu & cpu)
 {
-    cpu.switch_to(*regs, pd().mmu_regs);
+    if (!cpu.active(pd().mmu_regs) && type() != CORE)
+        cpu.switch_to(pd().mmu_regs);
 
     regs->cpu_exception = cpu.stack_start();
     kernel_to_user_context_switch((static_cast<Cpu::Context*>(&*regs)),


@@ -26,12 +26,15 @@ Genode::Cpu::Context::Context(bool privileged)
 }
 
 
-void Genode::Cpu::switch_to(Context&, Mmu_context & mmu_context)
+bool Genode::Cpu::active(Mmu_context & mmu_context)
 {
-    if (mmu_context.id() == 0) return;
+    return (mmu_context.id() == Ttbr::Asid::get(Ttbr0_el1::read()));
+}
 
-    if (mmu_context.id() != Ttbr::Asid::get(Ttbr0_el1::read()))
-        Ttbr0_el1::write(mmu_context.ttbr);
+
+void Genode::Cpu::switch_to(Mmu_context & mmu_context)
+{
+    Ttbr0_el1::write(mmu_context.ttbr);
 }
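Note that ARMv8 needs no two-step dance like the 32-bit variant: assuming TCR_EL1.A1 = 0 (the usual configuration, an assumption here), the ASID lives in the upper bits of TTBR0_EL1 itself, so a single register write publishes table address and ASID together:

    Ttbr0_el1::write(mmu_context.ttbr); /* table address and ASID change in one write */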


@@ -99,7 +99,8 @@ struct Genode::Cpu : Hw::Arm_64_cpu
             return Ttbr::Asid::get(ttbr) & 0xffff; }
     };
 
-    void switch_to(Context&, Mmu_context &);
+    bool active(Mmu_context &);
+    void switch_to(Mmu_context &);
 
     static void mmu_fault(Context &, Kernel::Thread_fault &);


@@ -108,7 +108,9 @@ bool Kernel::Pd::invalidate_tlb(Cpu &, addr_t addr, size_t size)
 
 void Thread::proceed(Cpu & cpu)
 {
-    cpu.switch_to(*regs, pd().mmu_regs);
+    if (!cpu.active(pd().mmu_regs) && type() != CORE)
+        cpu.switch_to(pd().mmu_regs);
+
     kernel_to_user_context_switch((static_cast<Cpu::Context*>(&*regs)),
                                   (void*)cpu.stack_start());
 }


@@ -115,11 +115,14 @@ class Genode::Cpu : public Arm_v7_cpu
         */
        static unsigned executing_id() { return Mpidr::Aff_0::get(Mpidr::read()); }
 
-       void switch_to(Context &, Mmu_context & mmu_context)
+       bool active(Mmu_context & mmu_context)
        {
-           if (mmu_context.id() && (Ttbr0_64bit::read() != mmu_context.ttbr0))
-               Ttbr0_64bit::write(mmu_context.ttbr0);
+           return (Ttbr0_64bit::read() == mmu_context.ttbr0);
+       }
+
+       void switch_to(Mmu_context & mmu_context)
+       {
+           Ttbr0_64bit::write(mmu_context.ttbr0);
        }
 };


@@ -53,22 +53,16 @@ Mmu_context::~Mmu_context()
 }
 
 
+bool Genode::Cpu::active(Mmu_context & context)
+{
+    return Satp::read() == context.satp;
+}
+
+
 void Genode::Cpu::switch_to(Mmu_context & context)
 {
-    /*
-     * The sstatus register defines to which privilege level
-     * the machin returns when doing an exception return
-     */
-    bool user = Satp::Asid::get(context.satp);
-    Sstatus::access_t v = Sstatus::read();
-    Sstatus::Spp::set(v, user ? 0 : 1);
-    Sstatus::write(v);
-
-    /* change the translation table when necessary */
-    if (user) {
-        Satp::write(context.satp);
-        sfence();
-    }
+    Satp::write(context.satp);
+    sfence();
 }
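On RISC-V, the satp CSR bundles translation mode, ASID, and root-table PPN, so comparing and writing satp covers the whole MMU context in one register; the subsequent sfence() (an sfence.vma) orders the update against later translations:

    Satp::write(context.satp); /* mode, ASID, and root table in one CSR */
    sfence();                  /* sfence.vma: synchronize address-translation state */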


@@ -97,6 +97,7 @@ class Genode::Cpu : public Hw::Riscv_cpu
     static void invalidate_tlb_by_pid(unsigned const /* pid */) { sfence(); }
 
+    bool active(Mmu_context & context);
     void switch_to(Mmu_context & context);
 
     static void mmu_fault(Context & c, Kernel::Thread_fault & f);


@@ -100,7 +100,16 @@ void Kernel::Thread::_call_cache_invalidate_data_region() { }
 
 void Kernel::Thread::proceed(Cpu & cpu)
 {
-    cpu.switch_to(_pd->mmu_regs);
+    /*
+     * The sstatus register defines to which privilege level
+     * the machine returns when doing an exception return
+     */
+    Cpu::Sstatus::access_t v = Cpu::Sstatus::read();
+    Cpu::Sstatus::Spp::set(v, (type() == USER) ? 0 : 1);
+    Cpu::Sstatus::write(v);
+
+    if (!cpu.active(pd().mmu_regs) && type() != CORE)
+        cpu.switch_to(_pd->mmu_regs);
 
     asm volatile("csrw sscratch, %1 \n"
                  "mv x31, %0 \n"


@@ -111,11 +111,20 @@ extern void const * const kernel_stack;
 extern Genode::size_t const kernel_stack_size;
 
 
-void Genode::Cpu::switch_to(Context & context, Mmu_context &mmu_context)
+bool Genode::Cpu::active(Mmu_context &mmu_context)
 {
-    if ((context.cs != 0x8) && (mmu_context.cr3 != Cr3::read()))
-        Cr3::write(mmu_context.cr3);
+    return (mmu_context.cr3 == Cr3::read());
+}
 
+
+void Genode::Cpu::switch_to(Mmu_context &mmu_context)
+{
+    Cr3::write(mmu_context.cr3);
+}
+
+
+void Genode::Cpu::switch_to(Context & context)
+{
     tss.ist[0] = (addr_t)&context + sizeof(Genode::Cpu_state);
 
     addr_t const stack_base = reinterpret_cast<addr_t>(&kernel_stack);
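A side effect worth noting: on x86, a mov to CR3 flushes all non-global TLB entries (PCIDs are not in play here, as cr3 values are compared verbatim). The active() guard therefore also spares gratuitous TLB flushes when the right tables are already installed:

    if (!cpu.active(pd().mmu_regs) && type() != CORE) /* skip redundant CR3 write ... */
        cpu.switch_to(pd().mmu_regs);                 /* ... which would flush non-global TLB entries */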


@@ -126,7 +126,10 @@ class Genode::Cpu : public Hw::X86_64_cpu
      *
      * \param context  next CPU context
      */
-    void switch_to(Context & context, Mmu_context &mmu_context);
+    void switch_to(Context & context);
+
+    bool active(Mmu_context &mmu_context);
+    void switch_to(Mmu_context &mmu_context);
 
     static void mmu_fault(Context & regs, Kernel::Thread_fault & fault);


@@ -43,7 +43,10 @@ void Kernel::Thread::_call_cache_invalidate_data_region() { }
 
 void Kernel::Thread::proceed(Cpu & cpu)
 {
-    cpu.switch_to(*regs, pd().mmu_regs);
+    if (!cpu.active(pd().mmu_regs) && type() != CORE)
+        cpu.switch_to(pd().mmu_regs);
+
+    cpu.switch_to(*regs);
 
     asm volatile("fxrstor (%1) \n"
                  "mov %0, %%rsp \n"