hw: explicit cpu state argument in kernel entry

* Instead of only implicitly updating the last scheduled Cpu context with
  the CPU state when entering the architecture-specific machine
  exception vector, cache this data on the kernel context stack, and deliver
  it as an argument when entering the kernel via high-level language
* Handle the Cpu context's exception explicitly in the kernel main routine
* Make the cached CPU state available to the error-handling lambda in case
  of Kernel::Mutex double entry (i.e., a kernel fault)
* Rename Cpu::schedule to Cpu::assign

Ref genodelabs/genode#5425
This commit is contained in:
Stefan Kalkowski 2025-02-12 15:18:03 +01:00 committed by Christian Helmuth
parent 98032a2605
commit 17d1e41053
33 changed files with 246 additions and 281 deletions

View File

@ -31,7 +31,7 @@ using namespace Kernel;
** Cpu_context **
*****************/
void Cpu_context::_activate() { _cpu().schedule(*this); }
void Cpu_context::_activate() { _cpu().assign(*this); }
void Cpu_context::_deactivate()
@ -119,7 +119,7 @@ Cpu::Idle_thread::Idle_thread(Board::Address_space_id_allocator &addr_space_id_a
}
void Cpu::schedule(Context &context)
void Cpu::assign(Context &context)
{
_scheduler.ready(static_cast<Scheduler::Context&>(context));
if (_id != executing_id() && _scheduler.need_to_schedule())
@ -139,11 +139,8 @@ bool Cpu::handle_if_cpu_local_interrupt(unsigned const irq_id)
}
Cpu::Context & Cpu::handle_exception_and_schedule()
Cpu::Context & Cpu::schedule_next_context(Context &last)
{
Context &context = current_context();
context.exception();
if (_state == SUSPEND || _state == HALT)
return _halt_job;
@ -154,7 +151,7 @@ Cpu::Context & Cpu::handle_exception_and_schedule()
time_t t = _scheduler.current_time_left();
_timer.set_timeout(&_timeout, t);
time_t duration = _timer.schedule_timeout();
context.update_execution_time(duration);
last.update_execution_time(duration);
}
/* return current context */

View File

@ -90,7 +90,7 @@ class Kernel::Cpu : public Core::Cpu, private Irq::Pool,
Halt_job(Cpu &cpu)
: Cpu_context(cpu, 0, 0) { }
void exception() override { }
void exception(Genode::Cpu_state&) override { }
void proceed() override;
} _halt_job { *this };
@ -143,14 +143,14 @@ class Kernel::Cpu : public Core::Cpu, private Irq::Pool,
bool handle_if_cpu_local_interrupt(unsigned const irq_id);
/**
* Schedule 'context' at this CPU
* Assign 'context' to this CPU
*/
void schedule(Context& context);
void assign(Context& context);
/**
* Return the context that should be executed next
*/
Context& handle_exception_and_schedule();
Context& schedule_next_context(Context &last);
Board::Pic & pic() { return _pic; }
Timer & timer() { return _timer; }

View File

@ -106,7 +106,7 @@ class Kernel::Cpu_context : private Scheduler::Context
/**
* Handle exception that occured during execution of this context
*/
virtual void exception() = 0;
virtual void exception(Genode::Cpu_state&) = 0;
/**
* Continue execution of this context

View File

@ -30,7 +30,7 @@ class Kernel::Main
{
private:
friend void main_handle_kernel_entry();
friend void main_handle_kernel_entry(Genode::Cpu_state*);
friend void main_initialize_and_handle_kernel_entry();
friend time_t main_read_idle_thread_execution_time(unsigned cpu_idx);
friend void main_print_char(char c);
@ -50,7 +50,7 @@ class Kernel::Main
Board::UART_CLOCK,
SERIAL_BAUD_RATE };
void _handle_kernel_entry();
void _handle_kernel_entry(Genode::Cpu_state*);
public:
@ -61,26 +61,28 @@ class Kernel::Main
Kernel::Main *Kernel::Main::_instance;
void Kernel::Main::_handle_kernel_entry()
void Kernel::Main::_handle_kernel_entry(Genode::Cpu_state *state)
{
Cpu::Context * context;
Cpu::Context *context;
_mutex.execute_exclusive(
[&] () {
Cpu &cpu = _cpu_pool.cpu(Cpu::executing_id());
context = &cpu.handle_exception_and_schedule();
},
Cpu::Context &recent = cpu.current_context();
if (state) recent.exception(*state);
context = &cpu.schedule_next_context(recent);
},
[&] () {
Genode::error("Cpu ", Cpu::executing_id(), " re-entered lock.",
Genode::error("Cpu ", Cpu::executing_id(), " re-entered lock. ",
"Kernel exception?!"); });
context->proceed();
}
void Kernel::main_handle_kernel_entry()
void Kernel::main_handle_kernel_entry(Genode::Cpu_state *state)
{
Main::_instance->_handle_kernel_entry();
Main::_instance->_handle_kernel_entry(state);
}
@ -132,7 +134,7 @@ void Kernel::main_initialize_and_handle_kernel_entry()
while (nr_of_initialized_cpus < nr_of_cpus) { }
Main::_instance->_handle_kernel_entry();
Main::_instance->_handle_kernel_entry(nullptr);
/* never reached */
return;
}
@ -194,7 +196,7 @@ void Kernel::main_initialize_and_handle_kernel_entry()
*/
while (!kernel_initialized) {;}
Main::_instance->_handle_kernel_entry();
Main::_instance->_handle_kernel_entry(nullptr);
}

View File

@ -14,6 +14,8 @@
#ifndef _KERNEL__MAIN_H_
#define _KERNEL__MAIN_H_
#include <cpu/cpu_state.h>
/* base-hw core includes */
#include <kernel/types.h>
@ -21,7 +23,7 @@ namespace Kernel {
void main_print_char(char const c);
void main_handle_kernel_entry();
void main_handle_kernel_entry(Genode::Cpu_state *state);
void main_initialize_and_handle_kernel_entry();

View File

@ -470,7 +470,7 @@ class Kernel::Thread : private Kernel::Object, public Cpu_context, private Timeo
** Cpu_context **
*****************/
void exception() override;
void exception(Genode::Cpu_state&) override;
void proceed() override;

View File

@ -145,7 +145,7 @@ class Kernel::Vm : private Kernel::Object, public Cpu_context
** Cpu_context **
*****************/
void exception() override;
void exception(Genode::Cpu_state&) override;
void proceed() override;
};

View File

@ -61,9 +61,9 @@
.macro _user_to_kernel exception_type, mode, pc_adjust
cpsid f, #SVC_MODE /* disable interrupts and change to SVC mode */
sub sp, sp, #0x150 /* make room for cpu context */
stm sp, {r0-r14}^ /* the sp_svc contains the user context pointer */
add r0, sp, #PC_OFFSET
ldr sp, [sp, #STACK_OFFSET] /* restore kernel stack pointer */
cps #\mode
sub r1, lr, #\pc_adjust /* calculate user program counter */
mrs r2, spsr /* get user cpsr */
@ -131,10 +131,11 @@
*/
adr lr, _kernel_entry
ldr lr, [lr]
mov r0, sp
bx lr
_kernel_entry:
.long _ZN6Kernel24main_handle_kernel_entryEv
.long _ZN6Kernel24main_handle_kernel_entryEPN6Genode9Cpu_stateE
_fpu_save:
.long vfp_save_fpu_context
@ -158,14 +159,14 @@
.global kernel_to_user_context_switch
kernel_to_user_context_switch:
push { r0 }
mov r0, r1
mov sp, r2
bl vfp_load_fpu_context
pop { r0 }
mov sp, r0
ldr lr, [sp, #15*4]
ldr r1, [sp, #16*4]
msr spsr_cxsf, r1
ldm sp, {r0-r14}^
ldr lr, [r1, #15*4]
ldr r0, [r1, #16*4]
msr spsr_cxsf, r0
add r0, r1, #4
ldm r0, {r1-r14}^
sub r0, r0, #4
ldr r0, [r0]
SYSTEM_REGISTER_SYNC_BARRIER /* synchronize after the context switch */
subs pc, lr, #0

View File

@ -23,17 +23,19 @@
using namespace Kernel;
extern "C" void kernel_to_user_context_switch(Core::Cpu::Context*,
Core::Cpu::Fpu_context*);
extern "C" void kernel_to_user_context_switch(Core::Cpu::Fpu_context*,
Core::Cpu::Context*, void*);
void Thread::_call_suspend() { }
void Thread::exception()
void Thread::exception(Genode::Cpu_state &state)
{
using Ctx = Core::Cpu::Context;
Genode::memcpy(&*regs, &state, sizeof(Ctx));
switch (regs->cpu_exception) {
case Ctx::SUPERVISOR_CALL:
_call();
@ -82,9 +84,9 @@ void Thread::proceed()
if (!_cpu().active(pd().mmu_regs) && type() != CORE)
_cpu().switch_to(pd().mmu_regs);
regs->cpu_exception = _cpu().stack_start();
kernel_to_user_context_switch((static_cast<Core::Cpu::Context*>(&*regs)),
(static_cast<Core::Cpu::Fpu_context*>(&*regs)));
kernel_to_user_context_switch((static_cast<Core::Cpu::Fpu_context*>(&*regs)),
(static_cast<Core::Cpu::Context*>(&*regs)),
(void*)_cpu().stack_start());
}

View File

@ -89,17 +89,18 @@ monitor_mode_exception_vector:
vstm r0!, {d0-d15} /* save FPU registers */
vstm r0!, {d16-d31}
cps #22 /* switch back to monitor mode */
mov r0, #0b111010011 /* spsr to SVC mode, irqs masked */
msr spsr_cxsf, r0
mov r2, #0b111010011 /* spsr to SVC mode, irqs masked */
msr spsr_cxsf, r2
mov r1, lr
cps #19
mov sp, r1
cps #22
adr lr, _kernel_entry
ldr lr, [lr]
mov r0, sp /* set vm context as first arg */
subs pc, lr, #0 /* jump back into kernel */
_kernel_entry: .long _ZN6Kernel24main_handle_kernel_entryEv
_kernel_entry: .long _ZN6Kernel24main_handle_kernel_entryEPN6Genode9Cpu_stateE
/* jump to this point to switch to TrustZone's normal world */

View File

@ -45,7 +45,7 @@ Vm::Vm(Irq::Pool & user_irq_pool,
Vm::~Vm() {}
void Vm::exception()
void Vm::exception(Genode::Cpu_state&)
{
switch(_state.cpu_exception) {
case Genode::Cpu_state::INTERRUPT_REQUEST: [[fallthrough]];

View File

@ -215,7 +215,7 @@ _vm_to_host:
** Restore host context **
**************************/
pop { r0, r1 }
pop { r0 }
ldmia r0!, {r1-r12}
mcrr p15, 6, r1, r2, c2 /* write VTTBR */
mcr p15, 4, r3, c1, c1, 0 /* write HCR register */
@ -226,12 +226,13 @@ _vm_to_host:
msr spsr_cxfs, r8
mcrr p15, 0, r9, r10, c2 /* write TTBR0 */
mcrr p15, 1, r11, r12, c2 /* write TTBR1 */
ldmia r0, {r1-r5}
ldmia r0!, {r1-r5}
mcr p15, 0, r1, c1, c0, 0 /* write SCTRL */
mcr p15, 0, r2, c2, c0, 2 /* write TTBRC */
mcr p15, 0, r3, c10, c2, 0 /* write MAIR0 */
mcr p15, 0, r4, c3, c0, 0 /* write DACR */
mcr p15, 4, r5, c0, c0, 5 /* write VMPIDR */
pop { r0 }
eret

View File

@ -163,7 +163,7 @@ Kernel::Vm::~Vm()
}
void Kernel::Vm::exception()
void Kernel::Vm::exception(Genode::Cpu_state&)
{
switch(_state.cpu_exception) {
case Genode::Cpu_state::INTERRUPT_REQUEST:

View File

@ -64,19 +64,16 @@ struct Core::Cpu : Hw::Arm_64_cpu
RESET = 0x800
};
struct alignas(16) Fpu_state
{
uint128_t q[32];
uint64_t fpsr;
uint64_t fpcr;
};
struct alignas(16) Context : Cpu_state
{
uint64_t pstate { };
uint64_t mdscr_el1 { };
uint64_t exception_type { RESET };
Fpu_state fpu_state { };
/* SIMD & FP registers */
uint64_t fpsr { };
uint128_t q[32];
uint64_t fpcr { };
Context(bool privileged);

View File

@ -14,63 +14,61 @@
.section .text.crt0
.rept 16
str x0, [sp, #-32]
ldr x0, [sp, #-16]
add x0, x0, #8
stp x1, x2, [x0], #16
stp x3, x4, [x0], #16
stp x5, x6, [x0], #16
stp x7, x8, [x0], #16
stp x9, x10, [x0], #16
stp x11, x12, [x0], #16
stp x13, x14, [x0], #16
stp x15, x16, [x0], #16
stp x17, x18, [x0], #16
stp x19, x20, [x0], #16
stp x21, x22, [x0], #16
stp x23, x24, [x0], #16
stp x25, x26, [x0], #16
stp x27, x28, [x0], #16
stp x29, x30, [x0], #16
mrs x1, sp_el0
mrs x2, elr_el1
mrs x3, esr_el1
mrs x4, spsr_el1
mrs x5, mdscr_el1
adr x6, .
and x6, x6, #0xf80
stp x1, x2, [x0], #16
stp x3, x4, [x0], #16
stp x5, x6, [x0], #16
sub sp, sp, #0x340 /* keep room for cpu state */
stp x0, x1, [sp], #16
stp x2, x3, [sp], #16
stp x4, x5, [sp], #16
stp x6, x7, [sp], #16
stp x8, x9, [sp], #16
stp x10, x11, [sp], #16
stp x12, x13, [sp], #16
stp x14, x15, [sp], #16
stp x16, x17, [sp], #16
stp x18, x19, [sp], #16
stp x20, x21, [sp], #16
stp x22, x23, [sp], #16
stp x24, x25, [sp], #16
stp x26, x27, [sp], #16
stp x28, x29, [sp], #16
mrs x0, sp_el0
mrs x1, elr_el1
mrs x2, esr_el1
mrs x3, spsr_el1
mrs x4, mdscr_el1
adr x5, .
and x5, x5, #0xf80
mrs x6, fpsr
stp x30, x0, [sp], #16
stp x1, x2, [sp], #16
stp x3, x4, [sp], #16
stp x5, x6, [sp], #16
b _kernel_entry
.balign 128
.endr
_kernel_entry:
stp q0, q1, [x0], #32
stp q2, q3, [x0], #32
stp q4, q5, [x0], #32
stp q6, q7, [x0], #32
stp q8, q9, [x0], #32
stp q10, q11, [x0], #32
stp q12, q13, [x0], #32
stp q14, q15, [x0], #32
stp q16, q17, [x0], #32
stp q18, q19, [x0], #32
stp q20, q21, [x0], #32
stp q22, q23, [x0], #32
stp q24, q25, [x0], #32
stp q26, q27, [x0], #32
stp q28, q29, [x0], #32
stp q30, q31, [x0], #32
mrs x1, fpcr
mrs x2, fpsr
stp x1, x2, [x0], #16
stp q0, q1, [sp], #32
stp q2, q3, [sp], #32
stp q4, q5, [sp], #32
stp q6, q7, [sp], #32
stp q8, q9, [sp], #32
stp q10, q11, [sp], #32
stp q12, q13, [sp], #32
stp q14, q15, [sp], #32
stp q16, q17, [sp], #32
stp q18, q19, [sp], #32
stp q20, q21, [sp], #32
stp q22, q23, [sp], #32
stp q24, q25, [sp], #32
stp q26, q27, [sp], #32
stp q28, q29, [sp], #32
stp q30, q31, [sp], #32
mrs x0, fpcr
str x0, [sp], #8
msr fpsr, xzr
ldr x0, [sp, #-16]
ldr x1, [sp, #-32]
str x1, [x0]
bl _ZN6Kernel24main_handle_kernel_entryEv
sub sp, sp, #0x338 /* overall cpu state size */
mov x0, sp
bl _ZN6Kernel24main_handle_kernel_entryEPN6Genode9Cpu_stateE
.section .text
@ -92,7 +90,6 @@ _kernel_entry:
.global kernel_to_user_context_switch
kernel_to_user_context_switch:
mov sp, x1 /* reset stack */
str x0, [sp, #-16] /* store cpu state pointer */
add x1, x0, #8*31
ldp x2, x3, [x1], #16+8 /* load sp, ip, skip esr_el1 */
ldp x4, x5, [x1], #16+8 /* load pstate, mdscr_el1, skip exception_type */
@ -100,6 +97,8 @@ _kernel_entry:
msr elr_el1, x3
msr spsr_el1, x4
msr mdscr_el1, x5
ldr x2, [x1], #8
msr fpsr, x2
ldp q0, q1, [x1], #32
ldp q2, q3, [x1], #32
ldp q4, q5, [x1], #32
@ -116,9 +115,8 @@ _kernel_entry:
ldp q26, q27, [x1], #32
ldp q28, q29, [x1], #32
ldp q30, q31, [x1], #32
ldp x2, x3, [x1], #16
msr fpcr, x2
msr fpsr, x3
ldr x3, [x1], #8
msr fpcr, x3
add x0, x0, #8
ldp x1, x2, [x0], #16
ldp x3, x4, [x0], #16

View File

@ -27,8 +27,10 @@ using namespace Kernel;
void Thread::_call_suspend() { }
void Thread::exception()
void Thread::exception(Genode::Cpu_state &state)
{
Genode::memcpy(&*regs, &state, sizeof(Core::Cpu::Context));
switch (regs->exception_type) {
case Cpu::RESET: return;
case Cpu::IRQ_LEVEL_EL0: [[fallthrough]];

View File

@ -298,10 +298,11 @@ _from_vm:
mrs x27, cntv_ctl_el0
mrs x28, cntkctl_el1
stp x25, x26, [x0], #2*8
stp w27, w28, [x0]
stp w27, w28, [x0], #2*4
sub x0, x0, #0x3f0 /* overall vcpu state size */
mov x0, #0b111
msr cnthctl_el2, x0
mov x1, #0b111
msr cnthctl_el2, x1
ldr x29, [sp], #2*8 /* pop vm pic state from stack */
ldp x2, x30, [sp], #2*8 /* pop vm, and host state from stack */
@ -331,53 +332,53 @@ _from_vm:
***********************/
add x30, x30, #32*8 /* skip general-purpose regs, sp */
ldr x0, [x30], #2*8 /* host state ip, skip esr_el1 */
ldr x1, [x30], #3*8 /* host state pstate,
ldr x1, [x30], #2*8 /* host state ip, skip esr_el1 */
ldr x2, [x30], #3*8 /* host state pstate,
skip exception_type and esr_el2 */
ldp w2, w3, [x30], #2*4 /* fpcr and fpsr */
ldp w3, w4, [x30], #2*4 /* fpcr and fpsr */
add x30, x30, #32*16+8 /* skip remaining fpu regs and elr_el1 */
ldr x4, [x30], #2*8 /* sp_el1 */
ldp x5, x6, [x30], #2*8 /* sctlr_el1, actlr_el1 */
ldr x7, [x30], #1*8 /* vbar_el1 */
ldr w8, [x30], #4*4 /* cpacr_el1 */
ldp x9, x10, [x30], #2*8 /* ttbr0_el1, ttbr1_el1 */
ldp x11, x12, [x30], #2*8 /* tcr_el1, mair_el1 */
ldr x13, [x30] /* amair_el1 */
ldr x5, [x30], #2*8 /* sp_el1 */
ldp x6, x7, [x30], #2*8 /* sctlr_el1, actlr_el1 */
ldr x8, [x30], #1*8 /* vbar_el1 */
ldr w9, [x30], #4*4 /* cpacr_el1 */
ldp x10, x11, [x30], #2*8 /* ttbr0_el1, ttbr1_el1 */
ldp x12, x13, [x30], #2*8 /* tcr_el1, mair_el1 */
ldr x14, [x30] /* amair_el1 */
msr elr_el2, x0
msr spsr_el2, x1
msr fpcr, x2
msr fpsr, x3
msr sp_el1, x4
msr sctlr_el1, x5
msr actlr_el1, x6
msr vbar_el1, x7
msr cpacr_el1, x8
msr ttbr0_el1, x9
msr ttbr1_el1, x10
msr tcr_el1, x11
msr mair_el1, x12
msr amair_el1, x13
mrs x0, mpidr_el1
msr vmpidr_el2, x0
msr elr_el2, x1
msr spsr_el2, x2
msr fpcr, x3
msr fpsr, x4
msr sp_el1, x5
msr sctlr_el1, x6
msr actlr_el1, x7
msr vbar_el1, x8
msr cpacr_el1, x9
msr ttbr0_el1, x10
msr ttbr1_el1, x11
msr tcr_el1, x12
msr mair_el1, x13
msr amair_el1, x14
mrs x1, mpidr_el1
msr vmpidr_el2, x1
/************************
** debug/perfm access **
************************/
mrs x0, mdcr_el2
movz x1, #0b111101100000
bic x0, x0, x1
msr mdcr_el2, x0
mrs x1, mdcr_el2
movz x2, #0b111101100000
bic x1, x1, x2
msr mdcr_el2, x1
/** disable VM mode **/
movz x1, #0b1110000000111001
movk x1, #0b10111, lsl 16
mrs x0, hcr_el2
movz x2, #0b1110000000111001
movk x2, #0b10111, lsl 16
mrs x1, hcr_el2
msr vttbr_el2, xzr /* stage2 table pointer zeroing */
bic x0, x0, x1
msr hcr_el2, x0
bic x1, x1, x2
msr hcr_el2, x1
eret

View File

@ -165,7 +165,7 @@ Vm::~Vm()
}
void Vm::exception()
void Vm::exception(Genode::Cpu_state&)
{
switch (_state.exception_type) {
case Cpu::IRQ_LEVEL_EL0: [[fallthrough]];

View File

@ -52,7 +52,8 @@ _kernel_entry:
add x29, x29, x30
li x30, HW_MM_KERNEL_STACK_SIZE
add sp, x29, x30
la x30, _ZN6Kernel24main_handle_kernel_entryEv
mv x10, x31
la x30, _ZN6Kernel24main_handle_kernel_entryEPN6Genode9Cpu_stateE
jalr x30

View File

@ -28,7 +28,7 @@ void Thread::Flush_and_stop_cpu::execute(Cpu &) { }
void Cpu::Halt_job::proceed() { }
void Thread::exception()
void Thread::exception(Genode::Cpu_state&)
{
using Context = Core::Cpu::Context;
using Stval = Core::Cpu::Stval;

View File

@ -141,12 +141,6 @@ void Cpu::switch_to(Mmu_context &mmu_context)
}
void Cpu::switch_to(Context &context)
{
tss.ist[0] = (addr_t)&context + sizeof(Cpu_state);
}
unsigned Cpu::executing_id()
{
return Cpu::Cpuid_1_ebx::Apic_id::get(Cpu::Cpuid_1_ebx::read());

View File

@ -18,6 +18,7 @@
#define _CORE__SPEC__X86_64__CPU_H_
/* base includes */
#include <util/mmio.h>
#include <util/register.h>
#include <cpu/cpu_state.h>
@ -29,7 +30,6 @@
/* core includes */
#include <types.h>
#include <spec/x86_64/fpu.h>
#include <spec/x86_64/address_space_id_allocator.h>
#include <hw/spec/x86_64/page_table.h>
@ -91,6 +91,36 @@ class Core::Cpu : public Hw::X86_64_cpu
} __attribute__((packed)) gdt { };
struct Fpu_context
{
static constexpr size_t SIZE = 512;
/*
* FXSAVE area providing storage for x87 FPU, MMX, XMM,
* and MXCSR registers.
*
* For further details see Intel SDM Vol. 2A,
* 'FXSAVE instruction'.
*/
char _fxsave_area[SIZE] = { 0 };
struct Context : Mmio<SIZE>
{
struct Fcw : Register<0, 16> { };
struct Mxcsr : Register<24, 32> { };
using Mmio<SIZE>::Mmio;
};
Fpu_context()
{
Context init({ _fxsave_area, SIZE });
init.write<Context::Fcw>(0x37f); /* mask exceptions SysV ABI */
init.write<Context::Mxcsr>(0x1f80);
}
} __attribute__((packed));
struct alignas(16) Context : Cpu_state, Fpu_context
{
enum Eflags {
@ -112,6 +142,9 @@ class Core::Cpu : public Hw::X86_64_cpu
fp = (void **) fp[0];
}
}
Fpu_context& fpu_context() {
return static_cast<Fpu_context&>(*this); }
} __attribute__((packed));
@ -128,13 +161,6 @@ class Core::Cpu : public Hw::X86_64_cpu
*/
static unsigned executing_id();
/**
* Switch to new context
*
* \param context next CPU context
*/
void switch_to(Context & context);
bool active(Mmu_context &mmu_context);
void switch_to(Mmu_context &mmu_context);

View File

@ -14,7 +14,7 @@
* under the terms of the GNU Affero General Public License version 3.
*/
.include "stack_switch.s"
.include "memory_consts.s"
/* offsets of member variables in a CPU context */
.set IP_OFFSET, 17 * 8
@ -123,15 +123,15 @@
.set FPU_CONTEXT_OFFSET, SIZEOF_CPU_STATE
/* rsp contains pointer to Cpu::Context */
movq %rsp, %rdi
/* save FPU context */
movq %rsp, %rax
addq $FPU_CONTEXT_OFFSET, %rax
movq (%rax), %rax
fxsave (%rax)
switch_to_kernel_stack
_load_address _ZN6Kernel24main_handle_kernel_entryEv rcx
_load_address _ZN6Kernel24main_handle_kernel_entryEPN6Genode9Cpu_stateE rcx
subq $8, %rsp
jmp *%rcx

View File

@ -1,66 +0,0 @@
/*
* \brief x86_64 FPU context
* \author Sebastian Sumpf
* \date 2019-05-23
*/
/*
* Copyright (C) 2019 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#ifndef _CORE__SPEC__X86_64__FPU_H_
#define _CORE__SPEC__X86_64__FPU_H_
/* Genode includes */
#include <util/misc_math.h>
#include <util/mmio.h>
#include <util/string.h>
namespace Genode { class Fpu_context; }
class Genode::Fpu_context
{
addr_t _fxsave_addr { 0 };
/*
* FXSAVE area providing storage for x87 FPU, MMX, XMM,
* and MXCSR registers.
*
* For further details see Intel SDM Vol. 2A,
* 'FXSAVE instruction'.
*/
char _fxsave_area[527];
struct Context : Mmio<512>
{
struct Fcw : Register<0, 16> { };
struct Mxcsr : Register<24, 32> { };
Context(addr_t const base) : Mmio({(char *)base, Mmio::SIZE})
{
memset((void *)base, 0, Mmio::SIZE);
write<Fcw>(0x37f); /* mask exceptions SysV ABI */
write<Mxcsr>(0x1f80);
}
};
public:
Fpu_context()
{
/* align to 16 byte boundary */
_fxsave_addr = align_addr((addr_t)_fxsave_area, 4);
/* set fcw/mxcsr */
Context init(_fxsave_addr);
}
addr_t fpu_context() const { return _fxsave_addr; }
addr_t fpu_size() const { return sizeof(_fxsave_area); }
};
#endif /* _CORE__SPEC__X86_64__FPU_H_ */

View File

@ -22,6 +22,9 @@ void Kernel::Cpu::_arch_init()
Idt::init();
Tss::init();
/* set interrupt stack pointer to kernel context stack minus FPU state size */
tss.ist[0] = stack_start() - Fpu_context::SIZE;
_pic.init();
_timer.init();
_ipi_irq.init();

View File

@ -181,8 +181,6 @@ void Kernel::Thread::proceed()
if (!_cpu().active(pd().mmu_regs) && type() != CORE)
_cpu().switch_to(pd().mmu_regs);
_cpu().switch_to(*regs);
asm volatile("fxrstor (%1) \n"
"mov %0, %%rsp \n"
"popq %%r8 \n"
@ -202,7 +200,7 @@ void Kernel::Thread::proceed()
"popq %%rbp \n"
"add $16, %%rsp \n"
"iretq \n"
:: "r" (&regs->r8), "r" (regs->fpu_context()));
:: "r" (&regs->r8), "r" (&regs->fpu_context()));
}

View File

@ -20,9 +20,12 @@
using namespace Kernel;
void Thread::exception()
void Thread::exception(Genode::Cpu_state &state)
{
using Genode::Cpu_state;
using Ctx = Core::Cpu::Context;
Genode::memcpy(&*regs, &state, sizeof(Ctx));
switch (regs->trapno) {

View File

@ -462,44 +462,42 @@ uint64_t Vmcb::handle_vm_exit()
return exitcode;
}
void Vmcb::switch_world(Core::Cpu::Context &regs)
void Vmcb::switch_world(Core::Cpu::Context &regs, addr_t stack_start)
{
/*
* We push the host context's physical address to trapno so that
* we can pop it later
*/
regs.trapno = root_vmcb_phys;
asm volatile(
"pushq %[stack];"
"pushq %[host_state];"
"fxrstor (%[fpu_context]);"
"mov %[guest_state], %%rax;"
"mov %[regs], %%rsp;"
"popq %%r8;"
"popq %%r9;"
"popq %%r10;"
"popq %%r11;"
"popq %%r12;"
"popq %%r13;"
"popq %%r14;"
"popq %%r15;"
"add $8, %%rsp;" /* don't pop rax */
"popq %%rbx;"
"popq %%rcx;"
"popq %%rdx;"
"popq %%rdi;"
"popq %%rsi;"
"popq %%rbp;"
"mov %[regs], %%rbx;"
"mov (%%rbx), %%r8;"
"mov 0x8(%%rbx), %%r9;"
"mov 0x10(%%rbx), %%r10;"
"mov 0x18(%%rbx), %%r11;"
"mov 0x20(%%rbx), %%r12;"
"mov 0x28(%%rbx), %%r13;"
"mov 0x30(%%rbx), %%r14;"
"mov 0x38(%%rbx), %%r15;"
"mov 0x50(%%rbx), %%rcx;"
"mov 0x58(%%rbx), %%rdx;"
"mov 0x60(%%rbx), %%rdi;"
"mov 0x68(%%rbx), %%rsi;"
"mov 0x70(%%rbx), %%rbp;"
"mov 0x48(%%rbx), %%rbx;"
"clgi;"
"sti;"
"vmload;"
"vmrun;"
"vmsave;"
"popq %%rax;" /* get the physical address of the host VMCB from
the stack */
the stack again */
"vmload;"
"popq %%rsp;" /* load stack start */
"stgi;" /* maybe enter the kernel to handle an external interrupt
that occured ... */
"nop;"
"cli;" /* ... otherwise, just disable interrupts again */
"subq $568, %%rsp;" /* keep room for fpu and general-purpose registers */
"pushq %[trap_vmexit];" /* make the stack point to trapno, the right place
to jump to _kernel_entry. We push 256 because
this is outside of the valid range for interrupts
@ -507,8 +505,11 @@ void Vmcb::switch_world(Core::Cpu::Context &regs)
"jmp _kernel_entry;" /* jump to _kernel_entry to save the
GPRs without breaking any */
:
: [regs] "r"(&regs.r8), [fpu_context] "r"(regs.fpu_context()),
[guest_state] "r"(vcpu_data.phys_addr + get_page_size()),
: [regs] "r" (&regs.r8),
[fpu_context] "r" (&regs.fpu_context()),
[guest_state] "r" (vcpu_data.phys_addr + get_page_size()),
[host_state] "r" (root_vmcb_phys),
[stack] "r" (stack_start),
[trap_vmexit] "i"(TRAP_VMEXIT)
: "rax", "memory");
}

View File

@ -72,8 +72,6 @@ void Vm::run()
void Vm::proceed()
{
using namespace Board;
_cpu().switch_to(*_vcpu_context.regs);
if (_vcpu_context.init_state == Board::Vcpu_context::Init_state::INITIALIZING) {
_vcpu_context.initialize(_cpu(),
@ -84,7 +82,7 @@ void Vm::proceed()
Cpu::Ia32_tsc_aux::write(
(Cpu::Ia32_tsc_aux::access_t)_vcpu_context.tsc_aux_guest);
_vcpu_context.virt.switch_world(*_vcpu_context.regs);
_vcpu_context.virt.switch_world(*_vcpu_context.regs, _cpu().stack_start());
/*
* This will fall into an interrupt or otherwise jump into
* _kernel_entry. If VMX encountered a severe error condition,
@ -95,13 +93,16 @@ void Vm::proceed()
}
void Vm::exception()
void Vm::exception(Genode::Cpu_state &state)
{
using namespace Board;
using Ctx = Core::Cpu::Context;
Genode::memcpy(&*_vcpu_context.regs, &state, sizeof(Ctx));
bool pause = false;
switch (_vcpu_context.regs->trapno) {
switch (state.trapno) {
case TRAP_VMEXIT:
_vcpu_context.exit_reason =
_vcpu_context.virt.handle_vm_exit();
@ -219,7 +220,7 @@ void Board::Vcpu_context::read_vcpu_state(Vcpu_state &state)
if (state.fpu.charged()) {
state.fpu.with_state(
[&](Vcpu_state::Fpu::State const &fpu) {
memcpy((void *) regs->fpu_context(), &fpu, regs->fpu_size());
memcpy(&regs->fpu_context(), &fpu, Cpu::Fpu_context::SIZE);
});
}
}
@ -230,8 +231,8 @@ void Board::Vcpu_context::write_vcpu_state(Vcpu_state &state)
state.exit_reason = (unsigned) exit_reason;
state.fpu.charge([&](Vcpu_state::Fpu::State &fpu) {
memcpy(&fpu, (void *) regs->fpu_context(), regs->fpu_size());
return regs->fpu_size();
memcpy(&fpu, &regs->fpu_context(), Cpu::Fpu_context::SIZE);
return Cpu::Fpu_context::SIZE;
});
/* SVM will overwrite rax but VMX doesn't. */

View File

@ -821,7 +821,7 @@ void Vmcs::read_vcpu_state(Genode::Vcpu_state &state)
}
}
void Vmcs::switch_world(Core::Cpu::Context &regs)
void Vmcs::switch_world(Core::Cpu::Context &regs, addr_t)
{
_load_pointer();
@ -852,7 +852,7 @@ void Vmcs::switch_world(Core::Cpu::Context &regs)
"vmlaunch;"
:
: [regs] "r"(&regs.r8),
[fpu_context] "r"(regs.fpu_context())
[fpu_context] "r"(&regs.fpu_context())
: "memory");
/*
* Usually when exiting guest mode, VMX will jump to the address

View File

@ -336,7 +336,7 @@ struct Board::Vmcb
Core::Cpu::Context &) override;
void write_vcpu_state(Vcpu_state &state) override;
void read_vcpu_state(Vcpu_state &state) override;
void switch_world(Core::Cpu::Context &regs) override;
void switch_world(Core::Cpu::Context &regs, addr_t) override;
Genode::uint64_t handle_vm_exit() override;
Virt_type virt_type() override

View File

@ -42,7 +42,7 @@ struct Virt_interface
Core::Cpu::Context &regs) = 0;
virtual void write_vcpu_state(Vcpu_state &state) = 0;
virtual void read_vcpu_state(Vcpu_state &state) = 0;
virtual void switch_world(Core::Cpu::Context &regs) = 0;
virtual void switch_world(Core::Cpu::Context &regs, addr_t) = 0;
virtual Virt_type virt_type() = 0;
virtual Genode::uint64_t handle_vm_exit() = 0;

View File

@ -386,7 +386,7 @@ Board::Vmcs
Core::Cpu::Context &regs) override;
void write_vcpu_state(Genode::Vcpu_state &state) override;
void read_vcpu_state(Genode::Vcpu_state &state) override;
void switch_world(Core::Cpu::Context &regs) override;
void switch_world(Core::Cpu::Context &regs, addr_t) override;
uint64_t handle_vm_exit() override;
void save_host_msrs();