vm_session: adjustments to work with seoul vmm

for foc, nova, sel4

Issue #3111
Alexander Boettcher
2019-04-11 14:28:36 +02:00
committed by Christian Helmuth
parent 8950de5a89
commit cc64c43758
19 changed files with 302 additions and 187 deletions
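A pattern that recurs throughout the diff below is the mask-and-shadow handling of guest control registers: bits listed in vmcs_cr0_mask/vmcb_cr0_mask are intercepted by the host, so the value the guest last wrote is kept in a shadow and merged back in on every exit. As a reading aid, here is a minimal, self-contained C++ sketch of that idea; the struct and its names are hypothetical and not part of the commit.

#include <cstdint>
#include <cassert>

struct Cr_shadow
{
    uint64_t mask;    /* guest CR bits intercepted by the host */
    uint64_t set;     /* bits forced to 1 in the real register */
    uint64_t shadow;  /* last value the guest wrote to the masked bits */

    /* vm-exit path: reconstruct the CR value the guest expects to see */
    uint64_t read(uint64_t hw_cr) const {
        return (hw_cr & ~mask) | (shadow & mask); }

    /* vm-entry path: program the real register and remember the shadow */
    uint64_t write(uint64_t guest_cr) {
        shadow = guest_cr;
        return set | (guest_cr & ~mask); }
};

int main()
{
    enum : uint64_t { CR0_TS = 1u << 3, CR0_NE = 1u << 5 };

    Cr_shadow cr0 { CR0_TS | CR0_NE, 0, 0 };  /* mask, set, shadow */

    uint64_t const hw = cr0.write(CR0_TS);    /* guest sets TS */
    assert(cr0.read(hw | CR0_NE) == CR0_TS);  /* guest sees its TS, not the host's NE */
    return 0;
}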


@@ -46,7 +46,6 @@ static uint32_t svm_features()
}
static bool svm_np() { return svm_features() & (1U << 0); }
static bool svm_instr_decode_support() { return svm_features() & (1U << 7); }
struct Vcpu;
@@ -137,6 +136,7 @@ struct Vcpu : Genode::Thread
INTR_INFO = 0x4016,
INTR_ERROR = 0x4018,
ENTRY_INST_LEN = 0x401a,
IDT_INFO = 0x4408,
IDT_ERROR = 0x440a,
@@ -154,39 +154,44 @@ struct Vcpu : Genode::Thread
enum Vmcb
{
CTRL0_INTR = 1u << 0,
CTRL0_NMI = 1u << 1,
CTRL0_INIT = 1u << 3,
CTRL0_INVD = 1u << 22,
CTRL0_HLT = 1u << 24,
CTRL0_VINTR = 1u << 4,
CTRL0_IO = 1u << 27,
CTRL0_MSR = 1u << 28,
CTRL0_SHUTDOWN = 1u << 31,
AMD_EXIT_INVALID = 0xfd
AMD_SVM_ENABLE = 1 << 12,
AMD_EXIT_INVALID = 0xfd,
};
enum {
CR0_PE = 0, /* 1U << 0 - not needed in case of UG */
CR0_CP = 1U << 1,
CR0_TS = 1u << 3,
CR0_NE = 1U << 5,
CR0_NM = 1U << 29,
CR0_CD = 1U << 30,
CR0_PG = 0 /* 1U << 31 - not needed in case of UG */
};
addr_t const vmcb_ctrl0 = CTRL0_INTR | CTRL0_NMI | CTRL0_INIT |
CTRL0_INVD | CTRL0_HLT | CTRL0_IO |
CTRL0_MSR | CTRL0_SHUTDOWN;
addr_t const _cr0_mask { CR0_CP | CR0_NM | CR0_NE | CR0_CD };
addr_t const vmcb_ctrl0 { CTRL0_IO | CTRL0_MSR };
addr_t const vmcb_ctrl1 { 0 };
addr_t vmcb_cr0_shadow { 0 };
addr_t vmcb_cr4_shadow { 0 };
addr_t const vmcb_cr0_mask { _cr0_mask | CR0_TS };
addr_t const vmcb_cr0_set { 0 };
addr_t const vmcb_cr4_mask { 0 };
addr_t const vmcb_cr4_set { 0 };
enum { EXIT_ON_HLT = 1U << 7 };
addr_t const _vmcs_ctrl0 = EXIT_ON_HLT;
addr_t const _vmcs_ctrl0 { EXIT_ON_HLT };
addr_t const vmcs_cr0_mask = CR0_PE | CR0_CP | CR0_NM | CR0_NE | CR0_CD | CR0_PG;
addr_t const vmcs_cr0_set = 0;
addr_t const vmcs_cr0_mask { _cr0_mask | CR0_PE | CR0_PG };
addr_t const vmcs_cr0_set { 0 };
addr_t const vmcs_cr4_mask = CR4_VMX;
addr_t const vmcs_cr4_set = CR4_VMX;
addr_t const vmcs_cr4_mask { CR4_VMX };
addr_t const vmcs_cr4_set { CR4_VMX };
Signal_context_capability _signal;
Semaphore _wake_up { 0 };
@@ -211,7 +216,7 @@ struct Vcpu : Genode::Thread
PAUSE = 1,
RUN = 2,
TERMINATE = 3,
} _remote { PAUSE };
} _remote { NONE };
Lock _remote_lock { Lock::UNLOCKED };
void entry() override
@@ -265,7 +270,6 @@ struct Vcpu : Genode::Thread
* Fiasco.OC peculiarities
*/
if (_vm_type == Virt::SVM) {
enum { AMD_SVM_ENABLE = 1 << 12 };
state.efer.value(state.efer.value() | AMD_SVM_ENABLE);
}
if (_vm_type == Virt::SVM) {
@@ -274,12 +278,9 @@ struct Vcpu : Genode::Thread
if (!vmcb->control_area.np_enable)
vmcb->control_area.intercept_exceptions |= 1 << 14;
/* XXX - further stuff needed .. */
if (!svm_instr_decode_support()) {
vmcb->control_area.intercept_instruction0 = vmcb_ctrl0;
vmcb->control_area.intercept_rd_crX = 0x0001; /* cr0 */
vmcb->control_area.intercept_wr_crX = 0x0001; /* cr0 */
}
vmcb->control_area.intercept_instruction0 = vmcb_ctrl0;
vmcb->control_area.intercept_rd_crX = 0x0001; /* cr0 */
vmcb->control_area.intercept_wr_crX = 0x0001; /* cr0 */
}
if (_vm_type == Virt::VMX) {
Fiasco::l4_vm_vmx_write(vmcs, Vmcs::CR0_MASK, vmcs_cr0_mask);
@@ -295,6 +296,13 @@ struct Vcpu : Genode::Thread
Fiasco::l4_vm_vmx_write(vmcs, Vmcs::ENTRY_CTRL, ENTRY_LOAD_EFER);
}
if (_vm_type == Virt::SVM)
_write_amd_state(state, vmcb, vcpu);
if (_vm_type == Virt::VMX)
_write_intel_state(state, vmcs, vcpu);
vcpu->saved_state = L4_VCPU_F_USER_MODE | L4_VCPU_F_FPU_ENABLED;
State local_state { NONE };
while (true) {
@@ -303,6 +311,15 @@ struct Vcpu : Genode::Thread
Lock::Guard guard(_remote_lock);
local_state = _remote;
_remote = NONE;
if (local_state == PAUSE) {
while (vcpu->sticky_flags) {
/* consume spurious notifications */
Fiasco::l4_cap_idx_t tid = native_thread().kcap;
Fiasco::l4_cap_idx_t irq = tid + Fiasco::TASK_VCPU_IRQ_CAP;
l4_irq_receive(irq, L4_IPC_RECV_TIMEOUT_0);
}
}
}
if (local_state == NONE) {
@@ -316,11 +333,6 @@ struct Vcpu : Genode::Thread
if (_vm_type == Virt::VMX)
_write_intel_state(state, vmcs, vcpu);
/* consume spurious notifications */
Fiasco::l4_cap_idx_t tid = native_thread().kcap;
Fiasco::l4_cap_idx_t irq = tid + Fiasco::TASK_VCPU_IRQ_CAP;
l4_irq_receive(irq, L4_IPC_RECV_TIMEOUT_0);
state.exit_reason = VMEXIT_PAUSED;
if (_vm_type == Virt::SVM)
@@ -350,8 +362,6 @@ struct Vcpu : Genode::Thread
if (_vm_type == Virt::VMX)
_write_intel_state(state, vmcs, vcpu);
vcpu->saved_state = L4_VCPU_F_USER_MODE | L4_VCPU_F_FPU_ENABLED;
/* tell Fiasco.OC to run the vCPU */
l4_msgtag_t tag = l4_thread_vcpu_resume_start();
tag = l4_thread_vcpu_resume_commit(L4_INVALID_CAP, tag);
@@ -377,9 +387,11 @@ struct Vcpu : Genode::Thread
}
/* consume notification */
Fiasco::l4_cap_idx_t tid = native_thread().kcap;
Fiasco::l4_cap_idx_t irq = tid + Fiasco::TASK_VCPU_IRQ_CAP;
l4_irq_receive(irq, L4_IPC_NEVER);
while (vcpu->sticky_flags) {
Fiasco::l4_cap_idx_t tid = native_thread().kcap;
Fiasco::l4_cap_idx_t irq = tid + Fiasco::TASK_VCPU_IRQ_CAP;
l4_irq_receive(irq, L4_IPC_RECV_TIMEOUT_0);
}
}
state.exit_reason = reason & 0xff;
@@ -400,9 +412,11 @@ struct Vcpu : Genode::Thread
}
/* consume notification */
Fiasco::l4_cap_idx_t tid = native_thread().kcap;
Fiasco::l4_cap_idx_t irq = tid + Fiasco::TASK_VCPU_IRQ_CAP;
l4_irq_receive(irq, L4_IPC_RECV_TIMEOUT_0);
while (vcpu->sticky_flags) {
Fiasco::l4_cap_idx_t tid = native_thread().kcap;
Fiasco::l4_cap_idx_t irq = tid + Fiasco::TASK_VCPU_IRQ_CAP;
l4_irq_receive(irq, L4_IPC_RECV_TIMEOUT_0);
}
}
state.exit_reason = reason & 0xff;
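The two hunks above replace a single blocking l4_irq_receive(irq, L4_IPC_NEVER) with a loop that receives with a zero timeout for as long as vcpu->sticky_flags is set, so pending wake-up notifications are drained without ever blocking the vCPU thread. A rough stand-alone model of that pattern, using hypothetical stand-in types instead of the Fiasco.OC IPC API:

#include <atomic>
#include <cstdio>

struct Vcpu_state { std::atomic<unsigned> sticky_flags { 0 }; };

/* stand-in for l4_irq_receive(irq, L4_IPC_RECV_TIMEOUT_0) */
static void receive_nonblocking(Vcpu_state &vcpu)
{
    if (vcpu.sticky_flags)
        vcpu.sticky_flags = 0;   /* pending notification consumed, never blocks */
}

static void drain_notifications(Vcpu_state &vcpu)
{
    /* keep polling as long as pending events are reported */
    while (vcpu.sticky_flags)
        receive_nonblocking(vcpu);
}

int main()
{
    Vcpu_state vcpu;
    vcpu.sticky_flags = 1;      /* a spurious wake-up is pending */
    drain_notifications(vcpu);
    std::printf("sticky_flags = %u\n", vcpu.sticky_flags.load());
    return 0;
}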
@@ -468,12 +482,8 @@ struct Vcpu : Genode::Thread
addr_t const cr0 = Fiasco::l4_vm_vmx_read(vmcs, Vmcs::CR0);
addr_t const cr0_shadow = Fiasco::l4_vm_vmx_read(vmcs, Vmcs::CR0_SHADOW);
state.cr0.value((cr0 & ~vmcs_cr0_mask) | (cr0_shadow & vmcs_cr0_mask));
if (state.cr0.value() != cr0_shadow) {
Genode::error("reset cr0_shadow to cr0 ", Genode::Hex(cr0),
" ", Genode::Hex(cr0_shadow), "->",
Genode::Hex(state.cr0.value()));
if (state.cr0.value() != cr0_shadow)
Fiasco::l4_vm_vmx_write(vmcs, Vmcs::CR0_SHADOW, state.cr0.value());
}
}
unsigned const cr2 = Fiasco::l4_vm_vmx_get_cr2_index(vmcs);
@@ -484,19 +494,11 @@ struct Vcpu : Genode::Thread
addr_t const cr4 = Fiasco::l4_vm_vmx_read(vmcs, Vmcs::CR4);
addr_t const cr4_shadow = Fiasco::l4_vm_vmx_read(vmcs, Vmcs::CR4_SHADOW);
state.cr4.value((cr4 & ~vmcs_cr4_mask) | (cr4_shadow & vmcs_cr4_mask));
if (state.cr4.value() != cr4_shadow) {
Genode::error("reset cr0_shadow to cr4 ", Genode::Hex(cr4),
" ", Genode::Hex(cr4_shadow), "->",
Genode::Hex(state.cr4.value()));
if (state.cr4.value() != cr4_shadow)
Fiasco::l4_vm_vmx_write(vmcs, Vmcs::CR4_SHADOW,
state.cr4.value());
}
}
/*
state.cs.value(Segment{utcb.cs.sel, utcb.cs.ar, utcb.cs.limit,
utcb.cs.base});
*/
using Fiasco::l4_vm_vmx_read;
using Fiasco::l4_vm_vmx_read_16;
using Fiasco::l4_vm_vmx_read_32;
@@ -636,7 +638,7 @@ struct Vcpu : Genode::Thread
state.sp.value(vmcb->state_save_area.rsp);
state.ip.value(vmcb->state_save_area.rip);
state.ip_len.value(state.ip_len.value()); /* unsupported on AMD */
state.ip_len.value(0); /* unsupported on AMD */
state.dr7.value(vmcb->state_save_area.dr7);
@@ -651,10 +653,20 @@ struct Vcpu : Genode::Thread
state.r15.value(vcpu->r.r15);
#endif
state.cr0.value(vmcb->state_save_area.cr0);
{
addr_t const cr0 = vmcb->state_save_area.cr0;
state.cr0.value((cr0 & ~vmcb_cr0_mask) | (vmcb_cr0_shadow & vmcb_cr0_mask));
if (state.cr0.value() != vmcb_cr0_shadow)
vmcb_cr0_shadow = state.cr0.value();
}
state.cr2.value(vmcb->state_save_area.cr2);
state.cr3.value(vmcb->state_save_area.cr3);
state.cr4.value(vmcb->state_save_area.cr4);
{
addr_t const cr4 = vmcb->state_save_area.cr4;
state.cr4.value((cr4 & ~vmcb_cr4_mask) | (vmcb_cr4_shadow & vmcb_cr4_mask));
if (state.cr4.value() != vmcb_cr4_shadow)
vmcb_cr4_shadow = state.cr4.value();
}
typedef Genode::Vm_state::Segment Segment;
@@ -733,9 +745,7 @@ struct Vcpu : Genode::Thread
state.tsc.value(Trace::timestamp());
state.tsc_offset.value(_tsc_offset);
if (state.efer.valid()) {
Genode::error("efer not implemented");
}
state.efer.value(vmcb->state_save_area.efer);
if (state.pdpte_0.valid() || state.pdpte_1.valid() ||
state.pdpte_2.valid() || state.pdpte_3.valid()) {
@@ -760,12 +770,14 @@ struct Vcpu : Genode::Thread
using Fiasco::l4_vm_vmx_write;
if (state.ax.valid() || state.cx.valid() || state.dx.valid() ||
state.bx.valid() || state.bp.valid() || state.di.valid() ||
state.si.valid()) {
state.bx.valid()) {
vcpu->r.ax = state.ax.value();
vcpu->r.cx = state.cx.value();
vcpu->r.dx = state.dx.value();
vcpu->r.bx = state.bx.value();
}
if (state.bp.valid() || state.di.valid() || state.si.valid()) {
vcpu->r.bp = state.bp.value();
vcpu->r.di = state.di.value();
vcpu->r.si = state.si.value();
@@ -825,9 +837,10 @@ struct Vcpu : Genode::Thread
l4_vm_vmx_write(vmcs, Vmcs::CR4_SHADOW, state.cr4.value());
}
#if 1
if (state.inj_info.valid() || state.inj_error.valid()) {
addr_t ctrl_0 = Fiasco::l4_vm_vmx_read(vmcs, Vmcs::CTRL_0);
addr_t ctrl_0 = state.ctrl_primary.valid() ?
state.ctrl_primary.value() :
Fiasco::l4_vm_vmx_read(vmcs, Vmcs::CTRL_0);
if (state.inj_info.value() & 0x2000)
Genode::warning("unimplemented ", state.inj_info.value() & 0x1000, " ", state.inj_info.value() & 0x2000, " ", Genode::Hex(ctrl_0), " ", Genode::Hex(state.ctrl_secondary.value()));
@@ -844,7 +857,6 @@ struct Vcpu : Genode::Thread
l4_vm_vmx_write(vmcs, Vmcs::INTR_ERROR,
state.inj_error.value());
}
#endif
if (state.flags.valid())
l4_vm_vmx_write(vmcs, Vmcs::FLAGS, state.flags.value());
@@ -855,24 +867,27 @@ struct Vcpu : Genode::Thread
if (state.ip.valid())
l4_vm_vmx_write(vmcs, Vmcs::IP, state.ip.value());
if (state.ip_len.valid())
l4_vm_vmx_write(vmcs, Vmcs::ENTRY_INST_LEN, state.ip_len.value());
if (state.efer.valid())
l4_vm_vmx_write(vmcs, Vmcs::EFER, state.efer.value());
if (state.ctrl_primary.valid())
l4_vm_vmx_write(vmcs, Vmcs::CTRL_0,
_vmcs_ctrl0 | state.ctrl_primary.value());
_vmcs_ctrl0 | state.ctrl_primary.value());
if (state.ctrl_secondary.valid())
l4_vm_vmx_write(vmcs, Vmcs::CTRL_1,
state.ctrl_secondary.value());
state.ctrl_secondary.value());
if (state.intr_state.valid())
l4_vm_vmx_write(vmcs, Vmcs::STATE_INTR,
state.intr_state.value());
state.intr_state.value());
if (state.actv_state.valid())
l4_vm_vmx_write(vmcs, Vmcs::STATE_ACTV,
state.actv_state.valid());
state.actv_state.value());
if (state.cs.valid()) {
l4_vm_vmx_write(vmcs, Vmcs::CS_SEL, state.cs.value().sel);
@@ -964,13 +979,16 @@ struct Vcpu : Genode::Thread
Fiasco::l4_vcpu_state_t *vcpu)
{
if (state.ax.valid() || state.cx.valid() || state.dx.valid() ||
state.bx.valid() || state.bp.valid() || state.di.valid() ||
state.si.valid()) {
state.bx.valid()) {
vmcb->state_save_area.rax = state.ax.value();
vcpu->r.ax = state.ax.value();
vcpu->r.cx = state.cx.value();
vcpu->r.dx = state.dx.value();
vcpu->r.bx = state.bx.value();
}
if (state.bp.valid() || state.di.valid() || state.si.valid()) {
vcpu->r.bp = state.bp.value();
vcpu->r.di = state.di.value();
vcpu->r.si = state.si.value();
@@ -1007,7 +1025,8 @@ struct Vcpu : Genode::Thread
vmcb->state_save_area.dr7 = state.dr7.value();
if (state.cr0.valid()) {
vmcb->state_save_area.cr0 = state.cr0.value();
vmcb->state_save_area.cr0 = vmcb_cr0_set | (~vmcb_cr0_mask & state.cr0.value());
vmcb_cr0_shadow = state.cr0.value();
#if 0
vmcb->state_save_area.xcr0 = state.cr0();
#endif
@@ -1019,13 +1038,36 @@ struct Vcpu : Genode::Thread
if (state.cr3.valid())
vmcb->state_save_area.cr3 = state.cr3.value();
if (state.cr4.valid())
vmcb->state_save_area.cr4 = state.cr4.value();
if (state.cr4.valid()) {
vmcb->state_save_area.cr4 = vmcb_cr4_set | (~vmcb_cr4_mask & state.cr4.value());
vmcb_cr4_shadow = state.cr4.value();
}
if (state.inj_info.valid())
Genode::error(__LINE__, " not implemented ");
if (state.inj_error.valid())
Genode::error(__LINE__, " not implemented ");
if (state.ctrl_primary.valid())
vmcb->control_area.intercept_instruction0 = vmcb_ctrl0 |
state.ctrl_primary.value();
if (state.ctrl_secondary.valid())
vmcb->control_area.intercept_instruction1 = vmcb_ctrl1 |
state.ctrl_secondary.value();
if (state.inj_info.valid()) {
if (state.inj_info.value() & 0x1000) {
vmcb->control_area.interrupt_ctl |= (1ul << 8 | 1ul << 20);
vmcb->control_area.intercept_instruction0 |= Vmcb::CTRL0_VINTR;
} else {
vmcb->control_area.interrupt_ctl &= ~(1ul << 8 | 1ul << 20);
vmcb->control_area.intercept_instruction0 &= ~Vmcb::CTRL0_VINTR;
}
vmcb->control_area.eventinj = 0;
vmcb->control_area.eventinj |= ~0x3000U & state.inj_info.value();
}
if (state.inj_error.valid()) {
vmcb->control_area.eventinj &= ((1ULL << 32) - 1);
uint64_t value = (0ULL + state.inj_error.value()) << 32;
vmcb->control_area.eventinj |= value;
}
if (state.flags.valid())
vmcb->state_save_area.rflags = state.flags.value();
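The hunk above turns bit 0x1000 of inj_info into the VMCB virtual-interrupt request plus a VINTR intercept, and packs the remaining injection state into the 64-bit eventinj field: the info bits without bits 12/13 go into the lower half, the error code into the upper half. A small stand-alone sketch of that packing, where compose_eventinj is a hypothetical helper and not part of the commit:

#include <cstdint>
#include <cassert>

/*
 * Mirror of the packing above: bits 12/13 of inj_info are handled via the
 * virtual-interrupt controls and therefore dropped here; the rest goes to
 * eventinj[31:0], the error code to eventinj[63:32].
 */
static uint64_t compose_eventinj(uint32_t inj_info, uint32_t inj_error)
{
    uint64_t eventinj = inj_info & ~0x3000u;   /* drop bits 12 and 13 */
    eventinj &= (1ull << 32) - 1;              /* keep the low half only */
    eventinj |= uint64_t(inj_error) << 32;     /* error code on top */
    return eventinj;
}

int main()
{
    /* #PF (vector 0x0e), type hardware exception, error-code valid, error code 2 */
    uint64_t const v = compose_eventinj(0x80000b0eu, 2);

    assert((v & 0xffffffffu) == 0x80000b0eu);
    assert((v >> 32) == 2);
    return 0;
}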
@@ -1037,14 +1079,7 @@ struct Vcpu : Genode::Thread
vmcb->state_save_area.rip = state.ip.value();
if (state.efer.valid())
vmcb->state_save_area.efer = state.efer.value();
if (state.ctrl_primary.valid())
vmcb->control_area.intercept_instruction0 = vmcb_ctrl0 |
state.ctrl_primary.value();
if (state.ctrl_secondary.valid())
vmcb->control_area.intercept_instruction1 = state.ctrl_secondary.value();
vmcb->state_save_area.efer = state.efer.value() | AMD_SVM_ENABLE;
if (state.intr_state.valid())
vmcb->control_area.interrupt_shadow = state.intr_state.value();
@@ -1162,7 +1197,7 @@ struct Vcpu : Genode::Thread
void resume() {
Lock::Guard guard(_remote_lock);
if (_remote == RUN)
if (_remote == RUN || _remote == PAUSE)
return;
_remote = RUN;