vbox6: refactor vcpu implementation

Also add recent SVM changes from vbox5.

Issue #4031
Christian Helmuth 2021-03-23 16:22:37 +01:00 committed by Norman Feske
parent 96cc660f95
commit 1a57a5a959
25 changed files with 1404 additions and 1639 deletions

View File

@ -107,9 +107,6 @@ SRC_CC += GuestHost/DragAndDrop/DnDDroppedFiles.cpp
SRC_CC += GuestHost/DragAndDrop/DnDMIME.cpp
SRC_CC += GuestHost/DragAndDrop/DnDPath.cpp
SRC_CC += devxhci.cc
INC_DIR += $(call select_from_repositories,src/lib/libc)
INC_DIR += $(VBOX_DIR)/Devices/build
INC_DIR += $(VBOX_DIR)/Devices/Bus
INC_DIR += $(VIRTUALBOX_DIR)/include/VBox/Graphics
@ -158,6 +155,5 @@ vboxssdt_cpuhotplug.hex: vbox-cpuhotplug.dsl
)
vpath %.dsl $(VBOX_DIR)/Devices/PC
vpath devxhci.cc $(REP_DIR)/src/virtualbox6
CC_CXX_WARN_STRICT =

View File

@ -2,22 +2,15 @@ include $(REP_DIR)/lib/mk/virtualbox6-common.inc
LIBS += stdcxx
SRC_CC := sup.cc sup_sem.cc sup_gmm.cc sup_drv.cc sup_vm.cc vcpu.cc
SRC_CC += HostDrivers/Support/SUPLib.cpp
SRC_CC := HostDrivers/Support/SUPLib.cpp
SRC_CC += HostDrivers/Support/SUPLibLdr.cpp
INC_DIR += $(call select_from_repositories,src/lib/libc)
INC_DIR += $(VIRTUALBOX_DIR)/VBoxAPIWrap
INC_DIR += $(VBOX_DIR)/HostDrivers/Support
INC_DIR += $(VBOX_DIR)/Devices/Bus
INC_DIR += $(REP_DIR)/src/virtualbox6
INC_DIR += $(VBOX_DIR)/Main/xml
INC_DIR += $(VBOX_DIR)/Main/include
INC_DIR += $(VBOX_DIR)/VMM/include
vpath %.cc $(REP_DIR)/src/virtualbox6
CC_CXX_WARN_STRICT =

View File

@ -4,7 +4,6 @@ LIBS += stdcxx
SRC_CC += $(addprefix VBoxAPIWrap/, $(notdir $(wildcard $(VIRTUALBOX_DIR)/VBoxAPIWrap/*.cpp)))
INC_DIR += $(REP_DIR)/src/virtualbox6/frontend
INC_DIR += $(REP_DIR)/src/virtualbox6/include
INC_DIR += $(VBOX_DIR)/Main/include

View File

@ -69,7 +69,6 @@ CC_OPT_Main/src-client/GuestDnDPrivate = -Wno-enum-compare
INC_DIR += $(VBOX_DIR)/Main/xml
INC_DIR += $(VBOX_DIR)/Main/include
INC_DIR += $(REP_DIR)/src/virtualbox6/frontend
INC_DIR += $(VIRTUALBOX_DIR)/VBoxAPIWrap
INC_DIR += $(VIRTUALBOX_DIR)/include/VBox/Graphics

View File

@ -77,7 +77,6 @@ VBOX_CC_OPT += -DIN_VBOXSVC
INC_DIR += $(VBOX_DIR)/Main/xml
INC_DIR += $(VBOX_DIR)/Main/include
INC_DIR += $(REP_DIR)/src/virtualbox6/frontend
INC_DIR += $(VIRTUALBOX_DIR)/VBoxAPIWrap
INC_DIR += $(VIRTUALBOX_DIR)/include/VBox/Graphics

View File

@ -414,8 +414,6 @@ PDMBOTHCBDECL(VBOXSTRICTRC) xhciMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGC
*/
static DECLCALLBACK(void) xhciR3Reset(PPDMDEVINS pDevIns)
{
PXHCI pThis = PDMINS_2_DATA(pDevIns, PXHCI);
Qemu::usb_reset();
Qemu::usb_update_devices();
}

View File

@ -31,7 +31,7 @@
/* local includes */
#include <stub_macros.h>
#include <sup.h>
#include <vcpu.h>
#include <sup_vcpu.h>
#include <sup_gmm.h>
#include <sup_vm.h>
@ -246,9 +246,8 @@ VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
nem_ptr->commit_range();
VBOXSTRICTRC result = 0;
vm.with_vcpu_handler(Cpu_index { pVCpu->idCpu }, [&] (Sup::Vcpu_handler &handler) {
result = handler.run_hw(vm);
});
vm.with_vcpu(Cpu_index { pVCpu->idCpu }, [&] (Sup::Vcpu &vcpu) {
result = vcpu.run(); });
return result;
}
@ -280,8 +279,8 @@ void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, ::uint32_t fFlags)
if (fFlags & VMNOTIFYFF_FLAGS_POKE) {
Sup::Vm &vm = *(Sup::Vm *)pVM;
vm.with_vcpu_handler(Sup::Cpu_index { pVCpu->idCpu }, [&] (Sup::Vcpu_handler &handler) {
handler.recall(vm); });
vm.with_vcpu(Sup::Cpu_index { pVCpu->idCpu }, [&] (Sup::Vcpu &vcpu) {
vcpu.pause(); });
}
}

View File

@ -176,9 +176,9 @@ static void setup_vcpu_handler(Sup::Vm &vm, Sup::Cpu_index cpu)
{
Pthread::Emt &emt = Pthread::emt_for_cpu(cpu);
Sup::Vcpu_handler &handler = sup_drv->create_vcpu_handler(cpu, emt);
Sup::Vcpu &vcpu = sup_drv->create_vcpu(vm, cpu, emt);
vm.register_vcpu_handler(cpu, handler);
vm.register_vcpu(cpu, vcpu);
}
@ -238,9 +238,8 @@ static int vmmr0_gvmm_sched_halt(PVMR0 pvmr0, ::uint32_t cpu, ::uint64_t expire_
ns_diff = RT_NS_1SEC;
}
vm.with_vcpu_handler(Sup::Cpu_index { cpu }, [&] (Sup::Vcpu_handler &handler) {
handler.halt(ns_diff);
});
vm.with_vcpu(Sup::Cpu_index { cpu }, [&] (Sup::Vcpu &vcpu) {
vcpu.halt(ns_diff); });
/*
* returns VINF_SUCCESS on normal wakeup (timeout or kicked by other thread)
@ -254,9 +253,8 @@ static int vmmr0_gvmm_wake_up(PVMR0 pvmr0, uint32_t cpu)
{
Sup::Vm &vm = *(Sup::Vm *)pvmr0;
vm.with_vcpu_handler(Sup::Cpu_index { cpu }, [&] (Sup::Vcpu_handler &handler) {
handler.wake_up();
});
vm.with_vcpu(Sup::Cpu_index { cpu }, [&] (Sup::Vcpu &vcpu) {
vcpu.wake_up(); });
return VINF_SUCCESS;
}
@ -543,8 +541,6 @@ static int vmmr0_pgm_allocate_handy_pages(PVMR0 pvmr0)
static int vmmr0_vmmr0_init(PVMR0 pvmr0)
{
Sup::Vm &vm = *(Sup::Vm *)pvmr0;
/* produces
*
* [init -> vbox1] EMT VMM: Thread-context hooks unavailable
@ -558,8 +554,6 @@ static int vmmr0_vmmr0_init(PVMR0 pvmr0)
static int vmmr0_vmmr0_init_emt(PVMR0 pvmr0, uint32_t cpu)
{
Sup::Vm &vm = *(Sup::Vm *)pvmr0;
return VINF_SUCCESS;
}

View File

@ -29,6 +29,8 @@ namespace Sup {
struct Gmm;
void nem_init(Gmm &);
void update_gim_system_time(VM &, VMCPU &);
}
#endif /* _SUP_H_ */

View File

@ -55,26 +55,15 @@ Sup::Drv::Cpu_virt Sup::Drv::_cpu_virt_from_rom()
}
Sup::Vcpu_handler &Sup::Drv::create_vcpu_handler(Cpu_index cpu_index,
Pthread::Emt &emt)
Sup::Vcpu & Sup::Drv::create_vcpu(VM &vm, Cpu_index cpu_index, Pthread::Emt &emt)
{
Libc::Allocator alloc { };
switch (_cpu_virt) {
case Cpu_virt::VMX:
return *new Vcpu_handler_vmx(_env,
cpu_index.value,
emt,
_vm_connection,
alloc);
return Vcpu::create_vmx(_env, vm, _vm_connection, cpu_index, emt);
case Cpu_virt::SVM:
return *new Vcpu_handler_svm(_env,
cpu_index.value,
emt,
_vm_connection,
alloc);
return Vcpu::create_svm(_env, vm, _vm_connection, cpu_index, emt);
case Cpu_virt::NONE:
break;

View File

@ -20,7 +20,7 @@
#include <sup.h>
#include <sup_gip.h>
#include <sup_gmm.h>
#include <vcpu.h>
#include <sup_vcpu.h>
/* Genode includes */
#include <base/env.h>
@ -73,7 +73,7 @@ class Sup::Drv
/*
* \throw Virtualization_support_missing
*/
Vcpu_handler &create_vcpu_handler(Cpu_index, Pthread::Emt &);
Vcpu & create_vcpu(VM &, Cpu_index, Pthread::Emt &);
};
#endif /* _SUP_DRV_H_ */

View File

@ -35,13 +35,13 @@
#include <VBox/vmm/vmcc.h>
/* local includes */
#include <vcpu.h>
#include <sup.h>
using namespace Genode;
/*
* This function must be called by the VCPU handler when detecting an MSR-write
* This function must be called by the vCPU handler when detecting an MSR-write
* VM exit for MSR_KVM_SYSTEM_TIME_NEW before entering the VirtualBox code
* (which calls gimKvmWriteMsr). Since we are never executing any R0 code, the
pKvmCpu value would remain undefined when arriving at the following
@ -52,7 +52,7 @@ using namespace Genode;
*
* The implementation roughly corresponds to 'gimR0KvmUpdateSystemTime'
*/
void Sup::Vcpu_handler::_update_gim_system_time()
void Sup::update_gim_system_time(VM &vm, VMCPU &vmcpu)
{
using ::uint64_t;
@ -64,9 +64,9 @@ void Sup::Vcpu_handler::_update_gim_system_time()
*/
for (unsigned round = 1; ; ++round) {
uTsc = TMCpuTickGetNoCheck(_vcpu) | UINT64_C(1);
uVirtNanoTS = TMVirtualGetNoCheck(_vm) | UINT64_C(1);
uint64_t const uTsc_again = TMCpuTickGetNoCheck(_vcpu) | UINT64_C(1);
uTsc = TMCpuTickGetNoCheck(&vmcpu) | UINT64_C(1);
uVirtNanoTS = TMVirtualGetNoCheck(&vm) | UINT64_C(1);
uint64_t const uTsc_again = TMCpuTickGetNoCheck(&vmcpu) | UINT64_C(1);
enum { MAX_MEASUREMENT_DURATION = 200U };
@ -78,9 +78,9 @@ void Sup::Vcpu_handler::_update_gim_system_time()
" uTsc_again=", uTsc_again, " uVirtNanoTS=", uVirtNanoTS);
}
for (VMCPUID idCpu = 0; idCpu < _vm->cCpus; idCpu++) {
for (VMCPUID idCpu = 0; idCpu < vm.cCpus; idCpu++) {
PGIMKVMCPU pKvmCpu = &VMCC_GET_CPU(_vm, idCpu)->gim.s.u.KvmCpu;
PGIMKVMCPU pKvmCpu = &VMCC_GET_CPU(&vm, idCpu)->gim.s.u.KvmCpu;
if (!pKvmCpu->uTsc && !pKvmCpu->uVirtNanoTS) {
pKvmCpu->uTsc = uTsc;

View File

@ -145,7 +145,7 @@ class Sup::Gip
SUPGIPCPU *cpu = _gip.aCPUs;
/* XXX in SUPGIPMODE_SYNC_TSC only the first CPU's TSC is updated */
Entrypoint &ep = *new Entrypoint(env, cpu, cpu_hz);
new Entrypoint(env, cpu, cpu_hz);
for (unsigned i = 0; i < cpu_count.value; ++i) {
cpu[i].u32TransactionId = 0;

View File

@ -81,8 +81,6 @@ int SUPSemEventMultiCreate(PSUPDRVSESSION pSession,
{
AssertPtrReturn(phEventMulti, VERR_INVALID_POINTER);
RTSEMEVENTMULTI sem;
return RTSemEventMultiCreate((RTSEMEVENTMULTI*)phEventMulti);
}

View File

@ -0,0 +1,822 @@
/*
* \brief SUPLib vCPU utility
* \author Alexander Boettcher
* \author Norman Feske
* \author Christian Helmuth
*/
/*
* Copyright (C) 2013-2021 Genode Labs GmbH
*
* This file is distributed under the terms of the GNU General Public License
* version 2.
*/
/* Genode includes */
#include <base/attached_dataspace.h>
#include <vm_session/handler.h>
#include <util/noncopyable.h>
#include <cpu/vcpu_state.h>
#include <cpu/memory_barrier.h>
#include <libc/allocator.h>
/* VirtualBox includes */
#include <VBox/vmm/cpum.h> /* must be included before CPUMInternal.h */
#include <CPUMInternal.h> /* enable access to cpum.s.* */
#include <HMInternal.h> /* enable access to hm.s.* */
#include <PGMInternal.h> /* enable access to pgm.s.* */
#include <VBox/vmm/vmcc.h> /* must be included before PGMInline.h */
#include <PGMInline.h> /* pgmPhysGetRangeAtOrAbove() */
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/err.h>
#include <iprt/time.h>
/* libc includes */
#include <stdlib.h> /* for exit() */
#include <pthread.h>
#include <errno.h>
/* local includes */
#include <sup_vcpu.h>
#include <pthread_emt.h>
using namespace Genode;
/*
* VirtualBox stores segment attributes in Intel format using 17 bits of a
* 32-bit value, which includes bits 19:16 of segment limit (see
* X86DESCATTRBITS).
*
* Genode represents the attributes in packed SVM VMCB format using 13 bits of
* a 16-bit value without segment-limit bits.
*/
static inline uint16_t sel_ar_conv_to_genode(Genode::uint32_t v)
{
return (v & 0xff) | ((v & 0x1f000) >> 4);
}
static inline Genode::uint32_t sel_ar_conv_from_genode(Genode::uint16_t v)
{
return (v & 0xff) | (((uint32_t )v << 4) & 0x1f000);
}
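/*
 * Editorial sketch, not part of the commit: a worked round trip of the
 * packing. A flat 64-bit code segment carries the Intel attribute word
 * 0xa09b (present, code, long, granularity set); its bits 11:8 (limit
 * bits 19:16) are zero, so the conversion is lossless for this value.
 */
static void sel_ar_conv_round_trip_example()
{
	uint16_t const packed = sel_ar_conv_to_genode(0xa09b);

	/* bits 16:12 move down to 12:8, the limit bits are dropped */
	Assert(packed == 0xa9b);
	Assert(sel_ar_conv_from_genode(packed) == 0xa09b);
}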
namespace Sup {
struct Vmx;
struct Svm;
enum class Exit_state { DEFAULT, NPT_EPT, PAUSED, IRQ_WINDOW, STARTUP, ERROR };
struct Handle_exit_result
{
Exit_state state;
VBOXSTRICTRC rc;
};
template <typename> struct Vcpu_impl;
}
#include <sup_vcpu_vmx.h>
#include <sup_vcpu_svm.h>
template <typename VIRT>
class Sup::Vcpu_impl : public Sup::Vcpu, Genode::Noncopyable
{
private:
Pthread::Emt &_emt;
Cpu_index const _cpu;
VM &_vm;
VMCPU &_vmcpu;
Libc::Allocator _alloc;
/* exit handler runs in vCPU mode - switches to EMT */
void _handle_exit();
Vcpu_handler<Vcpu_impl<VIRT>> _handler {
_emt.genode_ep(), *this, &Vcpu_impl<VIRT>::_handle_exit };
Vm_connection::Vcpu _vcpu;
/* halt/wake_up support */
pthread_cond_t _halt_cond;
pthread_mutex_t _halt_mutex;
/* state machine between EMT and vCPU mode */
enum Current_state { RUNNING, PAUSED } _current_state { PAUSED };
enum Next_state { PAUSE_EXIT, RUN } _next_state { RUN };
/* interrupt-window exit requested */
bool _irq_window = false;
enum {
REQ_IRQ_WINDOW_EXIT = 0x1000U,
VMX_ENTRY_INT_INFO_NONE = 0U,
VMX_VMCS_GUEST_INT_STATE_NONE = 0U,
};
struct {
unsigned intr_state = 0;
unsigned ctrl_primary = VIRT::ctrl_primary();
unsigned ctrl_secondary = VIRT::ctrl_secondary();
} _cached_state;
inline void _transfer_state_to_vcpu(CPUMCTX const &);
inline void _transfer_state_to_vbox(CPUMCTX &);
inline bool _check_and_request_irq_window();
inline bool _continue_hw_accelerated();
inline VBOXSTRICTRC _switch_to_hw();
inline Current_state _handle_npt_ept(VBOXSTRICTRC &);
inline Current_state _handle_paused();
inline Current_state _handle_irq_window();
inline Current_state _handle_startup();
public:
Vcpu_impl(Genode::Env &, VM &, Vm_connection &, Cpu_index, Pthread::Emt &);
/* Vcpu interface */
VBOXSTRICTRC run() override;
void pause() override;
void halt(Genode::uint64_t const wait_ns) override;
void wake_up() override;
};
template <typename T> void Sup::Vcpu_impl<T>::_handle_exit()
{
_emt.switch_to_emt();
if (_next_state == RUN)
_vcpu.run(); /* resume vCPU */
else
_vcpu.pause(); /* cause pause exit */
}
template <typename VIRT> void Sup::Vcpu_impl<VIRT>::_transfer_state_to_vcpu(CPUMCTX const &ctx)
{
Vcpu_state &state { _vcpu.state() };
/* transfer defaults and cached state */
state.inj_info.charge(VMX_ENTRY_INT_INFO_NONE); /* XXX never injects events */
state.intr_state.charge(_cached_state.intr_state);
state.actv_state.charge(VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
state.ctrl_primary.charge(_cached_state.ctrl_primary); /* XXX always updates ctrls */
state.ctrl_secondary.charge(_cached_state.ctrl_secondary); /* XXX always updates ctrls */
typedef Genode::Vcpu_state::Range Range;
state.ip.charge(ctx.rip);
state.sp.charge(ctx.rsp);
state.ax.charge(ctx.rax);
state.bx.charge(ctx.rbx);
state.cx.charge(ctx.rcx);
state.dx.charge(ctx.rdx);
state.bp.charge(ctx.rbp);
state.si.charge(ctx.rsi);
state.di.charge(ctx.rdi);
state.r8.charge(ctx.r8);
state.r9.charge(ctx.r9);
state.r10.charge(ctx.r10);
state.r11.charge(ctx.r11);
state.r12.charge(ctx.r12);
state.r13.charge(ctx.r13);
state.r14.charge(ctx.r14);
state.r15.charge(ctx.r15);
state.flags.charge(ctx.rflags.u);
state.sysenter_cs.charge(ctx.SysEnter.cs);
state.sysenter_sp.charge(ctx.SysEnter.esp);
state.sysenter_ip.charge(ctx.SysEnter.eip);
state.dr7.charge(ctx.dr[7]);
state.cr0.charge(ctx.cr0);
state.cr2.charge(ctx.cr2);
state.cr3.charge(ctx.cr3);
state.cr4.charge(ctx.cr4);
state.idtr.charge(Range { .limit = ctx.idtr.cbIdt,
.base = ctx.idtr.pIdt });
state.gdtr.charge(Range { .limit = ctx.gdtr.cbGdt,
.base = ctx.gdtr.pGdt });
state.efer.charge(CPUMGetGuestEFER(&_vmcpu));
/*
* Update the PDPTE registers if necessary
*
* Intel manual sections 4.4.1 of Vol. 3A and 26.3.2.4 of Vol. 3C
* indicate the conditions when this is the case. The following
* code currently does not check if the recompiler modified any
* CR registers, which means the update can happen more often
* than really necessary.
*/
if (_vm.hm.s.vmx.fSupported &&
CPUMIsGuestPagingEnabledEx(&ctx) &&
CPUMIsGuestInPAEModeEx(&ctx)) {
Genode::warning("PDPTE updates disabled!");
}
state.star.charge(ctx.msrSTAR);
state.lstar.charge(ctx.msrLSTAR);
state.cstar.charge(ctx.msrCSTAR);
state.fmask.charge(ctx.msrSFMASK);
state.kernel_gs_base.charge(ctx.msrKERNELGSBASE);
/* from HMVMXR0.cpp */
bool interrupt_pending = false;
uint8_t tpr = 0;
uint8_t pending_interrupt = 0;
APICGetTpr(&_vmcpu, &tpr, &interrupt_pending, &pending_interrupt);
state.tpr.charge(tpr);
state.tpr_threshold.charge(0);
if (interrupt_pending) {
const uint8_t pending_priority = (pending_interrupt >> 4) & 0xf;
const uint8_t tpr_priority = (tpr >> 4) & 0xf;
if (pending_priority <= tpr_priority)
state.tpr_threshold.charge(pending_priority);
else
state.tpr_threshold.charge(tpr_priority);
}
/* export FPU state */
AssertCompile(sizeof(Vcpu_state::Fpu::State) >= sizeof(X86FXSTATE));
_vcpu.state().fpu.charge([&] (Vcpu_state::Fpu::State &fpu) {
::memcpy(fpu._buffer, ctx.pXStateR3, sizeof(fpu));
});
/* do SVM/VMX-specific transfers */
VIRT::transfer_state_to_vcpu(state, ctx);
}
template <typename VIRT> void Sup::Vcpu_impl<VIRT>::_transfer_state_to_vbox(CPUMCTX &ctx)
{
Vcpu_state const &state { _vcpu.state() };
ctx.rip = state.ip.value();
ctx.rsp = state.sp.value();
ctx.rax = state.ax.value();
ctx.rbx = state.bx.value();
ctx.rcx = state.cx.value();
ctx.rdx = state.dx.value();
ctx.rbp = state.bp.value();
ctx.rsi = state.si.value();
ctx.rdi = state.di.value();
ctx.rflags.u = state.flags.value();
ctx.r8 = state.r8.value();
ctx.r9 = state.r9.value();
ctx.r10 = state.r10.value();
ctx.r11 = state.r11.value();
ctx.r12 = state.r12.value();
ctx.r13 = state.r13.value();
ctx.r14 = state.r14.value();
ctx.r15 = state.r15.value();
ctx.dr[7] = state.dr7.value();
PVMCPU pVCpu = &_vmcpu;
if (ctx.SysEnter.cs != state.sysenter_cs.value())
CPUMSetGuestMsr(pVCpu, MSR_IA32_SYSENTER_CS, state.sysenter_cs.value());
if (ctx.SysEnter.esp != state.sysenter_sp.value())
CPUMSetGuestMsr(pVCpu, MSR_IA32_SYSENTER_ESP, state.sysenter_sp.value());
if (ctx.SysEnter.eip != state.sysenter_ip.value())
CPUMSetGuestMsr(pVCpu, MSR_IA32_SYSENTER_EIP, state.sysenter_ip.value());
if (ctx.idtr.cbIdt != state.idtr.value().limit ||
ctx.idtr.pIdt != state.idtr.value().base)
CPUMSetGuestIDTR(pVCpu, state.idtr.value().base, state.idtr.value().limit);
if (ctx.gdtr.cbGdt != state.gdtr.value().limit ||
ctx.gdtr.pGdt != state.gdtr.value().base)
CPUMSetGuestGDTR(pVCpu, state.gdtr.value().base, state.gdtr.value().limit);
CPUMSetGuestEFER(pVCpu, state.efer.value());
if (ctx.cr0 != state.cr0.value())
CPUMSetGuestCR0(pVCpu, state.cr0.value());
if (ctx.cr2 != state.cr2.value())
CPUMSetGuestCR2(pVCpu, state.cr2.value());
if (ctx.cr3 != state.cr3.value()) {
CPUMSetGuestCR3(pVCpu, state.cr3.value());
VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
}
if (ctx.cr4 != state.cr4.value())
CPUMSetGuestCR4(pVCpu, state.cr4.value());
if (ctx.msrSTAR != state.star.value())
CPUMSetGuestMsr(pVCpu, MSR_K6_STAR, state.star.value());
if (ctx.msrLSTAR != state.lstar.value())
CPUMSetGuestMsr(pVCpu, MSR_K8_LSTAR, state.lstar.value());
if (ctx.msrCSTAR != state.cstar.value())
CPUMSetGuestMsr(pVCpu, MSR_K8_CSTAR, state.cstar.value());
if (ctx.msrSFMASK != state.fmask.value())
CPUMSetGuestMsr(pVCpu, MSR_K8_SF_MASK, state.fmask.value());
if (ctx.msrKERNELGSBASE != state.kernel_gs_base.value())
CPUMSetGuestMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, state.kernel_gs_base.value());
uint32_t const tpr = state.tpr.value();
/* update cached state */
Assert(!VMX_ENTRY_INT_INFO_IS_VALID(state.inj_info.value()));
_cached_state.intr_state = state.intr_state.value();
_cached_state.ctrl_primary = state.ctrl_primary.value();
_cached_state.ctrl_secondary = state.ctrl_secondary.value();
/* clear blocking by MOV SS or STI bits */
if (_cached_state.intr_state & 3)
_cached_state.intr_state &= ~3U;
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
_vmcpu.cpum.s.fUseFlags |= CPUM_USED_FPU_GUEST;
if (state.intr_state.value() != VMX_VMCS_GUEST_INT_STATE_NONE) {
Assert(state.intr_state.value() == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI ||
state.intr_state.value() == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
EMSetInhibitInterruptsPC(pVCpu, ctx.rip);
} else {
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
}
APICSetTpr(pVCpu, tpr);
/* import FPU state */
_vcpu.state().fpu.with_state([&] (Vcpu_state::Fpu::State const &fpu) {
::memcpy(ctx.pXStateR3, fpu._buffer, sizeof(X86FXSTATE));
});
/* do SVM/VMX-specific transfers */
VIRT::transfer_state_to_vbox(state, _vmcpu, ctx);
}
template <typename T> bool Sup::Vcpu_impl<T>::_check_and_request_irq_window()
{
PVMCPU pVCpu = &_vmcpu;
if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
return false;
if (!TRPMHasTrap(pVCpu) &&
!VMCPU_FF_IS_SET(pVCpu, (VMCPU_FF_INTERRUPT_APIC |
VMCPU_FF_INTERRUPT_PIC)))
return false;
_vcpu.state().inj_info.charge(REQ_IRQ_WINDOW_EXIT);
return true;
}
template <typename T> bool Sup::Vcpu_impl<T>::_continue_hw_accelerated()
{
uint32_t check_vm = VM_FF_HM_TO_R3_MASK
| VM_FF_REQUEST
| VM_FF_PGM_POOL_FLUSH_PENDING
| VM_FF_PDM_DMA;
uint32_t check_vmcpu = VMCPU_FF_HM_TO_R3_MASK
| VMCPU_FF_PGM_SYNC_CR3
| VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
| VMCPU_FF_REQUEST;
if (!VM_FF_IS_SET(&_vm, check_vm) &&
!VMCPU_FF_IS_SET(&_vmcpu, check_vmcpu))
return true;
Assert(!(VM_FF_IS_SET(&_vm, VM_FF_PGM_NO_MEMORY)));
#define VERBOSE_VM(flag) \
if (VM_FF_IS_SET(&_vm, flag)) log("flag ", #flag, " (", Hex(flag), ") pending")
#define VERBOSE_VMCPU(flag) \
if (VMCPU_FF_IS_SET(&_vmcpu, flag)) log("flag ", #flag, " (", Hex(flag), ") pending")
if (false && VM_FF_IS_SET(&_vm, check_vm)) {
log("VM_FF=", Hex(_vm.fGlobalForcedActions));
VERBOSE_VM(VM_FF_TM_VIRTUAL_SYNC);
VERBOSE_VM(VM_FF_PGM_NEED_HANDY_PAGES);
/* handled by the assertion above
VERBOSE_VM(VM_FF_PGM_NO_MEMORY); */
VERBOSE_VM(VM_FF_PDM_QUEUES);
VERBOSE_VM(VM_FF_EMT_RENDEZVOUS);
VERBOSE_VM(VM_FF_REQUEST);
VERBOSE_VM(VM_FF_PGM_POOL_FLUSH_PENDING);
VERBOSE_VM(VM_FF_PDM_DMA);
}
if (false && VMCPU_FF_IS_SET(&_vmcpu, check_vmcpu)) {
log("VMCPU_FF=", Hex(_vmcpu.fLocalForcedActions));
VERBOSE_VMCPU(VMCPU_FF_TO_R3);
/* when this flag gets set, a pause request follows
VERBOSE_VMCPU(VMCPU_FF_TIMER); */
VERBOSE_VMCPU(VMCPU_FF_PDM_CRITSECT);
VERBOSE_VMCPU(VMCPU_FF_PGM_SYNC_CR3);
VERBOSE_VMCPU(VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
VERBOSE_VMCPU(VMCPU_FF_REQUEST);
}
#undef VERBOSE_VMCPU
#undef VERBOSE_VM
return false;
}
template <typename T>
typename Sup::Vcpu_impl<T>::Current_state Sup::Vcpu_impl<T>::_handle_npt_ept(VBOXSTRICTRC &rc)
{
rc = VINF_EM_RAW_EMULATE_INSTR;
RTGCPHYS const GCPhys = RT_ALIGN(_vcpu.state().qual_secondary.value(), X86_PAGE_SIZE);
PPGMRAMRANGE const pRam = pgmPhysGetRangeAtOrAbove(&_vm, GCPhys);
if (!pRam)
return PAUSED;
RTGCPHYS const off = GCPhys - pRam->GCPhys;
if (off >= pRam->cb)
return PAUSED;
unsigned const iPage = off >> PAGE_SHIFT;
PPGMPAGE const pPage = &pRam->aPages[iPage];
/* EMHandleRCTmpl.h does not distinguish READ/WRITE rc */
if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
rc = VINF_IOM_R3_MMIO_READ_WRITE;
return PAUSED;
}
template <typename T>
typename Sup::Vcpu_impl<T>::Current_state Sup::Vcpu_impl<T>::_handle_paused()
{
Vcpu_state &state { _vcpu.state() };
Assert(state.actv_state.value() == VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
if (VMX_ENTRY_INT_INFO_IS_VALID(state.inj_info.value())) {
Assert(state.flags.value() & X86_EFL_IF);
if (state.intr_state.value() != VMX_VMCS_GUEST_INT_STATE_NONE)
Genode::log("intr state ", Genode::Hex(state.intr_state.value()),
" ", Genode::Hex(state.intr_state.value() & 0xf));
Assert(state.intr_state.value() == VMX_VMCS_GUEST_INT_STATE_NONE);
if (!_continue_hw_accelerated())
warning("unexpected pause exit");
/*
* We got a pause exit during IRQ injection and the guest is ready for
* IRQ injection. So, just continue running the vCPU.
*/
return RUNNING;
}
/* are we forced to go back to emulation mode ? */
if (!_continue_hw_accelerated()) {
/* go back to emulation mode */
return PAUSED;
}
/* check whether we have to request irq injection window */
if (_check_and_request_irq_window()) {
state.discharge();
state.inj_info.charge(state.inj_info.value());
_irq_window = true;
return RUNNING;
}
return PAUSED;
}
template <typename T>
typename Sup::Vcpu_impl<T>::Current_state Sup::Vcpu_impl<T>::_handle_startup()
{
return PAUSED;
}
template <typename T>
typename Sup::Vcpu_impl<T>::Current_state Sup::Vcpu_impl<T>::_handle_irq_window()
{
Vcpu_state &state { _vcpu.state() };
state.discharge();
PVMCPU pVCpu = &_vmcpu;
Assert(state.intr_state.value() == VMX_VMCS_GUEST_INT_STATE_NONE);
Assert(state.flags.value() & X86_EFL_IF);
Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
Assert(!VMX_ENTRY_INT_INFO_IS_VALID(state.inj_info.value()));
Assert(_irq_window);
_irq_window = false;
/* request current tpr state from guest, it may block IRQs */
APICSetTpr(pVCpu, state.tpr_threshold.value());
if (!TRPMHasTrap(pVCpu)) {
bool res = VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
if (res)
warning("NMI was set");
if (VMCPU_FF_IS_SET(pVCpu, (VMCPU_FF_INTERRUPT_APIC |
VMCPU_FF_INTERRUPT_PIC))) {
uint8_t irq;
int rc = PDMGetInterrupt(pVCpu, &irq);
Assert(RT_SUCCESS(rc));
rc = TRPMAssertTrap(pVCpu, irq, TRPM_HARDWARE_INT);
Assert(RT_SUCCESS(rc));
}
if (!TRPMHasTrap(pVCpu)) {
/* happens if APICSetTpr (see above) masks the IRQ */
state.inj_info.charge(VMX_ENTRY_INT_INFO_NONE);
Genode::error("virq window pthread aaaaaaa while loop");
return PAUSED;
}
}
/*
* If we have no IRQ for injection, something with requesting the
* IRQ window went wrong. Probably it was forgotten to be reset.
*/
Assert(TRPMHasTrap(pVCpu));
/* interrupt can be dispatched */
uint8_t u8Vector;
TRPMEVENT enmType;
SVMEVENT Event;
uint32_t u32ErrorCode;
RTGCUINT cr2;
Event.u = 0;
/* If a new event is pending, then dispatch it now. */
int rc = TRPMQueryTrapAll(pVCpu, &u8Vector, &enmType, &u32ErrorCode, &cr2, 0, 0);
AssertRC(rc);
Assert(enmType == TRPM_HARDWARE_INT);
Assert(u8Vector != X86_XCPT_NMI);
/* Clear the pending trap. */
rc = TRPMResetTrap(pVCpu);
AssertRC(rc);
Event.n.u8Vector = u8Vector;
Event.n.u1Valid = 1;
Event.n.u32ErrorCode = u32ErrorCode;
Assert(VMX_ENTRY_INT_INFO_TYPE_EXT_INT == SVM_EVENT_EXTERNAL_IRQ);
Event.n.u3Type = VMX_ENTRY_INT_INFO_TYPE_EXT_INT;
state.inj_info.charge(Event.u);
state.inj_error.charge(Event.n.u32ErrorCode);
return RUNNING;
}
template <typename VIRT> VBOXSTRICTRC Sup::Vcpu_impl<VIRT>::_switch_to_hw()
{
Handle_exit_result result;
do {
_current_state = RUNNING;
/* run vCPU until next exit */
_emt.switch_to_vcpu();
/*
* We left the VM, so we should "run" on next switch_to_vcpu(). Currently,
* this may be changed by Sup::Vcpu::pause(), which induces a synchronized
* "pause" exit on next switch.
*/
_next_state = RUN;
result = VIRT::handle_exit(_vcpu.state());
switch (result.state) {
case Exit_state::STARTUP:
_current_state = _handle_startup();
break;
case Exit_state::IRQ_WINDOW:
_current_state = _handle_irq_window();
break;
case Exit_state::PAUSED:
_current_state = _handle_paused();
break;
case Exit_state::NPT_EPT:
_current_state = _handle_npt_ept(result.rc);
break;
case Exit_state::DEFAULT:
case Exit_state::ERROR:
_current_state = PAUSED;
break;
}
} while (_current_state == RUNNING);
return result.rc;
}
/********************
** Vcpu interface **
********************/
static timespec add_timespec_ns(timespec a, ::uint64_t ns)
{
enum { NSEC_PER_SEC = 1'000'000'000ull };
long sec = a.tv_sec;
while (a.tv_nsec >= NSEC_PER_SEC) {
a.tv_nsec -= NSEC_PER_SEC;
sec++;
}
while (ns >= NSEC_PER_SEC) {
ns -= NSEC_PER_SEC;
sec++;
}
long nsec = a.tv_nsec + ns;
while (nsec >= NSEC_PER_SEC) {
nsec -= NSEC_PER_SEC;
sec++;
}
return timespec { sec, nsec };
}
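/*
 * Editorial sketch, not part of the commit: add_timespec_ns() carries
 * excess nanoseconds over into whole seconds, e.g. 1.8 s + 1.5 s = 3.3 s.
 */
static void add_timespec_ns_example()
{
	timespec const t = add_timespec_ns(timespec { 1, 800'000'000 }, 1'500'000'000ull);

	Assert(t.tv_sec == 3 && t.tv_nsec == 300'000'000);
}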
template <typename T> void Sup::Vcpu_impl<T>::halt(Genode::uint64_t const wait_ns)
{
/* calculate timeout */
timespec ts { 0, 0 };
clock_gettime(CLOCK_REALTIME, &ts);
ts = add_timespec_ns(ts, wait_ns);
/* wait for condition or timeout */
pthread_mutex_lock(&_halt_mutex);
pthread_cond_timedwait(&_halt_cond, &_halt_mutex, &ts);
pthread_mutex_unlock(&_halt_mutex);
}
template <typename T> void Sup::Vcpu_impl<T>::wake_up()
{
pthread_mutex_lock(&_halt_mutex);
pthread_cond_signal(&_halt_cond);
pthread_mutex_unlock(&_halt_mutex);
}
template <typename T> void Sup::Vcpu_impl<T>::pause()
{
/* skip pause request as we requested interrupt-window exit already */
if (_irq_window)
return;
/* XXX why do we need this special barrier here but nowhere else ? */
memory_barrier();
if (_current_state != PAUSED)
_vcpu.pause();
_next_state = PAUSE_EXIT;
}
template <typename T> VBOXSTRICTRC Sup::Vcpu_impl<T>::run()
{
PVMCPU pVCpu = &_vmcpu;
CPUMCTX &ctx = *CPUMQueryGuestCtxPtr(pVCpu);
/* mimic state machine implemented in nemHCWinRunGC() etc. */
VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM);
_transfer_state_to_vcpu(ctx);
/* XXX move this into _transfer_state_to_vcpu ? */
/* check whether to request interrupt window for injection */
_irq_window = _check_and_request_irq_window();
VBOXSTRICTRC const rc = _switch_to_hw();
_transfer_state_to_vbox(ctx);
Assert(_vcpu.state().actv_state.value() == VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
/* see hmR0VmxExitToRing3 - sync recompiler state */
CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR |
CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR |
CPUM_CHANGED_IDTR | CPUM_CHANGED_TR |
CPUM_CHANGED_HIDDEN_SEL_REGS |
CPUM_CHANGED_GLOBAL_TLB_FLUSH);
/* mimic state machine implemented in nemHCWinRunGC() etc. */
VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
/*
* Dispatch write to MSR_KVM_SYSTEM_TIME_NEW to emulate
* gimR0KvmUpdateSystemTime before entering the gimKvmWriteMsr function.
*/
if (rc == VINF_CPUM_R3_MSR_WRITE) {
enum { MSR_KVM_SYSTEM_TIME_NEW = 0x4b564d01 };
if (ctx.ecx == MSR_KVM_SYSTEM_TIME_NEW)
Sup::update_gim_system_time(_vm, _vmcpu);
}
/* track guest mode changes - see VMM/VMMAll/IEMAllCImpl.cpp.h */
PGMChangeMode(pVCpu, ctx.cr0, ctx.cr4, ctx.msrEFER);
/* evaluated in VMM/include/EMHandleRCTmpl.h */
return rc;
}
template <typename VIRT>
Sup::Vcpu_impl<VIRT>::Vcpu_impl(Env &env, VM &vm, Vm_connection &vm_con,
Cpu_index cpu, Pthread::Emt &emt)
:
_emt(emt), _cpu(cpu), _vm(vm), _vmcpu(*vm.apCpusR3[cpu.value]),
_vcpu(vm_con, _alloc, _handler, VIRT::exit_config)
{
pthread_mutexattr_t _attr;
pthread_mutexattr_init(&_attr);
pthread_cond_init(&_halt_cond, nullptr);
pthread_mutexattr_settype(&_attr, PTHREAD_MUTEX_ERRORCHECK);
pthread_mutex_init(&_halt_mutex, &_attr);
/* run vCPU until initial startup exception */
_vcpu.run();
_switch_to_hw();
}
/*****************************
** vCPU creation functions **
*****************************/
Sup::Vcpu & Sup::Vcpu::create_svm(Genode::Env &env, VM &vm, Vm_connection &vm_con,
Cpu_index cpu, Pthread::Emt &emt)
{
return *new Vcpu_impl<Svm>(env, vm, vm_con, cpu, emt);
}
Sup::Vcpu & Sup::Vcpu::create_vmx(Genode::Env &env, VM &vm, Vm_connection &vm_con,
Cpu_index cpu, Pthread::Emt &emt)
{
return *new Vcpu_impl<Vmx>(env, vm, vm_con, cpu, emt);
}

View File

@ -0,0 +1,53 @@
/*
* \brief SUPLib vCPU utility
* \author Alexander Boettcher
* \author Norman Feske
* \author Christian Helmuth
*/
/*
* Copyright (C) 2013-2021 Genode Labs GmbH
*
* This file is distributed under the terms of the GNU General Public License
* version 2.
*/
#ifndef _SUP_VCPU_H_
#define _SUP_VCPU_H_
/* Genode includes */
#include <base/env.h>
#include <vm_session/connection.h>
/* local includes */
#include <sup.h>
namespace Sup { struct Vcpu; }
namespace Pthread { struct Emt; }
struct Sup::Vcpu : Genode::Interface
{
/* enter VM to run vCPU (called by EMT) */
virtual VBOXSTRICTRC run() = 0;
/* request vCPU to exit VM with pause */
virtual void pause() = 0;
/* halt until woken up or timeout expiration (called by EMT) */
virtual void halt(Genode::uint64_t const wait_ns) = 0;
/* wake up halted EMT */
virtual void wake_up() = 0;
/* create VMX vCPU */
static Vcpu & create_vmx(Genode::Env &, VM &, Vm_connection &, Cpu_index, Pthread::Emt &);
/* create SVM vCPU */
static Vcpu & create_svm(Genode::Env &, VM &, Vm_connection &, Cpu_index, Pthread::Emt &);
};
#endif /* _SUP_VCPU_H_ */
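A minimal sketch of how an EMT drives this interface, assuming the Sup::Vm accessors introduced in this commit; the emt_loop() name and the rc dispatch are illustrative, not part of the commit:

/* hypothetical EMT-side loop, mirroring nemR3NativeRunGC and vmmr0_gvmm_sched_halt */
void emt_loop(Sup::Vm &vm, Sup::Cpu_index cpu)
{
	for (;;) {
		VBOXSTRICTRC rc = VINF_SUCCESS;

		/* enter the guest and execute the vCPU until the next VM exit */
		vm.with_vcpu(cpu, [&] (Sup::Vcpu &vcpu) { rc = vcpu.run(); });

		/* a halting guest blocks the EMT until wake_up() or timeout */
		if (rc == VINF_EM_HALT)
			vm.with_vcpu(cpu, [&] (Sup::Vcpu &vcpu) { vcpu.halt(RT_NS_1MS); });

		/* ...all other rc values are dispatched by VirtualBox's EM loop... */
	}
}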

View File

@ -0,0 +1,226 @@
/*
* \brief SUPLib vCPU SVM utilities
* \author Norman Feske
* \author Alexander Boettcher
* \author Christian Helmuth
* \date 2013-08-21
*
* This header is private to sup_vcpu.cc.
*/
/*
* Copyright (C) 2013-2021 Genode Labs GmbH
*
* This file is distributed under the terms of the GNU General Public License
* version 2.
*/
#ifndef _SUP_VCPU_SVM_H_
#define _SUP_VCPU_SVM_H_
#include <VBox/vmm/hm_svm.h>
struct Sup::Svm
{
private:
enum Exit_condition
{
VCPU_SVM_NPT = 0xfc,
VCPU_SVM_INVALID = 0xfd,
VCPU_STARTUP = 0xfe,
VCPU_PAUSED = 0xff,
};
static ::uint64_t const _intercepts;
public:
/* prevent instantiation */
Svm() = delete;
static Genode::Vm_connection::Exit_config const exit_config;
static inline unsigned ctrl_primary();
static inline unsigned ctrl_secondary();
static inline void transfer_state_to_vbox(Vcpu_state const &, VMCPU &, CPUMCTX &);
static inline void transfer_state_to_vcpu(Vcpu_state &state, CPUMCTX const &);
static inline Handle_exit_result handle_exit(Vcpu_state &);
};
Genode::Vm_connection::Exit_config const Sup::Svm::exit_config { /* ... */ };
::uint64_t const Sup::Svm::_intercepts = SVM_CTRL_INTERCEPT_INTR
| SVM_CTRL_INTERCEPT_NMI
| SVM_CTRL_INTERCEPT_INIT
| SVM_CTRL_INTERCEPT_RDPMC
| SVM_CTRL_INTERCEPT_CPUID
| SVM_CTRL_INTERCEPT_RSM
| SVM_CTRL_INTERCEPT_HLT
| SVM_CTRL_INTERCEPT_IOIO_PROT
| SVM_CTRL_INTERCEPT_MSR_PROT
| SVM_CTRL_INTERCEPT_INVLPGA
| SVM_CTRL_INTERCEPT_SHUTDOWN
| SVM_CTRL_INTERCEPT_RDTSC
| SVM_CTRL_INTERCEPT_FERR_FREEZE
| SVM_CTRL_INTERCEPT_VMRUN
| SVM_CTRL_INTERCEPT_VMMCALL
| SVM_CTRL_INTERCEPT_VMLOAD
| SVM_CTRL_INTERCEPT_VMSAVE
| SVM_CTRL_INTERCEPT_STGI
| SVM_CTRL_INTERCEPT_CLGI
| SVM_CTRL_INTERCEPT_SKINIT
| SVM_CTRL_INTERCEPT_WBINVD
| SVM_CTRL_INTERCEPT_MONITOR
| SVM_CTRL_INTERCEPT_RDTSCP
| SVM_CTRL_INTERCEPT_MWAIT;
unsigned Sup::Svm::ctrl_primary()
{
return (unsigned)(_intercepts & 0xffffffff);
}
unsigned Sup::Svm::ctrl_secondary()
{
return (unsigned)((_intercepts >> 32) & 0xffffffff);
}
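/*
 * Editorial sketch, not part of the commit: the 64-bit intercept mask
 * spans both 32-bit intercept-control words of the VMCB, so a low bit
 * like HLT ends up in ctrl_primary() while a high bit like VMRUN ends
 * up in ctrl_secondary().
 */
static void intercept_split_example()
{
	Assert(Sup::Svm::ctrl_primary()   & (unsigned)SVM_CTRL_INTERCEPT_HLT);
	Assert(Sup::Svm::ctrl_secondary() & (unsigned)(SVM_CTRL_INTERCEPT_VMRUN >> 32));
}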
#define GENODE_SVM_ASSERT_SELREG(REG) \
AssertMsg(!ctx.REG.Attr.n.u1Present || \
(ctx.REG.Attr.n.u1Granularity \
? (ctx.REG.u32Limit & 0xfffU) == 0xfffU \
: ctx.REG.u32Limit <= 0xfffffU), \
("%u %u %#x %#x %#llx\n", ctx.REG.Attr.n.u1Present, \
ctx.REG.Attr.n.u1Granularity, ctx.REG.u32Limit, \
ctx.REG.Attr.u, ctx.REG.u64Base))
#define GENODE_READ_SELREG(REG) \
ctx.REG.Sel = state.REG.value().sel; \
ctx.REG.ValidSel = state.REG.value().sel; \
ctx.REG.fFlags = CPUMSELREG_FLAGS_VALID; \
ctx.REG.u32Limit = state.REG.value().limit; \
ctx.REG.u64Base = state.REG.value().base; \
ctx.REG.Attr.u = sel_ar_conv_from_genode(state.REG.value().ar)
void Sup::Svm::transfer_state_to_vbox(Genode::Vcpu_state const &state, VMCPU &vmcpu, CPUMCTX &ctx)
{
GENODE_READ_SELREG(cs);
GENODE_READ_SELREG(ds);
GENODE_READ_SELREG(es);
GENODE_READ_SELREG(fs);
GENODE_READ_SELREG(gs);
GENODE_READ_SELREG(ss);
if (!ctx.cs.Attr.n.u1Granularity
&& ctx.cs.Attr.n.u1Present
&& ctx.cs.u32Limit > UINT32_C(0xfffff))
{
Assert((ctx.cs.u32Limit & 0xfff) == 0xfff);
ctx.cs.Attr.n.u1Granularity = 1;
}
GENODE_SVM_ASSERT_SELREG(cs);
GENODE_SVM_ASSERT_SELREG(ds);
GENODE_SVM_ASSERT_SELREG(es);
GENODE_SVM_ASSERT_SELREG(fs);
GENODE_SVM_ASSERT_SELREG(gs);
GENODE_SVM_ASSERT_SELREG(ss);
GENODE_READ_SELREG(ldtr);
GENODE_READ_SELREG(tr);
CPUMSetGuestEFER(&vmcpu, CPUMGetGuestEFER(&vmcpu) & ~(::uint64_t)MSR_K6_EFER_SVME);
}
#undef GENODE_SVM_ASSERT_SELREG
#undef GENODE_READ_SELREG
#define GENODE_WRITE_SELREG(REG) \
Assert(ctx.REG.fFlags & CPUMSELREG_FLAGS_VALID); \
Assert(ctx.REG.ValidSel == ctx.REG.Sel); \
state.REG.charge(Segment { .sel = ctx.REG.Sel, \
.ar = sel_ar_conv_to_genode(ctx.REG.Attr.u), \
.limit = ctx.REG.u32Limit, \
.base = ctx.REG.u64Base});
void Sup::Svm::transfer_state_to_vcpu(Genode::Vcpu_state &state, CPUMCTX const &ctx)
{
typedef Genode::Vcpu_state::Segment Segment;
state.efer.charge(state.efer.value() | MSR_K6_EFER_SVME);
GENODE_WRITE_SELREG(cs);
GENODE_WRITE_SELREG(ds);
GENODE_WRITE_SELREG(es);
GENODE_WRITE_SELREG(fs);
GENODE_WRITE_SELREG(gs);
GENODE_WRITE_SELREG(ss);
GENODE_WRITE_SELREG(ldtr);
GENODE_WRITE_SELREG(tr);
}
#undef GENODE_WRITE_SELREG
Sup::Handle_exit_result Sup::Svm::handle_exit(Vcpu_state &state)
{
/*
* Table B-1. 070h 63:0 EXITCODE
*
* Appendix C SVM Intercept Exit Codes defines only
* 0x000..0x403 plus -1 and -2
*/
unsigned short const exit = state.exit_reason & 0xffff;
switch (exit) {
case SVM_EXIT_CPUID:
case SVM_EXIT_HLT:
case SVM_EXIT_INVLPGA:
case SVM_EXIT_IOIO:
case SVM_EXIT_MSR:
case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR15:
case SVM_EXIT_RDTSC:
case SVM_EXIT_RDTSCP:
case SVM_EXIT_WBINVD:
Assert(state.actv_state.value() == VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
Assert(!VMX_ENTRY_INT_INFO_IS_VALID(state.inj_info.value()));
return { Exit_state::DEFAULT, VINF_EM_RAW_EMULATE_INSTR };
case SVM_EXIT_VINTR:
return { Exit_state::IRQ_WINDOW, VINF_SUCCESS };
case VCPU_SVM_NPT:
return { Exit_state::NPT_EPT, VINF_EM_RAW_EMULATE_INSTR };
case VCPU_PAUSED:
return { Exit_state::PAUSED, VINF_SUCCESS };
case VCPU_STARTUP:
return { Exit_state::STARTUP, VINF_SUCCESS };
/* error conditions */
case VCPU_SVM_INVALID:
error("invalid SVM guest state - dead");
return { Exit_state::ERROR, VERR_EM_GUEST_CPU_HANG };
case SVM_EXIT_SHUTDOWN:
error("unexpected SVM exit shutdown - dead");
return { Exit_state::ERROR, VERR_EM_GUEST_CPU_HANG };
default:
return { Exit_state::ERROR, VERR_EM_GUEST_CPU_HANG };
}
}
#endif /* _SUP_VCPU_SVM_H_ */

View File

@ -0,0 +1,253 @@
/*
* \brief SUPLib vCPU VMX utilities
* \author Norman Feske
* \author Alexander Boettcher
* \author Christian Helmuth
* \date 2013-08-21
*
* This header is private to sup_vcpu.cc.
*/
/*
* Copyright (C) 2013-2021 Genode Labs GmbH
*
* This file is distributed under the terms of the GNU General Public License
* version 2.
*/
#ifndef _SUP_VCPU_VMX_H_
#define _SUP_VCPU_VMX_H_
#include <VBox/vmm/hm_vmx.h>
class Sup::Vmx
{
private:
enum { VMCS_SEG_UNUSABLE = 0x10000 };
enum Exit_condition
{
VCPU_STARTUP = 0xfe,
VCPU_PAUSED = 0xff,
};
static inline void _handle_default(Vcpu_state &);
static inline void _handle_startup(Vcpu_state &);
static inline void _handle_invalid(Vcpu_state const &);
public:
/* prevent instantiation */
Vmx() = delete;
static Vm_connection::Exit_config const exit_config;
static inline unsigned ctrl_primary();
static inline unsigned ctrl_secondary();
static inline void transfer_state_to_vbox(Vcpu_state const &, VMCPU &, CPUMCTX &);
static inline void transfer_state_to_vcpu(Vcpu_state &, CPUMCTX const &);
static inline Handle_exit_result handle_exit(Vcpu_state &);
};
Vm_connection::Exit_config const Sup::Vmx::exit_config { /* ... */ };
unsigned Sup::Vmx::ctrl_primary()
{
/* primary VM exit controls (from src/VBox/VMM/VMMR0/HWVMXR0.cpp) */
return 0
| VMX_PROC_CTLS_HLT_EXIT
| VMX_PROC_CTLS_MOV_DR_EXIT
| VMX_PROC_CTLS_UNCOND_IO_EXIT
| VMX_PROC_CTLS_USE_TPR_SHADOW
| VMX_PROC_CTLS_RDPMC_EXIT
;
}
unsigned Sup::Vmx::ctrl_secondary()
{
/* secondary VM exit controls (from src/VBox/VMM/VMMR0/HWVMXR0.cpp) */
return 0
| VMX_PROC_CTLS2_APIC_REG_VIRT
| VMX_PROC_CTLS2_WBINVD_EXIT
| VMX_PROC_CTLS2_UNRESTRICTED_GUEST
| VMX_PROC_CTLS2_VPID
| VMX_PROC_CTLS2_RDTSCP
| VMX_PROC_CTLS2_EPT
| VMX_PROC_CTLS2_INVPCID
;
}
#define GENODE_READ_SELREG_REQUIRED(REG) \
(ctx.REG.Sel != state.REG.value().sel) || \
(ctx.REG.ValidSel != state.REG.value().sel) || \
(ctx.REG.fFlags != CPUMSELREG_FLAGS_VALID) || \
(ctx.REG.u32Limit != state.REG.value().limit) || \
(ctx.REG.u64Base != state.REG.value().base) || \
(ctx.REG.Attr.u != sel_ar_conv_from_genode(state.REG.value().ar))
#define GENODE_READ_SELREG(REG) \
ctx.REG.Sel = state.REG.value().sel; \
ctx.REG.ValidSel = state.REG.value().sel; \
ctx.REG.fFlags = CPUMSELREG_FLAGS_VALID; \
ctx.REG.u32Limit = state.REG.value().limit; \
ctx.REG.u64Base = state.REG.value().base; \
ctx.REG.Attr.u = sel_ar_conv_from_genode(state.REG.value().ar)
void Sup::Vmx::transfer_state_to_vbox(Vcpu_state const &state, VMCPU &vmcpu, CPUMCTX &ctx)
{
GENODE_READ_SELREG(cs);
GENODE_READ_SELREG(ds);
GENODE_READ_SELREG(es);
GENODE_READ_SELREG(fs);
GENODE_READ_SELREG(gs);
GENODE_READ_SELREG(ss);
if (GENODE_READ_SELREG_REQUIRED(ldtr)) {
GENODE_READ_SELREG(ldtr);
CPUMSetChangedFlags(&vmcpu, CPUM_CHANGED_LDTR);
}
if (GENODE_READ_SELREG_REQUIRED(tr)) {
GENODE_READ_SELREG(tr);
CPUMSetChangedFlags(&vmcpu, CPUM_CHANGED_TR);
}
}
#undef GENODE_READ_SELREG_REQUIRED
#undef GENODE_READ_SELREG
#define GENODE_WRITE_SELREG(REG) \
Assert(ctx.REG.fFlags & CPUMSELREG_FLAGS_VALID); \
Assert(ctx.REG.ValidSel == ctx.REG.Sel); \
state.REG.charge( Segment { .sel = ctx.REG.Sel, \
.ar = sel_ar_conv_to_genode(ctx.REG.Attr.u ? : VMCS_SEG_UNUSABLE), \
.limit = ctx.REG.u32Limit, \
.base = ctx.REG.u64Base });
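/*
 * Editorial note, not part of the commit: the GNU "?:" shorthand above
 * substitutes VMCS_SEG_UNUSABLE when the attribute word is zero (an
 * unusable segment). An expanded GENODE_WRITE_SELREG(cs) thus behaves
 * like:
 *
 *   uint32_t const attr = ctx.cs.Attr.u ? ctx.cs.Attr.u : VMCS_SEG_UNUSABLE;
 *   state.cs.charge(Segment { .sel   = ctx.cs.Sel,
 *                             .ar    = sel_ar_conv_to_genode(attr),
 *                             .limit = ctx.cs.u32Limit,
 *                             .base  = ctx.cs.u64Base });
 */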
void Sup::Vmx::transfer_state_to_vcpu(Vcpu_state &state, CPUMCTX const &ctx)
{
typedef Vcpu_state::Segment Segment;
GENODE_WRITE_SELREG(cs);
GENODE_WRITE_SELREG(ds);
GENODE_WRITE_SELREG(es);
GENODE_WRITE_SELREG(fs);
GENODE_WRITE_SELREG(gs);
GENODE_WRITE_SELREG(ss);
if (ctx.ldtr.Sel == 0) {
state.ldtr.charge(Segment { .sel = 0,
.ar = sel_ar_conv_to_genode(0x82),
.limit = 0,
.base = 0 });
} else {
state.ldtr.charge(Segment { .sel = ctx.ldtr.Sel,
.ar = sel_ar_conv_to_genode(ctx.ldtr.Attr.u),
.limit = ctx.ldtr.u32Limit,
.base = ctx.ldtr.u64Base });
}
state.tr.charge(Segment { .sel = ctx.tr.Sel,
.ar = sel_ar_conv_to_genode(ctx.tr.Attr.u),
.limit = ctx.tr.u32Limit,
.base = ctx.tr.u64Base });
}
#undef GENODE_WRITE_SELREG
void Sup::Vmx::_handle_invalid(Vcpu_state const &state)
{
unsigned const dubious = state.inj_info.value() |
state.intr_state.value() |
state.actv_state.value();
if (dubious)
warning(__func__, " - dubious -"
" inj_info=", Hex(state.inj_info.value()),
" inj_error=", Hex(state.inj_error.value()),
" intr_state=", Hex(state.intr_state.value()),
" actv_state=", Hex(state.actv_state.value()));
error("invalid guest state - dead");
}
void Sup::Vmx::_handle_default(Vcpu_state &state)
{
Assert(state.actv_state.value() == VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
Assert(!VMX_ENTRY_INT_INFO_IS_VALID(state.inj_info.value()));
}
Sup::Handle_exit_result Sup::Vmx::handle_exit(Vcpu_state &state)
{
/* table 24-14. Format of Exit Reason - 15:0 Basic exit reason */
unsigned short const exit = state.exit_reason & 0xffff;
switch (exit) {
case VMX_EXIT_INIT_SIGNAL:
case VMX_EXIT_TASK_SWITCH:
case VMX_EXIT_CPUID:
case VMX_EXIT_RDTSCP:
case VMX_EXIT_VMCALL:
case VMX_EXIT_WBINVD:
case VMX_EXIT_MOV_DRX:
case VMX_EXIT_XSETBV:
case VMX_EXIT_MOV_CRX:
case VMX_EXIT_HLT:
_handle_default(state);
return { Exit_state::DEFAULT, VINF_EM_RAW_EMULATE_INSTR };
case VMX_EXIT_INT_WINDOW:
return { Exit_state::IRQ_WINDOW, VINF_SUCCESS };
case VMX_EXIT_EPT_VIOLATION:
return { Exit_state::NPT_EPT, VINF_EM_RAW_EMULATE_INSTR };
case VMX_EXIT_IO_INSTR:
_handle_default(state);
/* EMHandleRCTmpl.h does not distinguish READ/WRITE rc */
return { Exit_state::DEFAULT, VINF_IOM_R3_IOPORT_WRITE };
case VMX_EXIT_TPR_BELOW_THRESHOLD:
_handle_default(state);
/* the instruction causing the exit has already been executed */
return { Exit_state::DEFAULT, VINF_SUCCESS };
case VMX_EXIT_RDMSR:
_handle_default(state);
return { Exit_state::DEFAULT, VINF_CPUM_R3_MSR_READ };
case VMX_EXIT_WRMSR:
_handle_default(state);
return { Exit_state::DEFAULT, VINF_CPUM_R3_MSR_WRITE };
case VCPU_PAUSED:
return { Exit_state::PAUSED, VINF_SUCCESS };
case VCPU_STARTUP:
return { Exit_state::STARTUP, VINF_SUCCESS };
/* error conditions */
case VMX_EXIT_ERR_INVALID_GUEST_STATE:
_handle_invalid(state);
return { Exit_state::ERROR, VERR_EM_GUEST_CPU_HANG };
case VMX_EXIT_TRIPLE_FAULT:
return { Exit_state::ERROR, VINF_EM_TRIPLE_FAULT };
default:
return { Exit_state::ERROR, VERR_EM_GUEST_CPU_HANG };
}
}
#endif /* _SUP_VCPU_VMX_H_ */

View File

@ -21,7 +21,7 @@
/* local includes */
#include <sup_vm.h>
#include <vcpu.h>
#include <sup_vcpu.h>
static size_t gvm_size(Sup::Cpu_count cpu_count)
@ -85,17 +85,17 @@ Sup::Vm & Sup::Vm::create(PSUPDRVSESSION psession, Cpu_count cpu_count)
}
void Sup::Vm::register_vcpu_handler(Cpu_index cpu_index, Vcpu_handler &handler)
void Sup::Vm::register_vcpu(Cpu_index cpu_index, Vcpu &vcpu)
{
if (cpu_index.value >= VM::cCpus)
throw Cpu_index_out_of_range();
VMCPU &cpu = GVM::aCpus[cpu_index.value];
VMCPU &vmcpu = GVM::aCpus[cpu_index.value];
/*
* We misuse the pVCpuR0ForVtg member for storing the pointer
* to the CPU's corresponding Vcpu_handler.
* to the CPU's corresponding Vcpu.
*/
cpu.pVCpuR0ForVtg = (RTR0PTR)&handler;
vmcpu.pVCpuR0ForVtg = (RTR0PTR)&vcpu;
}

View File

@ -24,7 +24,7 @@
namespace Sup {
struct Vm;
struct Vcpu_handler;
struct Vcpu;
}
struct Sup::Vm : GVM
@ -35,21 +35,21 @@ struct Sup::Vm : GVM
class Cpu_index_out_of_range : Exception { };
void register_vcpu_handler(Cpu_index cpu_index, Vcpu_handler &handler);
void register_vcpu(Cpu_index cpu_index, Vcpu &vcpu);
template <typename FN>
void with_vcpu_handler(Cpu_index cpu_index, FN const &fn)
void with_vcpu(Cpu_index cpu_index, FN const &fn)
{
if (cpu_index.value >= VM::cCpus)
throw Cpu_index_out_of_range();
VMCPU &cpu = GVM::aCpus[cpu_index.value];
VMCPU &vmcpu = GVM::aCpus[cpu_index.value];
Vcpu_handler * const handler_ptr = (Vcpu_handler *)cpu.pVCpuR0ForVtg;
Vcpu * const vcpu_ptr = (Vcpu *)vmcpu.pVCpuR0ForVtg;
Assert(handler_ptr);
Assert(vcpu_ptr);
fn(*handler_ptr);
fn(*vcpu_ptr);
}
};

View File

@ -1,103 +0,0 @@
/*
* \brief Genode specific VirtualBox SUPLib supplements
* \author Norman Feske
* \author Alexander Boettcher
* \author Christian Helmuth
* \date 2013-08-21
*/
/*
* Copyright (C) 2013-2021 Genode Labs GmbH
*
* This file is distributed under the terms of the GNU General Public License
* version 2.
*/
#ifndef _VIRTUALBOX__SVM_H_
#define _VIRTUALBOX__SVM_H_
/* based on HWSVMR0.h - adjusted to Genode */
#define GENODE_SVM_ASSERT_SELREG(REG) \
AssertMsg(!pCtx->REG.Attr.n.u1Present || \
(pCtx->REG.Attr.n.u1Granularity \
? (pCtx->REG.u32Limit & 0xfffU) == 0xfffU \
: pCtx->REG.u32Limit <= 0xfffffU), \
("%u %u %#x %#x %#llx\n", pCtx->REG.Attr.n.u1Present, \
pCtx->REG.Attr.n.u1Granularity, pCtx->REG.u32Limit, \
pCtx->REG.Attr.u, pCtx->REG.u64Base))
#define GENODE_READ_SELREG(REG) \
pCtx->REG.Sel = state.REG.value().sel; \
pCtx->REG.ValidSel = state.REG.value().sel; \
pCtx->REG.fFlags = CPUMSELREG_FLAGS_VALID; \
pCtx->REG.u32Limit = state.REG.value().limit; \
pCtx->REG.u64Base = state.REG.value().base; \
pCtx->REG.Attr.u = sel_ar_conv_from_genode(state.REG.value().ar)
static inline bool svm_save_state(Genode::Vcpu_state const &state, VM *pVM, PVMCPU pVCpu)
{
PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
GENODE_READ_SELREG(cs);
GENODE_READ_SELREG(ds);
GENODE_READ_SELREG(es);
GENODE_READ_SELREG(fs);
GENODE_READ_SELREG(gs);
GENODE_READ_SELREG(ss);
GENODE_SVM_ASSERT_SELREG(cs);
GENODE_SVM_ASSERT_SELREG(ds);
GENODE_SVM_ASSERT_SELREG(es);
GENODE_SVM_ASSERT_SELREG(fs);
GENODE_SVM_ASSERT_SELREG(gs);
GENODE_SVM_ASSERT_SELREG(ss);
GENODE_READ_SELREG(ldtr);
GENODE_READ_SELREG(tr);
return true;
}
#undef GENODE_ASSERT_SELREG
#undef GENODE_READ_SELREG
#define GENODE_WRITE_SELREG(REG) \
Assert(pCtx->REG.fFlags & CPUMSELREG_FLAGS_VALID); \
Assert(pCtx->REG.ValidSel == pCtx->REG.Sel); \
state.REG.charge(Segment { .sel = pCtx->REG.Sel, \
.ar = sel_ar_conv_to_genode(pCtx->REG.Attr.u), \
.limit = pCtx->REG.u32Limit, \
.base = pCtx->REG.u64Base});
static inline bool svm_load_state(Genode::Vcpu_state &state, VM const *pVM, PVMCPU pVCpu)
{
typedef Genode::Vcpu_state::Segment Segment;
PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
state.efer.charge(pCtx->msrEFER | MSR_K6_EFER_SVME);
/* unimplemented */
if (CPUMIsGuestInLongModeEx(pCtx))
return false;
state.efer.charge(state.efer.value() & ~MSR_K6_EFER_LME);
GENODE_WRITE_SELREG(es);
GENODE_WRITE_SELREG(ds);
GENODE_WRITE_SELREG(fs);
GENODE_WRITE_SELREG(gs);
GENODE_WRITE_SELREG(cs);
GENODE_WRITE_SELREG(ss);
GENODE_WRITE_SELREG(ldtr);
GENODE_WRITE_SELREG(tr);
return true;
}
#undef GENODE_WRITE_SELREG
#endif /* _VIRTUALBOX__SVM_H_ */

View File

@ -6,9 +6,11 @@ include $(REP_DIR)/lib/mk/virtualbox6-common.inc
CC_WARN += -Wall
SRC_CC := main.cc drivers.cc vcpu_gim.cc
SRC_CC := main.cc drivers.cc
SRC_CC += libc.cc unimpl.cc dummies.cc pdm.cc devices.cc nem.cc dynlib.cc
SRC_CC += pthread.cc network.cc
SRC_CC += pthread.cc network.cc devxhci.cc
SRC_CC += sup.cc sup_sem.cc sup_gmm.cc sup_drv.cc sup_vm.cc sup_vcpu.cc sup_gim.cc
SRC_CC += HostServices/common/message.cpp
LIBS += base
LIBS += stdcxx
@ -26,21 +28,16 @@ LIBS += $(LIB_MK_FILES:.mk=)
INC_DIR += $(call select_from_repositories,src/lib/libc)
INC_DIR += $(call select_from_repositories,src/lib/libc)/spec/x86_64
INC_DIR += $(VBOX_DIR)/Runtime/include
SRC_CC += HostServices/common/message.cpp
INC_DIR += $(REP_DIR)/src/virtualbox6
INC_DIR += $(VBOX_DIR)/HostDrivers/Support
INC_DIR += $(VBOX_DIR)/Main/include
INC_DIR += $(VBOX_DIR)/VMM/include
INC_DIR += $(VIRTUALBOX_DIR)/VBoxAPIWrap
INC_DIR += $(VBOX_DIR)/Main/xml
INC_DIR += $(VIRTUALBOX_DIR)/include/VBox/Graphics
INC_DIR += $(VBOX_DIR)/Main/src-server
INC_DIR += $(VBOX_DIR)/Main/xml
INC_DIR += $(VBOX_DIR)/NetworkServices
INC_DIR += $(VBOX_DIR)/Runtime/include
INC_DIR += $(VBOX_DIR)/VMM/include
INC_DIR += $(VIRTUALBOX_DIR)/VBoxAPIWrap
INC_DIR += $(VIRTUALBOX_DIR)/include/VBox/Graphics
# search path to 'scan_code_set_1.h'
INC_DIR += $(call select_from_repositories,src/drivers/ps2)

File diff suppressed because it is too large

View File

@ -1,235 +0,0 @@
/*
* \brief SUPLib vCPU utility
* \author Alexander Boettcher
* \author Norman Feske
* \author Christian Helmuth
*/
/*
* Copyright (C) 2013-2021 Genode Labs GmbH
*
* This file is distributed under the terms of the GNU General Public License
* version 2.
*/
#ifndef _VIRTUALBOX__VCPU_H_
#define _VIRTUALBOX__VCPU_H_
/* Genode includes */
#include <base/attached_dataspace.h>
#include <vm_session/connection.h>
#include <vm_session/handler.h>
#include <util/noncopyable.h>
/* local includes */
#include <sup.h>
namespace Sup {
struct Vcpu_handler;
struct Vcpu_handler_vmx;
struct Vcpu_handler_svm;
}
namespace Pthread { struct Emt; }
class Sup::Vcpu_handler : Genode::Noncopyable
{
protected:
static Genode::Vm_connection::Exit_config const _exit_config;
Pthread::Emt &_emt;
Genode::Vcpu_state *_state { nullptr };
bool _last_exit_triggered_by_wrmsr = false;
/* TODO move into Emt */
/* halt/wake_up */
pthread_cond_t _cond_wait;
pthread_mutex_t _mutex;
/* TODO move into Emt */
timespec _add_timespec_ns(timespec a, ::uint64_t ns) const;
/* information used for NPT/EPT handling */
Genode::addr_t _npt_ept_exit_addr { 0 };
RTGCUINT _npt_ept_errorcode { 0 };
bool _npt_ept_unmap { false };
/* state machine between EMT and EP thread of a vCPU */
enum { RUNNING, PAUSED, IRQ_WIN, NPT_EPT } _vm_state { PAUSED };
enum { PAUSE_EXIT, RUN } _next_state { RUN };
private:
bool _irq_win = false;
unsigned const _cpu_id;
PVM _vm { nullptr };
PVMCPU _vcpu { nullptr };
unsigned int _last_inj_info = 0;
unsigned int _last_inj_error = 0;
enum {
REQ_IRQWIN_EXIT = 0x1000U,
IRQ_INJ_VALID_MASK = 0x80000000UL,
IRQ_INJ_NONE = 0U,
/*
* Intel® 64 and IA-32 Architectures Software Developers Manual
* Volume 3C, Chapter 24.4.2.
* May 2012
*/
ACTIVITY_STATE_ACTIVE = 0U,
INTERRUPT_STATE_NONE = 0U,
INTERRUPT_STATE_BLOCKING_BY_STI = 1U << 0,
INTERRUPT_STATE_BLOCKING_BY_MOV_SS = 1U << 1,
};
void _update_gim_system_time();
protected:
Genode::addr_t _vm_exits = 0;
Genode::addr_t _recall_skip = 0;
Genode::addr_t _recall_req = 0;
Genode::addr_t _recall_inv = 0;
Genode::addr_t _recall_drop = 0;
Genode::addr_t _irq_request = 0;
Genode::addr_t _irq_inject = 0;
Genode::addr_t _irq_drop = 0;
struct {
unsigned intr_state = 0;
unsigned ctrl[2] = { 0, 0 };
} _next_utcb;
unsigned _ept_fault_addr_type;
Genode::uint64_t * _pdpte_map(VM *pVM, RTGCPHYS cr3);
void _switch_to_hw(PCPUMCTX pCtx);
/* VM exit handlers */
void _default_handler();
bool _recall_handler();
void _irq_window();
void _npt_ept();
void _irq_window_pthread();
inline bool _vbox_to_state(VM *pVM, PVMCPU pVCpu);
inline bool _state_to_vbox(VM *pVM, PVMCPU pVCpu);
inline bool _check_to_request_irq_window(PVMCPU pVCpu);
inline bool _continue_hw_accelerated();
virtual bool _hw_load_state(VM *, PVMCPU) = 0;
virtual bool _hw_save_state(VM *, PVMCPU) = 0;
virtual int _vm_exit_requires_instruction_emulation(PCPUMCTX) = 0;
virtual void _run_vm() = 0;
virtual void _pause_vm() = 0;
public:
enum Exit_condition
{
SVM_NPT = 0xfc,
SVM_INVALID = 0xfd,
VCPU_STARTUP = 0xfe,
RECALL = 0xff,
};
Vcpu_handler(Genode::Env &env, unsigned int cpu_id, Pthread::Emt &emt);
unsigned int cpu_id() const { return _cpu_id; }
void recall(VM &vm);
void halt(Genode::uint64_t const wait_ns);
void wake_up();
int run_hw(VM &vm);
};
class Sup::Vcpu_handler_vmx : public Vcpu_handler
{
private:
Genode::Vcpu_handler<Vcpu_handler_vmx> _handler;
Genode::Vm_connection &_vm_connection;
Genode::Vm_connection::Vcpu _vcpu;
/* VM exit handlers */
void _vmx_default();
void _vmx_startup();
void _vmx_triple();
void _vmx_irqwin();
void _vmx_mov_crx();
template <unsigned X> void _vmx_ept();
__attribute__((noreturn)) void _vmx_invalid();
void _handle_exit();
void _run_vm() override { _vcpu.run(); }
void _pause_vm() override { _vcpu.pause(); }
bool _hw_save_state(VM * pVM, PVMCPU pVCpu) override;
bool _hw_load_state(VM * pVM, PVMCPU pVCpu) override;
int _vm_exit_requires_instruction_emulation(PCPUMCTX pCtx) override;
public:
Vcpu_handler_vmx(Genode::Env &env,
unsigned int cpu_id,
Pthread::Emt &emt,
Genode::Vm_connection &vm_connection,
Genode::Allocator &alloc);
};
class Sup::Vcpu_handler_svm : public Vcpu_handler
{
private:
Genode::Vcpu_handler<Vcpu_handler_svm> _handler;
Genode::Vm_connection &_vm_connection;
Genode::Vm_connection::Vcpu _vcpu;
/* VM exit handlers */
void _svm_default();
void _svm_vintr();
void _svm_ioio();
template <unsigned X> void _svm_npt();
void _svm_startup();
void _handle_exit();
void _run_vm() override { _vcpu.run(); }
void _pause_vm() override { _vcpu.pause(); }
bool _hw_save_state(VM * pVM, PVMCPU pVCpu) override;
bool _hw_load_state(VM * pVM, PVMCPU pVCpu) override;
int _vm_exit_requires_instruction_emulation(PCPUMCTX) override;
public:
Vcpu_handler_svm(Genode::Env &env,
unsigned int cpu_id,
Pthread::Emt &emt,
Genode::Vm_connection &vm_connection,
Genode::Allocator &alloc);
};
#endif /* _VIRTUALBOX__VCPU_H_ */

View File

@ -1,111 +0,0 @@
/*
* \brief Genode specific VirtualBox SUPLib supplements
* \author Norman Feske
* \author Alexander Boettcher
* \author Christian Helmuth
* \date 2013-08-21
*/
/*
* Copyright (C) 2013-2021 Genode Labs GmbH
*
* This file is distributed under the terms of the GNU General Public License
* version 2.
*/
#ifndef _VIRTUALBOX__VMX_H_
#define _VIRTUALBOX__VMX_H_
#define GENODE_READ_SELREG_REQUIRED(REG) \
(pCtx->REG.Sel != state.REG.value().sel) || \
(pCtx->REG.ValidSel != state.REG.value().sel) || \
(pCtx->REG.fFlags != CPUMSELREG_FLAGS_VALID) || \
(pCtx->REG.u32Limit != state.REG.value().limit) || \
(pCtx->REG.u64Base != state.REG.value().base) || \
(pCtx->REG.Attr.u != sel_ar_conv_from_genode(state.REG.value().ar))
#define GENODE_READ_SELREG(REG) \
pCtx->REG.Sel = state.REG.value().sel; \
pCtx->REG.ValidSel = state.REG.value().sel; \
pCtx->REG.fFlags = CPUMSELREG_FLAGS_VALID; \
pCtx->REG.u32Limit = state.REG.value().limit; \
pCtx->REG.u64Base = state.REG.value().base; \
pCtx->REG.Attr.u = sel_ar_conv_from_genode(state.REG.value().ar)
static inline bool vmx_save_state(Genode::Vcpu_state const &state, VM *pVM, PVMCPU pVCpu)
{
PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
GENODE_READ_SELREG(cs);
GENODE_READ_SELREG(ds);
GENODE_READ_SELREG(es);
GENODE_READ_SELREG(fs);
GENODE_READ_SELREG(gs);
GENODE_READ_SELREG(ss);
if (GENODE_READ_SELREG_REQUIRED(ldtr)) {
GENODE_READ_SELREG(ldtr);
CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
}
if (GENODE_READ_SELREG_REQUIRED(tr)) {
GENODE_READ_SELREG(tr);
CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
}
return true;
}
#undef GENODE_READ_SELREG_REQUIRED
#undef GENODE_READ_SELREG
enum { VMCS_SEG_UNUSABLE = 0x10000 };
#define GENODE_WRITE_SELREG(REG) \
Assert(pCtx->REG.fFlags & CPUMSELREG_FLAGS_VALID); \
Assert(pCtx->REG.ValidSel == pCtx->REG.Sel); \
state.REG.charge( Segment { .sel = pCtx->REG.Sel, \
.ar = sel_ar_conv_to_genode(pCtx->REG.Attr.u ? : VMCS_SEG_UNUSABLE), \
.limit = pCtx->REG.u32Limit, \
.base = pCtx->REG.u64Base });
static inline bool vmx_load_state(Genode::Vcpu_state &state, VM const *pVM, PVMCPU pVCpu)
{
PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
typedef Genode::Vcpu_state::Segment Segment;
GENODE_WRITE_SELREG(es);
GENODE_WRITE_SELREG(ds);
GENODE_WRITE_SELREG(fs);
GENODE_WRITE_SELREG(gs);
GENODE_WRITE_SELREG(cs);
GENODE_WRITE_SELREG(ss);
/* ldtr */
if (pCtx->ldtr.Sel == 0) {
state.ldtr.charge(Segment { .sel = 0,
.ar = sel_ar_conv_to_genode(0x82),
.limit = 0,
.base = 0 });
} else {
state.ldtr.charge(Segment { .sel = pCtx->ldtr.Sel,
.ar = sel_ar_conv_to_genode(pCtx->ldtr.Attr.u),
.limit = pCtx->ldtr.u32Limit,
.base = pCtx->ldtr.u64Base });
}
/* tr */
state.tr.charge(Segment { .sel = pCtx->tr.Sel,
.ar = sel_ar_conv_to_genode(pCtx->tr.Attr.u),
.limit = pCtx->tr.u32Limit,
.base = pCtx->tr.u64Base });
return true;
}
#undef GENODE_WRITE_SELREG
#endif /* _VIRTUALBOX__VMX_H_ */