Mirror of https://github.com/genodelabs/genode.git
Commit faee97dd1e (parent c6ec2c1dd7)
repos/base-sel4/patches/intel_vmx_disable_vpid.patch (new file, 21 lines)
@@ -0,0 +1,21 @@
--- src/kernel/sel4/src/arch/x86/object/vcpu.c
+++ src/kernel/sel4/src/arch/x86/object/vcpu.c
@@ -285,13 +288,13 @@ init_vtx_fixed_values(bool_t useTrueMsrs)
cr4_low = x86_rdmsr_low(IA32_VMX_CR4_FIXED1_MSR);

/* Check for VPID support */
- if (!(secondary_control_low & BIT(5))) {
+// if (!(secondary_control_low & BIT(5))) {
vmx_feature_vpid = 0;
printf("vt-x: VPIDs are not supported. Expect performance degredation\n");
- } else {
- vmx_feature_vpid = 1;
- secondary_control_mask |= BIT(5);
- }
+// } else {
+// vmx_feature_vpid = 1;
+// secondary_control_mask |= BIT(5);
+// }

/* Check for load perf global control */
if (!(exit_control_low & BIT(12))) {
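For orientation, a minimal standalone sketch of what the patched probe reduces to. The names mirror the hunk above; everything else is invented for illustration, not taken from seL4.

```cpp
#include <cstdint>
#include <cstdio>

/* Invented stand-in for the kernel flag touched in the hunk above. */
static int vmx_feature_vpid = 0;

constexpr std::uint32_t BIT(unsigned n) { return 1u << n; }

/* With the VPID-enable branch commented out, the probe effectively reduces
 * to this: the capability bit is ignored and the kernel always runs without
 * VPID tagging (BIT(5) is never added to the secondary-control mask). */
static void probe_vpid(std::uint32_t secondary_control_low)
{
	(void)secondary_control_low;   /* BIT(5), "enable VPID", no longer consulted */
	vmx_feature_vpid = 0;
	std::puts("vt-x: VPIDs are not supported");
}

int main()
{
	probe_vpid(BIT(5));            /* even a VPID-capable CPU takes the same path */
	return vmx_feature_vpid;       /* always 0 after the patch */
}
```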
repos/base-sel4/patches/intel_vmx_full_state.patch (new file, 87 lines)
@@ -0,0 +1,87 @@
--- src/kernel/sel4/src/arch/x86/object/vcpu.c
+++ src/kernel/sel4/src/arch/x86/object/vcpu.c
@@ -88,6 +88,9 @@ static bool_t vmx_feature_ack_on_exit;
static vcpu_t *x86KSVPIDTable[VPID_LAST + 1];
static vpid_t x86KSNextVPID = VPID_FIRST;

+static void
+setMRs_vmexit(uint32_t reason, word_t qualification, tcb_t *tcb);
+
static inline bool_t
vmxon(paddr_t vmxon_region)
{
@@ -967,10 +987,8 @@ vcpu_update_state_sysvmenter(vcpu_t *vcpu)
void
vcpu_sysvmenter_reply_to_user(tcb_t *tcb)
{
- word_t *buffer;
vcpu_t *vcpu;

- buffer = lookupIPCBuffer(true, tcb);
vcpu = tcb->tcbArch.tcbVCPU;

assert(vcpu);
@@ -979,11 +997,9 @@ vcpu_sysvmenter_reply_to_user(tcb_t *tcb)
switchVCPU(vcpu);
}

- setMR(tcb, buffer, SEL4_VMENTER_CALL_EIP_MR, vmread(VMX_GUEST_RIP));
- setMR(tcb, buffer, SEL4_VMENTER_CALL_CONTROL_PPC_MR, vmread(VMX_CONTROL_PRIMARY_PROCESSOR_CONTROLS));
+ setMRs_vmexit(/* unused */ -1, /* unused */ -1, tcb);

- setMR(tcb, buffer, SEL4_VMENTER_CALL_CONTROL_ENTRY_MR, vmread(VMX_CONTROL_ENTRY_INTERRUPTION_INFO));
- setRegister(tcb, msgInfoRegister, 0);
+ setRegister(tcb, msgInfoRegister, 0 /* notification that this is no VM exit */);
}

exception_t
@@ -1113,27 +1141,27 @@ vtx_init(void)
}

static void
-setMRs_vmexit(uint32_t reason, word_t qualification)
+setMRs_vmexit(uint32_t reason, word_t qualification, tcb_t *tcb)
{
word_t *buffer;
int i;

- buffer = lookupIPCBuffer(true, NODE_STATE(ksCurThread));
+ buffer = lookupIPCBuffer(true, tcb);

- setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_CALL_EIP_MR, vmread(VMX_GUEST_RIP));
- setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_CALL_CONTROL_PPC_MR, vmread(VMX_CONTROL_PRIMARY_PROCESSOR_CONTROLS));
- setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_CALL_CONTROL_ENTRY_MR, vmread(VMX_CONTROL_ENTRY_INTERRUPTION_INFO));
- setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_REASON_MR, reason);
- setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_QUALIFICATION_MR, qualification);
+ setMR(tcb, buffer, SEL4_VMENTER_CALL_EIP_MR, vmread(VMX_GUEST_RIP));
+ setMR(tcb, buffer, SEL4_VMENTER_CALL_CONTROL_PPC_MR, vmread(VMX_CONTROL_PRIMARY_PROCESSOR_CONTROLS));
+ setMR(tcb, buffer, SEL4_VMENTER_CALL_CONTROL_ENTRY_MR, vmread(VMX_CONTROL_ENTRY_INTERRUPTION_INFO));
+ setMR(tcb, buffer, SEL4_VMENTER_FAULT_REASON_MR, reason);
+ setMR(tcb, buffer, SEL4_VMENTER_FAULT_QUALIFICATION_MR, qualification);

- setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_INSTRUCTION_LEN_MR, vmread(VMX_DATA_EXIT_INSTRUCTION_LENGTH));
- setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_GUEST_PHYSICAL_MR, vmread(VMX_DATA_GUEST_PHYSICAL));
- setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_RFLAGS_MR, vmread(VMX_GUEST_RFLAGS));
- setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_GUEST_INT_MR, vmread(VMX_GUEST_INTERRUPTABILITY));
- setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_CR3_MR, vmread(VMX_GUEST_CR3));
+ setMR(tcb, buffer, SEL4_VMENTER_FAULT_INSTRUCTION_LEN_MR, vmread(VMX_DATA_EXIT_INSTRUCTION_LENGTH));
+ setMR(tcb, buffer, SEL4_VMENTER_FAULT_GUEST_PHYSICAL_MR, vmread(VMX_DATA_GUEST_PHYSICAL));
+ setMR(tcb, buffer, SEL4_VMENTER_FAULT_RFLAGS_MR, vmread(VMX_GUEST_RFLAGS));
+ setMR(tcb, buffer, SEL4_VMENTER_FAULT_GUEST_INT_MR, vmread(VMX_GUEST_INTERRUPTABILITY));
+ setMR(tcb, buffer, SEL4_VMENTER_FAULT_CR3_MR, vmread(VMX_GUEST_CR3));

for (i = 0; i < n_vcpu_gp_register; i++) {
- setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_EAX + i, NODE_STATE(ksCurThread)->tcbArch.tcbVCPU->gp_registers[i]);
+ setMR(tcb, buffer, SEL4_VMENTER_FAULT_EAX + i, tcb->tcbArch.tcbVCPU->gp_registers[i]);
}
}

@@ -1143,7 +1171,7 @@ handleVmxFault(uint32_t reason, word_t qualification)
/* Indicate that we are returning the from VMEnter with a fault */
setRegister(NODE_STATE(ksCurThread), msgInfoRegister, SEL4_VMENTER_RESULT_FAULT);

- setMRs_vmexit(reason, qualification);
+ setMRs_vmexit(reason, qualification, NODE_STATE(ksCurThread));

/* Set the thread back to running */
setThreadState(NODE_STATE(ksCurThread), ThreadState_Running);
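A compilable outline of the refactoring above, using placeholder types and message-register indices (only the call structure follows the patch): setMRs_vmexit() now takes the target tcb explicitly, so vcpu_sysvmenter_reply_to_user() can delegate to it instead of filling a reduced set of message registers by hand.

```cpp
#include <cstdint>

/* Placeholder types standing in for the kernel's word_t and tcb_t. */
using word_t = std::uintptr_t;
struct tcb_t { word_t gp_registers[7]; };

/* Stub for setMR(): record values into a flat message-register array
 * (the indices below are placeholders, not the SEL4_VMENTER_* constants). */
static word_t message_registers[32];
static void setMR(tcb_t &, unsigned mr, word_t value) { message_registers[mr] = value; }

/* After the patch the exit-state writer works on an explicit tcb ... */
static void setMRs_vmexit(std::uint32_t reason, word_t qualification, tcb_t &tcb)
{
	setMR(tcb, 0, 0 /* guest RIP would be read from the VMCS here */);
	setMR(tcb, 1, reason);
	setMR(tcb, 2, qualification);
	for (unsigned i = 0; i < 7; i++)
		setMR(tcb, 3 + i, tcb.gp_registers[i]);
}

/* ... so the fault path and the sysvmenter reply path can share it. */
static void handleVmxFault(std::uint32_t reason, word_t qualification, tcb_t &current)
{
	setMRs_vmexit(reason, qualification, current);
}

static void vcpu_sysvmenter_reply_to_user(tcb_t &tcb)
{
	setMRs_vmexit(/* unused */ -1, /* unused */ -1, tcb);
}

int main()
{
	tcb_t tcb {};
	handleVmxFault(48, 0, tcb);
	vcpu_sysvmenter_reply_to_user(tcb);
}
```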
@@ -1 +1 @@
-34b8f0e01692d1d2ba2f02c98bafe321fc09de22
+7935487f91a31c0cd8aaf09278f6312af56bb935
@@ -134,6 +134,8 @@ class Genode::Vm_space
*/
using Selector_allocator = Bit_allocator<1UL << NUM_VM_SEL_LOG2>;

+class Alloc_page_table_failed : Exception { };
+
private:

Selector_allocator _sel_alloc { };
@@ -247,8 +249,6 @@ class Genode::Vm_space
long _invalidate_page(Genode::Cap_sel const &, seL4_Word const,
seL4_Word const);

-class Alloc_page_table_failed : Exception { };
-
/**
* Allocate and install page structures for the protection domain.
*
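The two hunks above move Alloc_page_table_failed from the private section of Vm_space into its public interface. A reduced example of why that is needed (the class body is invented for illustration): a handler outside the class can only name the exception type if it is accessible.

```cpp
#include <cstdio>

struct Exception { };

class Vm_space
{
	public:

		/* after the change: part of the public interface of Vm_space */
		class Alloc_page_table_failed : Exception { };

		/* placeholder for the real allocation path */
		void alloc_guest_page_tables() { throw Alloc_page_table_failed(); }
};

int main()
{
	Vm_space vm_space;

	try { vm_space.alloc_guest_page_tables(); }
	catch (Vm_space::Alloc_page_table_failed) {
		/* while the type was private, a handler like this could not even
		 * name it outside the class */
		std::puts("alloc page table failed");
	}
}
```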
@@ -251,22 +251,24 @@ void Vm_session_component::_attach_vm_memory(Dataspace_component &dsc,
while (page.valid()) {
enum { NO_FLUSH = false, FLUSH = true };
try {
_vm_space.alloc_guest_page_tables(page.hotspot, 1 << page.log2_order);
_vm_space.map_guest(page.addr, page.hotspot,
(1 << page.log2_order) / 4096,
dsc.cacheability(),
dsc.writable() && attribute.writeable,
attribute.executable, NO_FLUSH);
} catch (Page_table_registry::Mapping_cache_full full) {
if (full.reason == Page_table_registry::Mapping_cache_full::MEMORY) {
if (_ram_quota_guard().limit().value > 4 * 1024 * 1024)
/* we get in trouble in core if we use too much memory */
throw Vm_space::Selector_allocator::Out_of_indices();
throw Out_of_ram();
try {
_vm_space.alloc_guest_page_tables(page.hotspot, 1 << page.log2_order);
_vm_space.map_guest(page.addr, page.hotspot,
(1 << page.log2_order) / 4096,
dsc.cacheability(),
dsc.writable() && attribute.writeable,
attribute.executable, NO_FLUSH);
} catch (Page_table_registry::Mapping_cache_full full) {
if (full.reason == Page_table_registry::Mapping_cache_full::MEMORY) {
if (_ram_quota_guard().limit().value > 4 * 1024 * 1024)
/* we get in trouble in core if we use too much memory */
throw Vm_space::Selector_allocator::Out_of_indices();
throw Out_of_ram();
}
if (full.reason == Page_table_registry::Mapping_cache_full::CAPS)
throw Out_of_caps();
return;
}
if (full.reason == Page_table_registry::Mapping_cache_full::CAPS)
throw Out_of_caps();
return;
} catch (Vm_space::Selector_allocator::Out_of_indices) {
Genode::warning("run out of indices - flush all - cap=",
_cap_quota_guard().used(), "/",
@@ -277,12 +279,6 @@ void Vm_session_component::_attach_vm_memory(Dataspace_component &dsc,
_ram_quota_guard().limit(), " guest=",
Genode::Hex(0UL - _map.avail()));

_vm_space.map_guest(page.addr, page.hotspot,
(1 << page.log2_order) / 4096,
dsc.cacheability(),
dsc.writable() && attribute.writeable,
attribute.executable, FLUSH);

/* drop all attachment to limit ram usage of this session */
while (true) {
addr_t out_addr = 0;
@@ -292,9 +288,14 @@ void Vm_session_component::_attach_vm_memory(Dataspace_component &dsc,

detach(out_addr);
}
} catch (...) {
// Alloc_page_table_failed
Genode::error("alloc_guest_page_table exception");

_vm_space.map_guest(page.addr, page.hotspot,
(1 << page.log2_order) / 4096,
dsc.cacheability(),
dsc.writable() && attribute.writeable,
attribute.executable, FLUSH);
} catch (Vm_space::Alloc_page_table_failed) {
Genode::error("alloc page table failed");
return;
}

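The control flow added above is easier to see in isolation: map with NO_FLUSH, and if the mapping cache is full either translate the condition into a quota exception or request a flush-and-retry via Out_of_indices. A simplified, self-contained sketch with placeholder types (the real code uses Vm_space, Page_table_registry and the Genode quota guards):

```cpp
#include <cstdio>
#include <exception>

/* Simplified stand-ins for the types that appear in the hunk above. */
struct Mapping_cache_full { enum Reason { MEMORY, CAPS } reason; };
struct Out_of_ram     : std::exception { };
struct Out_of_caps    : std::exception { };
struct Out_of_indices : std::exception { };   /* "flush everything and retry" */

static bool cache_full = true;   /* pretend the mapping cache is exhausted */

static void map_guest(bool flush)
{
	if (cache_full && !flush)
		throw Mapping_cache_full { Mapping_cache_full::MEMORY };
	cache_full = false;          /* flushing frees the cache, the mapping succeeds */
}

static void attach_one_page(unsigned long ram_limit)
{
	try {
		map_guest(false /* NO_FLUSH */);
	} catch (Mapping_cache_full full) {
		if (full.reason == Mapping_cache_full::MEMORY) {
			if (ram_limit > 4 * 1024 * 1024)
				/* plenty of quota left: the cache itself is the bottleneck */
				throw Out_of_indices();
			throw Out_of_ram();  /* otherwise ask the client for more RAM */
		}
		if (full.reason == Mapping_cache_full::CAPS)
			throw Out_of_caps();
	}
}

int main()
{
	try { attach_one_page(8 * 1024 * 1024); }
	catch (Out_of_indices) {
		std::puts("run out of indices - flush all");
		map_guest(true /* FLUSH */);   /* drop attachments, then map again */
	}
}
```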
@@ -87,6 +87,8 @@ struct Vcpu : Genode::Thread
addr_t const cr4_mask = CR4_VMX;
addr_t const cr4_set = CR4_VMX;

seL4_VCPUContext _recent_gpr { };

void entry() override
{
/* trigger that thread is up */
@@ -113,7 +115,7 @@ struct Vcpu : Genode::Thread

/* initial startup VM exit to get valid VM state */
state.exit_reason = VMEXIT_STARTUP;
_read_sel4_state_async(service, state);
_read_sel4_state(service, state);

Genode::Signal_transmitter(_signal).submit();

@@ -155,12 +157,9 @@ struct Vcpu : Genode::Thread

state = Vm_state {};

state.ip.value(seL4_GetMR(SEL4_VMENTER_CALL_EIP_MR));
state.ctrl_primary.value(seL4_GetMR(SEL4_VMENTER_CALL_CONTROL_PPC_MR));
state.inj_info.value(seL4_GetMR(SEL4_VMENTER_CALL_CONTROL_ENTRY_MR));

state.exit_reason = VMEXIT_RECALL;
_read_sel4_state_async(service, state);

_read_sel4_state(service, state);

/* notify VM handler */
Genode::Signal_transmitter(_signal).submit();
@@ -189,36 +188,14 @@ struct Vcpu : Genode::Thread

state = Vm_state {};

if (res == SEL4_VMENTER_RESULT_FAULT) {
state.ip.value(seL4_GetMR(SEL4_VMENTER_CALL_EIP_MR));
state.ctrl_primary.value(seL4_GetMR(SEL4_VMENTER_CALL_CONTROL_PPC_MR));
if (res != SEL4_VMENTER_RESULT_FAULT)
state.exit_reason = VMEXIT_RECALL;
else
state.exit_reason = seL4_GetMR(SEL4_VMENTER_FAULT_REASON_MR);

state.ip_len.value(seL4_GetMR(SEL4_VMENTER_FAULT_INSTRUCTION_LEN_MR));
state.qual_primary.value(seL4_GetMR(SEL4_VMENTER_FAULT_QUALIFICATION_MR));
state.qual_secondary.value(seL4_GetMR(SEL4_VMENTER_FAULT_GUEST_PHYSICAL_MR));

state.flags.value(seL4_GetMR(SEL4_VMENTER_FAULT_RFLAGS_MR));
state.intr_state.value(seL4_GetMR(SEL4_VMENTER_FAULT_GUEST_INT_MR));
state.cr3.value(seL4_GetMR(SEL4_VMENTER_FAULT_CR3_MR));

state.ax.value(seL4_GetMR(SEL4_VMENTER_FAULT_EAX));
state.bx.value(seL4_GetMR(SEL4_VMENTER_FAULT_EBX));
state.cx.value(seL4_GetMR(SEL4_VMENTER_FAULT_ECX));
state.dx.value(seL4_GetMR(SEL4_VMENTER_FAULT_EDX));
state.si.value(seL4_GetMR(SEL4_VMENTER_FAULT_ESI));
state.di.value(seL4_GetMR(SEL4_VMENTER_FAULT_EDI));
state.bp.value(seL4_GetMR(SEL4_VMENTER_FAULT_EBP));

_read_sel4_state(service, state);
} else {
state.ip.value(seL4_GetMR(SEL4_VMENTER_CALL_EIP_MR));
state.ctrl_primary.value(seL4_GetMR(SEL4_VMENTER_CALL_CONTROL_PPC_MR));
/* what about the other GPR stuff ? XXX */

state.exit_reason = VMEXIT_RECALL;
_read_sel4_state_async(service, state);
_read_sel4_state(service, state);

if (res != SEL4_VMENTER_RESULT_FAULT) {
Lock::Guard guard(_remote_lock);
if (_remote == PAUSE) {
_remote = NONE;
@@ -342,23 +319,6 @@ struct Vcpu : Genode::Thread
" ", res.written);
}

void _write_gpr(seL4_X86_VCPU const service, Vm_state &state)
{
seL4_VCPUContext regs;
regs.eax = state.ax.value();
regs.ebx = state.bx.value();
regs.ecx = state.cx.value();
regs.edx = state.dx.value();
regs.esi = state.si.value();
regs.edi = state.di.value();
regs.ebp = state.bp.value();

seL4_Error res = seL4_X86_VCPU_WriteRegisters(service, &regs);
if (res != seL4_NoError)
Genode::error("setting general purpose register failed ",
(int)res);
}

/*
* Convert to Intel format comprising 32 bits.
*/
@@ -373,12 +333,24 @@ struct Vcpu : Genode::Thread

void _write_sel4_state(seL4_X86_VCPU const service, Vm_state &state)
{
if (state.ax.valid()) _recent_gpr.eax = state.ax.value();
if (state.bx.valid()) _recent_gpr.ebx = state.bx.value();
if (state.cx.valid()) _recent_gpr.ecx = state.cx.value();
if (state.dx.valid()) _recent_gpr.edx = state.dx.value();
if (state.si.valid()) _recent_gpr.esi = state.si.value();
if (state.di.valid()) _recent_gpr.edi = state.di.value();
if (state.bp.valid()) _recent_gpr.ebp = state.bp.value();

if (state.ax.valid() || state.cx.valid() ||
state.dx.valid() || state.bx.valid() ||
state.bp.valid() || state.di.valid() ||
state.si.valid()) {
/* XXX read first all values and write back only the changed ones ... */
_write_gpr(service, state);
state.si.valid())
{
seL4_Error res = seL4_X86_VCPU_WriteRegisters(service,
&_recent_gpr);
if (res != seL4_NoError)
Genode::error("setting general purpose registers failed ",
(int)res);
}

if (state.r8.valid() || state.r9.valid() ||
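The hunks above drop the _write_gpr() helper in favour of a cached register set: the most recently observed GPRs are kept in _recent_gpr, only the fields marked valid are overlaid, and the complete context is written back in one seL4_X86_VCPU_WriteRegisters() call. A reduced sketch of that pattern (the struct layout follows the hunk, everything else is a placeholder):

```cpp
#include <optional>

/* Field layout as used in the hunk above. */
struct seL4_VCPUContext { unsigned long eax, ebx, ecx, edx, esi, edi, ebp; };

/* Placeholder for the partial register state handed in by the VMM:
 * each register may or may not carry a new value. */
struct Vm_state { std::optional<unsigned long> ax, bx, cx, dx, si, di, bp; };

struct Vcpu_sketch
{
	/* last full GPR set observed on a VM exit (_recent_gpr in the hunk) */
	seL4_VCPUContext _recent_gpr { };

	/* pretend backend for seL4_X86_VCPU_WriteRegisters() */
	void write_registers(seL4_VCPUContext const &) { }

	void write_sel4_state(Vm_state const &state)
	{
		/* overlay only the fields the VMM actually changed ... */
		if (state.ax) _recent_gpr.eax = *state.ax;
		if (state.bx) _recent_gpr.ebx = *state.bx;
		if (state.cx) _recent_gpr.ecx = *state.cx;
		if (state.dx) _recent_gpr.edx = *state.dx;
		if (state.si) _recent_gpr.esi = *state.si;
		if (state.di) _recent_gpr.edi = *state.di;
		if (state.bp) _recent_gpr.ebp = *state.bp;

		/* ... and push the complete, consistent context in one call,
		 * instead of synthesising it from possibly invalid fields as
		 * the removed _write_gpr() helper did */
		if (state.ax || state.bx || state.cx || state.dx ||
		    state.si || state.di || state.bp)
			write_registers(_recent_gpr);
	}
};

int main()
{
	Vcpu_sketch vcpu;
	Vm_state state;
	state.ax = 0x1234;            /* only eax changed by the handler */
	vcpu.write_sel4_state(state); /* ebx..ebp keep their last-known values */
}
```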
@@ -597,36 +569,35 @@ struct Vcpu : Genode::Thread
uint32_t _read_vmcs_32(seL4_X86_VCPU const service, enum Vmcs const field) {
return _read_vmcsX<uint32_t>(service, field); }

void _read_sel4_state_async(seL4_X86_VCPU const service, Vm_state &state)
{
#if 0
state.ax.value(state.ax.value()); /* XXX ? */
state.cx.value(state.cx.value());
state.dx.value(state.dx.value());
state.bx.value(state.bx.value());

state.di.value(state.di.value()); /* XXX ? */
state.si.value(state.si.value());
state.bp.value(state.bp.value());
#endif

state.flags.value(_read_vmcs(service, Vmcs::RFLAGS));

state.ip.value(_read_vmcs(service, Vmcs::RIP));
state.ip_len.value(_read_vmcs(service, Vmcs::EXIT_INST_LEN));

state.cr3.value(_read_vmcs(service, Vmcs::CR3));

state.qual_primary.value(state.qual_primary.value()); /* XXX ? */
state.qual_secondary.value(state.qual_secondary.value()); /* XXX ? */

state.ctrl_primary.value(_read_vmcs(service, Vmcs::CTRL_0));

_read_sel4_state(service, state);
}

void _read_sel4_state(seL4_X86_VCPU const service, Vm_state &state)
{
state.ip.value(seL4_GetMR(SEL4_VMENTER_CALL_EIP_MR));
state.ctrl_primary.value(seL4_GetMR(SEL4_VMENTER_CALL_CONTROL_PPC_MR));

state.ip_len.value(seL4_GetMR(SEL4_VMENTER_FAULT_INSTRUCTION_LEN_MR));
state.qual_primary.value(seL4_GetMR(SEL4_VMENTER_FAULT_QUALIFICATION_MR));
state.qual_secondary.value(seL4_GetMR(SEL4_VMENTER_FAULT_GUEST_PHYSICAL_MR));

state.flags.value(seL4_GetMR(SEL4_VMENTER_FAULT_RFLAGS_MR));
state.intr_state.value(seL4_GetMR(SEL4_VMENTER_FAULT_GUEST_INT_MR));
state.cr3.value(seL4_GetMR(SEL4_VMENTER_FAULT_CR3_MR));

state.ax.value(seL4_GetMR(SEL4_VMENTER_FAULT_EAX));
state.bx.value(seL4_GetMR(SEL4_VMENTER_FAULT_EBX));
state.cx.value(seL4_GetMR(SEL4_VMENTER_FAULT_ECX));
state.dx.value(seL4_GetMR(SEL4_VMENTER_FAULT_EDX));
state.si.value(seL4_GetMR(SEL4_VMENTER_FAULT_ESI));
state.di.value(seL4_GetMR(SEL4_VMENTER_FAULT_EDI));
state.bp.value(seL4_GetMR(SEL4_VMENTER_FAULT_EBP));

_recent_gpr.eax = state.ax.value();
_recent_gpr.ebx = state.bx.value();
_recent_gpr.ecx = state.cx.value();
_recent_gpr.edx = state.dx.value();
_recent_gpr.esi = state.si.value();
_recent_gpr.edi = state.di.value();
_recent_gpr.ebp = state.bp.value();

state.sp.value(_read_vmcs(service, Vmcs::RSP));
state.dr7.value(_read_vmcs(service, Vmcs::DR7));

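Taken together, the hunks above remove the separate _read_sel4_state_async() path: after every VM exit the guest state is rebuilt from the VM-enter message registers (plus a few VMCS reads such as RSP and DR7), and the general-purpose registers are snapshotted into _recent_gpr in the same step. A reduced sketch of that flow, with the message-register layout and state type as placeholders:

```cpp
#include <array>
#include <cstdint>

/* Placeholder for the seL4 IPC message registers filled in by the kernel's
 * setMRs_vmexit() (see intel_vmx_full_state.patch above). */
static std::array<std::uint64_t, 16> mr;
static std::uint64_t get_mr(unsigned idx) { return mr.at(idx); }

struct Gpr   { std::uint64_t eax, ebx, ecx, edx, esi, edi, ebp; };
struct State { std::uint64_t ip, qual_primary, flags, cr3; Gpr gpr; };

/* Hypothetical MR layout, standing in for the SEL4_VMENTER_*_MR constants. */
enum { MR_EIP = 0, MR_QUALIFICATION = 4, MR_RFLAGS = 6, MR_CR3 = 8, MR_EAX = 9 };

static Gpr recent_gpr;   /* mirrors _recent_gpr in the hunk */

static void read_sel4_state(State &state)
{
	/* one source of truth: the message registers of the last VM enter */
	state.ip           = get_mr(MR_EIP);
	state.qual_primary = get_mr(MR_QUALIFICATION);
	state.flags        = get_mr(MR_RFLAGS);
	state.cr3          = get_mr(MR_CR3);

	state.gpr = Gpr { get_mr(MR_EAX),     get_mr(MR_EAX + 1), get_mr(MR_EAX + 2),
	                  get_mr(MR_EAX + 3), get_mr(MR_EAX + 4), get_mr(MR_EAX + 5),
	                  get_mr(MR_EAX + 6) };

	/* remember the full GPR set so later partial writes can be overlaid */
	recent_gpr = state.gpr;
}

int main()
{
	State state {};
	read_sel4_state(state);
	return 0;
}
```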
@@ -21,6 +21,7 @@ set vmm_vcpu_same_cpu "no"

if {[have_spec sel4]} {
set map_small "yes"
+set vmm_vcpu_same_cpu "yes"

# seL4 has no AMD SVM support
if {[have_include "power_on/qemu"]} {
@@ -274,7 +275,7 @@ append_if $use_framebuffer config {

if {!$use_fancy_stuff} {
append config {
-<start name="seoul" priority="-3" caps="400">
+<start name="seoul" priority="-3" caps="800">
<binary name="seoul"/>}
append config "
<resource name=\"RAM\" quantum=\"$memory_vmm_vm\"/>"