--- src/kernel/sel4/src/arch/x86/object/vcpu.c
+++ src/kernel/sel4/src/arch/x86/object/vcpu.c
@@ -921,6 +921,8 @@
     vmwrite(VMX_CONTROL_ENTRY_INTERRUPTION_INFO, getSyscallArg(2, buffer));
 }
 
+static void setMRs_vmexit(uint32_t reason, word_t qualification, tcb_t *tcb);
+
 void vcpu_sysvmenter_reply_to_user(tcb_t *tcb)
 {
     vcpu_t *vcpu;
@@ -979,11 +997,9 @@ vcpu_sysvmenter_reply_to_user(tcb_t *tcb)
         switchVCPU(vcpu);
     }
 
-    setMR(tcb, buffer, SEL4_VMENTER_CALL_EIP_MR, vmread(VMX_GUEST_RIP));
-    setMR(tcb, buffer, SEL4_VMENTER_CALL_CONTROL_PPC_MR, vmread(VMX_CONTROL_PRIMARY_PROCESSOR_CONTROLS));
+    setMRs_vmexit(/* unused */ -1, /* unused */ -1, tcb);
 
-    setMR(tcb, buffer, SEL4_VMENTER_CALL_CONTROL_ENTRY_MR, vmread(VMX_CONTROL_ENTRY_INTERRUPTION_INFO));
-    setRegister(tcb, msgInfoRegister, 0);
+    setRegister(tcb, msgInfoRegister, 0 /* notification that this is no VM exit */);
 }
 
 exception_t
@@ -1065,29 +1067,29 @@
     return true;
 }
 
-static void setMRs_vmexit(uint32_t reason, word_t qualification)
+static void setMRs_vmexit(uint32_t reason, word_t qualification, tcb_t *tcb)
 {
     word_t *buffer;
     int i;
 
-    buffer = lookupIPCBuffer(true, NODE_STATE(ksCurThread));
+    buffer = lookupIPCBuffer(true, tcb);
 
-    setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_CALL_EIP_MR, vmread(VMX_GUEST_RIP));
-    setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_CALL_CONTROL_PPC_MR,
+    setMR(tcb, buffer, SEL4_VMENTER_CALL_EIP_MR, vmread(VMX_GUEST_RIP));
+    setMR(tcb, buffer, SEL4_VMENTER_CALL_CONTROL_PPC_MR,
           vmread(VMX_CONTROL_PRIMARY_PROCESSOR_CONTROLS));
-    setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_CALL_CONTROL_ENTRY_MR, vmread(VMX_CONTROL_ENTRY_INTERRUPTION_INFO));
-    setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_REASON_MR, reason);
-    setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_QUALIFICATION_MR, qualification);
-
-    setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_INSTRUCTION_LEN_MR, vmread(VMX_DATA_EXIT_INSTRUCTION_LENGTH));
-    setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_GUEST_PHYSICAL_MR, vmread(VMX_DATA_GUEST_PHYSICAL));
-    setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_RFLAGS_MR, vmread(VMX_GUEST_RFLAGS));
-    setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_GUEST_INT_MR, vmread(VMX_GUEST_INTERRUPTABILITY));
-    setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_CR3_MR, vmread(VMX_GUEST_CR3));
+    setMR(tcb, buffer, SEL4_VMENTER_CALL_CONTROL_ENTRY_MR, vmread(VMX_CONTROL_ENTRY_INTERRUPTION_INFO));
+    setMR(tcb, buffer, SEL4_VMENTER_FAULT_REASON_MR, reason);
+    setMR(tcb, buffer, SEL4_VMENTER_FAULT_QUALIFICATION_MR, qualification);
+
+    setMR(tcb, buffer, SEL4_VMENTER_FAULT_INSTRUCTION_LEN_MR, vmread(VMX_DATA_EXIT_INSTRUCTION_LENGTH));
+    setMR(tcb, buffer, SEL4_VMENTER_FAULT_GUEST_PHYSICAL_MR, vmread(VMX_DATA_GUEST_PHYSICAL));
+    setMR(tcb, buffer, SEL4_VMENTER_FAULT_RFLAGS_MR, vmread(VMX_GUEST_RFLAGS));
+    setMR(tcb, buffer, SEL4_VMENTER_FAULT_GUEST_INT_MR, vmread(VMX_GUEST_INTERRUPTABILITY));
+    setMR(tcb, buffer, SEL4_VMENTER_FAULT_CR3_MR, vmread(VMX_GUEST_CR3));
 
     for (i = 0; i < n_vcpu_gp_register; i++) {
-        setMR(NODE_STATE(ksCurThread), buffer, SEL4_VMENTER_FAULT_EAX + i,
-              NODE_STATE(ksCurThread)->tcbArch.tcbVCPU->gp_registers[i]);
+        setMR(tcb, buffer, SEL4_VMENTER_FAULT_EAX + i,
+              tcb->tcbArch.tcbVCPU->gp_registers[i]);
     }
 }
 
@@ -1143,7 +1171,7 @@ handleVmxFault(uint32_t reason, word_t qualification)
     /* Indicate that we are returning the from VMEnter with a fault */
     setRegister(NODE_STATE(ksCurThread), msgInfoRegister, SEL4_VMENTER_RESULT_FAULT);
 
-    setMRs_vmexit(reason, qualification);
+    setMRs_vmexit(reason, qualification, NODE_STATE(ksCurThread));
 
     /* Set the thread back to running */
     setThreadState(NODE_STATE(ksCurThread), ThreadState_Running);
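
The patch above deduplicates the two MR-writing paths by giving setMRs_vmexit an explicit tcb_t * parameter instead of having it read NODE_STATE(ksCurThread) internally: handleVmxFault passes the current thread, while vcpu_sysvmenter_reply_to_user passes its own tcb argument and ignores the reason/qualification fields. The standalone C sketch below illustrates that same pattern outside the kernel; the names thread_t, set_mrs, handle_fault and reply_no_fault are hypothetical stand-ins for illustration only, not seL4 identifiers.

/* Minimal sketch of the refactoring pattern in the patch: the helper takes
 * the target thread as a parameter, so the fault path (current thread) and
 * the reply path (caller-supplied thread) can share it.  All names here are
 * hypothetical stand-ins, not seL4 APIs. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    const char *name;
    uint64_t    mrs[4];     /* stand-in for the message registers */
} thread_t;

static thread_t current_thread;   /* stand-in for NODE_STATE(ksCurThread) */

/* Helper parameterised over the target thread, like setMRs_vmexit(..., tcb). */
static void set_mrs(thread_t *t, uint32_t reason, uint64_t qualification)
{
    t->mrs[0] = reason;
    t->mrs[1] = qualification;
}

/* Fault path: always targets the current thread, like handleVmxFault(). */
static void handle_fault(uint32_t reason, uint64_t qualification)
{
    set_mrs(&current_thread, reason, qualification);
}

/* Reply path: targets an explicit thread, like vcpu_sysvmenter_reply_to_user();
 * reason and qualification are unused on this path. */
static void reply_no_fault(thread_t *t)
{
    set_mrs(t, (uint32_t)-1, (uint64_t)-1);
}

int main(void)
{
    thread_t other = { .name = "other" };
    current_thread.name = "current";

    handle_fault(30, 0x1);      /* e.g. some VM-exit reason and qualification */
    reply_no_fault(&other);

    printf("%s: reason=%llu\n", current_thread.name,
           (unsigned long long)current_thread.mrs[0]);
    printf("%s: reason=%llu\n", other.name,
           (unsigned long long)other.mrs[0]);
    return 0;
}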