vbox6: enable AVX support

Issue #5314
Alexander Boettcher 2024-09-05 09:54:42 +02:00 committed by Norman Feske
parent a07b5937d9
commit 75266e467d
6 changed files with 100 additions and 8 deletions


@@ -1 +1 @@
-4f4c05a80b5767a0132c333a352fada6ba8965a8
+f425c0058d023cabfa555a0605982e935c582902


@@ -0,0 +1,66 @@
--- a/src/virtualbox6/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
+++ b/src/virtualbox6/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
@@ -2470,6 +2470,8 @@
pCpum->GuestFeatures.cbMaxExtendedState),
VERR_CPUM_IPE_1);
pVCpu0->cpum.s.Guest.aoffXState[iComponent] = pSubLeaf->uEbx;
+ /* store uEax to later on detect compact mode */
+// pVCpu0->cpum.s.Guest.aoffXState[iComponent] = pSubLeaf->uEax;
}
/* Copy the CPU #0 data to the other CPUs. */
@@ -3558,6 +3560,8 @@
VERR_CPUM_IPE_2);
continue;
case 1:
+ /* permit compact AVX mode, Intel: 13.2 ENUMERATION OF CPU SUPPORT FOR XSAVE INSTRUCTIONS AND XSAVE-SUPPORTED FEATURES */
+// pCurLeaf->uEax &= 1 | 2;
pCurLeaf->uEax &= 0;
pCurLeaf->uEcx &= 0;
pCurLeaf->uEdx &= 0;
@@ -4285,7 +4289,8 @@
rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "SSE4.2", &pConfig->enmSse42, true);
AssertLogRelRCReturn(rc, rc);
- bool const fMayHaveXSave = fNestedPagingAndFullGuestExec
+ bool const fEnforceHWusage = true;
+ bool const fMayHaveXSave = fEnforceHWusage
&& pVM->cpum.s.HostFeatures.fXSaveRstor
&& pVM->cpum.s.HostFeatures.fOpSysXSaveRstor;
uint64_t const fXStateHostMask = pVM->cpum.s.fXStateHostMask;
@@ -4296,7 +4301,7 @@
* unrestricted guest execution mode. Not possible to force this one without
* host support at the moment.
*/
- rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "XSAVE", &pConfig->enmXSave, fNestedPagingAndFullGuestExec,
+ rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "XSAVE", &pConfig->enmXSave, fEnforceHWusage,
fMayHaveXSave /*fAllowed*/);
AssertLogRelRCReturn(rc, rc);
@@ -4305,7 +4310,7 @@
* XSAVE is exposed too. For the time being the default is to only expose this
* to VMs with nested paging and AMD-V or unrestricted guest execution mode.
*/
- rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX", &pConfig->enmAvx, fNestedPagingAndFullGuestExec,
+ rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX", &pConfig->enmAvx, fEnforceHWusage,
fMayHaveXSave && pConfig->enmXSave && (fXStateHostMask & XSAVE_C_YMM) /*fAllowed*/);
AssertLogRelRCReturn(rc, rc);
@@ -4314,7 +4319,7 @@
* XSAVE is exposed too. For the time being the default is to only expose this
* to VMs with nested paging and AMD-V or unrestricted guest execution mode.
*/
- rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX2", &pConfig->enmAvx2, fNestedPagingAndFullGuestExec /* temporarily */,
+ rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX2", &pConfig->enmAvx2, fEnforceHWusage /* temporarily */,
fMayHaveXSave && pConfig->enmXSave && (fXStateHostMask & XSAVE_C_YMM) /*fAllowed*/);
AssertLogRelRCReturn(rc, rc);
@@ -4425,7 +4430,7 @@
* being the default is to only do this for VMs with nested paging and AMD-V or
* unrestricted guest mode.
*/
- rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "SSE4A", &pConfig->enmSse4A, fNestedPagingAndFullGuestExec);
+ rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "SSE4A", &pConfig->enmSse4A, fEnforceHWusage);
AssertLogRelRCReturn(rc, rc);
/** @cfgm{/CPUM/IsaExts/MISALNSSE, isaextcfg, depends}
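
With this patch in place, the guest sees XSAVE and AVX advertised in CPUID and, once its OS has enabled XSAVE, a YMM-enabled XCR0 — exactly what software checks according to the SDM section cited in the comment above. The following guest-side sketch of that check is purely illustrative and not part of this commit; it assumes a GCC/Clang x86_64 toolchain, and the function name is made up.

/*
 * Illustrative guest-side check: CPUID.1:ECX must report OSXSAVE and AVX,
 * and XCR0 (read with XGETBV after the guest OS executed XSETBV) must have
 * the SSE and YMM state bits enabled.
 */
#include <cpuid.h>   /* __get_cpuid */
#include <cstdint>
#include <cstdio>

static bool guest_has_usable_avx()
{
	unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return false;

	bool const osxsave = ecx & (1u << 27);   /* OS enabled XSAVE via XSETBV */
	bool const avx     = ecx & (1u << 28);   /* CPU implements AVX          */
	if (!osxsave || !avx)
		return false;

	/* XGETBV with ECX=0 returns XCR0: bit 1 = SSE state, bit 2 = YMM state */
	uint32_t lo = 0, hi = 0;
	asm volatile ("xgetbv" : "=a"(lo), "=d"(hi) : "c"(0));
	uint64_t const xcr0 = ((uint64_t)hi << 32) | lo;
	return (xcr0 & 0x6) == 0x6;
}

int main()
{
	std::printf("AVX usable: %s\n", guest_has_usable_avx() ? "yes" : "no");
}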


@@ -14,3 +14,4 @@ pgmphys.patch
sup_ioctl_query_func_size.patch
disk_geometry.patch
stack_size.patch
+avx.patch


@@ -265,12 +265,27 @@ template <typename VIRT> void Sup::Vcpu_impl<VIRT>::_transfer_state_to_vcpu(CPUM
state.tpr_threshold.charge(pending_priority);
}
-/* export FPU state */
+/* export FPU state - start */
+state.xcr0.charge(ctx.aXcr[0]);
+{
+::uint64_t ia32_xss = 0;
+auto const rc = CPUMQueryGuestMsr(&_vmcpu, 0xDA0 /* MSR_IA32_XSS */,
+&ia32_xss);
+if (rc == VINF_SUCCESS)
+state.xss.charge(ia32_xss);
+}
_state->ref.fpu.charge([&](Vcpu_state::Fpu::State &fpu) {
static_assert(sizeof(*ctx.pXStateR3) >= sizeof(fpu._buffer));
-::memcpy(fpu._buffer, ctx.pXStateR3, sizeof(X86FXSTATE));
-return sizeof(X86FXSTATE);
+unsigned fpu_size = min(_vm.cpum.s.HostFeatures.cbMaxExtendedState,
+sizeof(fpu._buffer));
+::memcpy(fpu._buffer, ctx.pXStateR3, fpu_size);
+return fpu_size;
});
+/* export FPU state - end */
{
::uint64_t tsc_aux = 0;
@@ -413,13 +428,21 @@ template <typename VIRT> void Sup::Vcpu_impl<VIRT>::_transfer_state_to_vbox(CPUM
APICSetTpr(pVCpu, tpr);
-/* import FPU state */
+/* import FPU state - start */
_state->ref.fpu.with_state([&](Vcpu_state::Fpu::State const &fpu) {
static_assert(sizeof(*ctx.pXStateR3) >= sizeof(fpu._buffer));
-::memcpy(ctx.pXStateR3, fpu._buffer, sizeof(X86FXSTATE));
+unsigned fpu_size = min(_vm.cpum.s.HostFeatures.cbMaxExtendedState,
+sizeof(fpu._buffer));
+::memcpy(ctx.pXStateR3, fpu._buffer, fpu_size);
return true;
});
+CPUMSetGuestMsr (pVCpu, 0xDA0 /* MSR_IA32_XSS */, state.xss.value());
+CPUMSetGuestXcr0(pVCpu, state.xcr0.value());
+/* import FPU state - end */
/* do SVM/VMX-specific transfers */
VIRT::transfer_state_to_vbox(state, _vmcpu, ctx);
}
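
The two hunks above replace the fixed 512-byte copy of the legacy X86FXSTATE with a copy bounded by min(cbMaxExtendedState, sizeof(fpu._buffer)), because the AVX (YMM-high) component lives beyond the legacy area and the XSAVE header. A rough sketch of that standard, non-compacted layout and of such a bounded copy follows; the type and helper names are illustrative, not VirtualBox's.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>

struct Xsave_area_layout
{
	uint8_t legacy_fxsave[512];  /* x87/MMX/SSE state incl. XMM registers  */
	uint8_t xsave_header[64];    /* XSTATE_BV / XCOMP_BV component bitmaps */
	uint8_t ymm_hi[256];         /* upper 128 bits of YMM0..YMM15 (AVX)    */
	/* further components (MPX, AVX-512, ...) follow at CPUID-reported offsets */
};

/* in the standard format, the YMM-high component starts at byte 576 */
static_assert(offsetof(Xsave_area_layout, ymm_hi) == 576, "unexpected layout");

/* copy no more than both the guest's extended-state size and the buffer hold */
static size_t copy_xstate(void *dst, size_t dst_size,
                          void const *src, size_t src_state_size)
{
	size_t const n = std::min(dst_size, src_state_size);
	std::memcpy(dst, src, n);
	return n;
}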


@@ -77,6 +77,7 @@ Genode::Vm_connection::Exit_config const Sup::Svm::exit_config { /* ... */ };
| SVM_CTRL_INTERCEPT_WBINVD
| SVM_CTRL_INTERCEPT_MONITOR
| SVM_CTRL_INTERCEPT_RDTSCP
+| SVM_CTRL_INTERCEPT_XSETBV
| SVM_CTRL_INTERCEPT_MWAIT;
unsigned Sup::Svm::ctrl_primary()
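
Intercepting XSETBV lets the VMM observe every guest write to XCR0, which is what keeps ctx.aXcr[0], exported and imported by the state-transfer code above, in sync. A generic sketch of what such an exit handler checks, deliberately using made-up types rather than VirtualBox's routines:

#include <cstdint>

struct Vcpu_regs { uint64_t rax, rcx, rdx; };

enum class Xsetbv_result { ok, raise_gp };

static Xsetbv_result handle_xsetbv(Vcpu_regs const &regs, uint64_t &guest_xcr0)
{
	uint32_t const index = (uint32_t)regs.rcx;
	uint64_t const value = ((regs.rdx & 0xffffffffu) << 32)
	                     |  (regs.rax & 0xffffffffu);

	if (index != 0)    return Xsetbv_result::raise_gp; /* only XCR0 exists      */
	if (!(value & 1))  return Xsetbv_result::raise_gp; /* x87 bit must stay set */
	if ((value & 4) && !(value & 2))
	                   return Xsetbv_result::raise_gp; /* YMM requires SSE      */

	/* a real handler also rejects bits unsupported per CPUID leaf 0xD */
	guest_xcr0 = value;  /* picked up by the FPU/XCR0 transfer shown above */
	return Xsetbv_result::ok;
}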


@@ -81,6 +81,7 @@ unsigned Sup::Vmx::ctrl_secondary()
| VMX_PROC_CTLS2_RDTSCP
| VMX_PROC_CTLS2_EPT
| VMX_PROC_CTLS2_INVPCID
+| VMX_PROC_CTLS2_XSAVES_XRSTORS
;
}
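
On the VMX side, the XSAVES/XRSTORS secondary control added above may only be set if the CPU reports it as an allowed-1 setting in IA32_VMX_PROCBASED_CTLS2 (MSR 0x48B), whose upper half holds the allowed-1 bits. A small, hypothetical ring-0 probe for that capability, not part of the commit:

#include <cstdint>

static uint64_t rdmsr(uint32_t msr)   /* executable in ring 0 only */
{
	uint32_t lo, hi;
	asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	return ((uint64_t)hi << 32) | lo;
}

static bool xsaves_control_supported()
{
	enum : uint32_t { IA32_VMX_PROCBASED_CTLS2 = 0x48b };
	enum : uint32_t { XSAVES_XRSTORS_BIT = 1u << 20 /* VMX_PROC_CTLS2_XSAVES_XRSTORS */ };

	uint32_t const allowed1 = (uint32_t)(rdmsr(IA32_VMX_PROCBASED_CTLS2) >> 32);
	return (allowed1 & XSAVES_XRSTORS_BIT) != 0;
}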