vbox6: enable VM reset

Christian Helmuth 2021-06-30 12:53:00 +02:00
parent 6954547b4c
commit 2a76ae002e
4 changed files with 41 additions and 7 deletions


@@ -516,5 +516,8 @@ void nemHCNativeNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS H
 void nemHCNativeNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev,
                                       RTHCPHYS HCPhysNew, ::uint32_t fPageProt,
-                                      PGMPAGETYPE enmType, ::uint8_t *pu2State) STOP
+                                      PGMPAGETYPE enmType, ::uint8_t *pu2State)
+{
+	nemHCNativeNotifyPhysPageProtChanged(pVM, GCPhys, HCPhysNew, fPageProt, enmType, pu2State);
+}
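
Note: STOP in the removed line is the port's stub marker, which the commit
title suggests was hit on VM reset. A minimal sketch of such a macro,
assuming the conventions of the port's stub macros (the actual definition
may differ), shows why execution could not get past this function before
the change: reaching the stub halts the VMM.

	#include <base/log.h>
	#include <base/sleep.h>

	/* hypothetical stand-in for the port's STOP stub marker */
	#define STOP { Genode::error(__func__, " called but not implemented"); \
	               Genode::sleep_forever(); }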


@@ -320,6 +320,28 @@ static int vmmr0_gmm_allocate_pages(GMMALLOCATEPAGESREQ &request)
 }
 
 
+static int vmmr0_gmm_free_pages(GMMFREEPAGESREQ &request)
+{
+
+	for (unsigned i = 0; i < request.cPages; i++) {
+
+		GMMFREEPAGEDESC &page = request.aPages[i];
+
+		Sup::Gmm::Pages one_page { 1 };
+
+		using Vmm_addr = Sup::Gmm::Vmm_addr;
+		using Page_id  = Sup::Gmm::Page_id;
+
+		Page_id  const page_id  { page.idPage };
+		Vmm_addr const vmm_addr = sup_drv->gmm().vmm_addr(page_id);
+
+		sup_drv->gmm().free(vmm_addr, one_page);
+	}
+
+	return VINF_SUCCESS;
+}
+
+
 static int vmmr0_gmm_map_unmap_chunk(GMMMAPUNMAPCHUNKREQ &request)
 {
 	if (request.idChunkMap != NIL_GMM_CHUNKID) {
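
The new handler maps each idPage from the request to a VMM-local address
before freeing it. As a self-contained sketch of such a page-id/address
mapping, assuming a single linear pool mapping and purely illustrative
names (not the actual Sup::Gmm implementation, which may be more involved):

	#include <cstdint>
	#include <cassert>

	/* illustrative only: one linear mapping, page id == page index */
	struct Gmm_sketch
	{
		static constexpr std::uint64_t PAGE_SIZE = 4096;

		std::uint64_t map_base; /* VMM-local base of the guest-memory pool */

		std::uint64_t vmm_addr(std::uint32_t page_id) const
		{
			return map_base + std::uint64_t(page_id)*PAGE_SIZE;
		}

		std::uint32_t page_id(std::uint64_t addr) const
		{
			assert(addr >= map_base);
			return std::uint32_t((addr - map_base)/PAGE_SIZE);
		}
	};
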
@@ -513,18 +535,16 @@ static int vmmr0_pgm_allocate_handy_pages(PVMR0 pvmr0)
 	uint32_t const start_idx = vm.pgm.s.cHandyPages;
 	uint32_t const stop_idx  = RT_ELEMENTS(vm.pgm.s.aHandyPages);
 
-	Sup::Gmm::Pages pages { stop_idx - start_idx };
-
 	using Vmm_addr = Sup::Gmm::Vmm_addr;
 	using Page_id  = Sup::Gmm::Page_id;
 
-	Vmm_addr const vmm_addr = sup_drv->gmm().alloc_from_reservation(pages);
-
 	for (unsigned i = start_idx; i < stop_idx; ++i) {
 
+		Vmm_addr const vmm_addr = sup_drv->gmm().alloc_from_reservation( Sup::Gmm::Pages { 1 } );
+
 		GMMPAGEDESC &page = vm.pgm.s.aHandyPages[i];
 
-		Vmm_addr const page_addr { vmm_addr.value + i*PAGE_SIZE };
+		Vmm_addr const page_addr { vmm_addr.value };
 		Page_id  const page_id   = sup_drv->gmm().page_id(page_addr);
 
 		page.HCPhysGCPhys = page_addr.value;
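
This hunk trades one contiguous block allocation per refill for a
single-page allocation per handy page. A plausible rationale, consistent
with the free path added in this commit: once a reset has returned
scattered pages to the pool, a multi-page contiguous request can fail even
though enough individual pages are free. The toy program below (purely
illustrative, not Genode code) demonstrates the effect on a fragmented pool:

	#include <bitset>
	#include <cstddef>

	struct Toy_pool
	{
		std::bitset<16> used;

		/* first-fit allocation of n contiguous pages, -1 on failure */
		int alloc_contiguous(std::size_t n)
		{
			for (std::size_t i = 0; i + n <= used.size(); i++) {
				std::size_t j = 0;
				while (j < n && !used[i + j]) j++;
				if (j == n) {
					for (j = 0; j < n; j++) used.set(i + j);
					return int(i);
				}
			}
			return -1;
		}
	};

	int main()
	{
		Toy_pool pool;

		/* fragment the pool: every second page is in use */
		for (std::size_t i = 0; i < 16; i += 2) pool.used.set(i);

		/* 8 pages are free, but no two of them are adjacent */
		bool const block_failed = (pool.alloc_contiguous(2) < 0);

		int singles = 0;
		while (pool.alloc_contiguous(1) >= 0) singles++;

		return (block_failed && singles == 8) ? 0 : 1;
	}
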
@@ -597,6 +617,10 @@ static void ioctl(SUPCALLVMMR0 &request)
 		rc = vmmr0_gmm_allocate_pages(*(GMMALLOCATEPAGESREQ *)request.abReqPkt);
 		return;
 
+	case VMMR0_DO_GMM_FREE_PAGES:
+		rc = vmmr0_gmm_free_pages(*(GMMFREEPAGESREQ *)request.abReqPkt);
+		return;
+
 	case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
 		rc = vmmr0_gmm_map_unmap_chunk(*(GMMMAPUNMAPCHUNKREQ *)request.abReqPkt);
 		return;


@@ -100,6 +100,7 @@ void Sup::Gmm::reservation_pages(Pages pages)
 }
 
 
 Sup::Gmm::Vmm_addr Sup::Gmm::alloc_ex(Pages pages)
 {
 	Mutex::Guard guard(_mutex);
@@ -120,6 +121,12 @@ Sup::Gmm::Vmm_addr Sup::Gmm::alloc_from_reservation(Pages pages)
 }
 
 
+void Sup::Gmm::free(Vmm_addr addr, Pages pages)
+{
+	_alloc.free((void *)(addr.value - _map.base.value), 1);
+}
+
+
 Sup::Gmm::Page_id Sup::Gmm::page_id(Vmm_addr addr)
 {
 	Mutex::Guard guard(_mutex);
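
Gmm::free() hands _alloc the pool-relative offset, cast to a pointer,
rather than the absolute VMM address. A minimal sketch of an allocator
keyed by such offsets (an assumption for illustration; Genode's actual
allocator interface may differ, e.g., in how block sizes are tracked):

	#include <cstdint>
	#include <cstddef>
	#include <map>

	struct Offset_allocator
	{
		std::map<std::uintptr_t, std::size_t> _used; /* offset -> size */
		std::uintptr_t _next = 0;

		void *alloc(std::size_t size)
		{
			std::uintptr_t const off = _next;
			_next += size;
			_used[off] = size;
			return reinterpret_cast<void *>(off); /* "pointer" is a pool offset */
		}

		void free(void *addr, std::size_t)
		{
			/* size argument ignored, sizes are tracked internally */
			_used.erase(reinterpret_cast<std::uintptr_t>(addr));
		}
	};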


@@ -31,7 +31,7 @@
 namespace Sup { struct Gmm; }
 
 /**
- * Global (guest-memory) manager.(GMM)
+ * Global (guest-memory) manager (GMM)
  *
  * Layers in this pool are (top-down)
  *