int v3_init_passthrough_pts(struct guest_info * info) {
+ if (info->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
+ // skip - init_ept will do this allocation
+ return 0;
+ }
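+ // otherwise (shadow paging or SVM nested paging) allocate the passthrough root normally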
info->direct_map_pt = (addr_t)V3_PAddr((void *)create_generic_pt_page(info));
return 0;
}
int v3_free_passthrough_pts(struct guest_info * core) {
v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
+ if (core->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
+ // there are no passthrough page tables, but
+ // the EPT implementation is using direct_map_pt to store
+ // the EPT root table pointer... and the EPT tables
+ // are not compatible with regular x86 tables, so we
+ // must not attempt to free them here...
+ return 0;
+ }
+
+ // we are either in shadow paging or in SVM nested paging;
+ // in either case, we can nuke the PTs
+
// Delete the old direct map page tables
switch(mode) {
case REAL:
case PROTECTED:
// Intentional fallthrough: these modes all use
// 32PAE-format passthrough tables
case PROTECTED_PAE:
case LONG:
case LONG_32_COMPAT:
// Long mode will only use 32PAE page tables...
delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
break;
default:
PrintError(core->vm_info, core, "Unknown CPU Mode\n");
return -1;
}
return 0;
}
int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo)
{
if (is_vmx_nested()) {
return init_ept(core, (struct vmx_hw_info *) hwinfo);
} else {
// no initialization for SVM
+ // the direct map page tables are used since the
+ // nested pt format is identical to the main pt format
return 0;
}
}
int v3_deinit_nested_paging_core(struct guest_info *core)
{
- // nothing to do.. probably dealloc? FIXME PAD
-
- return 0;
+ if (core->shdw_pg_mode == NESTED_PAGING) {
+ if (is_vmx_nested()) {
+ return deinit_ept(core);
+ } else {
+ // SVM nested deinit is handled by the passthrough paging teardown
+ return 0;
+ }
+ } else {
+ // not relevant
+ return 0;
+ }
}
ept_ptr->pml_base_addr = PAGE_BASE_ADDR(ept_pa);
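+ // pml_base_addr holds the page number (physical address >> 12) of the
+ // EPT PML4 root; the same root is what direct_map_pt caches, per the
+ // comment in v3_free_passthrough_pts above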
+ PrintDebug(core->vm_info, core, "init_ept direct_map_pt=%p\n", (void *)(core->direct_map_pt));
+
+ return 0;
+}
+
+//
+// You would think we could just use the regular 64 bit PT free
+// routine, but no: the EPT format is slightly different, in that
+// it has no present bit. We signify "present" via the read,
+// write, and exec permission bits instead - an entry with none of
+// them set is unused.
+//
+static void delete_page_tables_ept64(ept_pml4_t * pml4) {
+ int i, j, k;
+
+ if (pml4 == NULL) {
+ return;
+ }
+
+ PrintDebug(VM_NONE, VCORE_NONE,"Deleting EPT Page Tables -- PML4 (%p)\n", pml4);
+
+ for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
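+ // an entry with no read/write/exec rights is unused ("not present" in EPT terms)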
+ if (!pml4[i].read && !pml4[i].write && !pml4[i].exec) {
+ continue;
+ }
+
+ ept_pdp_t * pdpe = (ept_pdp_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pml4[i].pdp_base_addr));
+
+ for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
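+ // skip unused entries and 1GB large pages - a large page has no PD below it to free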
+ if ((!pdpe[j].read && !pdpe[j].write && !pdpe[j].exec) || (pdpe[j].large_page == 1)) {
+ continue;
+ }
+
+ ept_pde_t * pde = (ept_pde_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pdpe[j].pd_base_addr));
+
+ for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
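+ // same logic one level down: skip unused entries and 2MB large pages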
+ if ((!pde[k].read && !pde[k].write && !pde[k].exec) || (pde[k].large_page == 1)) {
+ continue;
+ }
+
+ V3_FreePages((void *)(addr_t)BASE_TO_PAGE_ADDR_4KB(pde[k].pt_base_addr), 1);
+ }
+
+ V3_FreePages(V3_PAddr(pde), 1);
+ }
+
+ V3_FreePages(V3_PAddr(pdpe), 1);
+ }
+
+ V3_FreePages(V3_PAddr(pml4), 1);
+}
+
+
+
+static int deinit_ept(struct guest_info * core) {
+ ept_pml4_t *pml;
+
+ pml = (ept_pml4_t *)CR3_TO_PML4E64_VA(core->direct_map_pt);
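+ // direct_map_pt was set by init_ept to the physical address of the
+ // EPT PML4; since it is 4KB-aligned like a CR3, the CR3 conversion
+ // macro recovers a usable host VA for the walk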
+
+ delete_page_tables_ept64(pml);
+
+ core->direct_map_pt = 0;
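+ // clear the cached root so later teardown (e.g. v3_free_passthrough_pts)
+ // cannot mistake it for a live page table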
return 0;
}