int v3_activate_passthrough_pt(struct guest_info * info);
int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr);
+int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr);
#endif // ! __V3VEE__
int v3_activate_shadow_pt(struct guest_info * info);
-
+int v3_invalidate_shadow_pts(struct guest_info * info);
#endif // ! __V3VEE__
}
int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr) {
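+  // Dispatch on the guest's current paging mode so the passthrough
+  // tables are walked in the matching page table format.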
+ v3_vm_cpu_mode_t mode = v3_get_cpu_mode(info);
+
+ switch(mode) {
+ case REAL:
+ case PROTECTED:
+ return invalidate_addr_32(info, inv_addr);
+
+ case PROTECTED_PAE:
+ case LONG:
+ case LONG_32_COMPAT:
+      // Long mode will only use 32PAE passthrough page tables...
+ return invalidate_addr_32pae(info, inv_addr);
+
+ default:
+ PrintError("Unknown CPU Mode\n");
+ break;
+ }
+ return -1;
+}
+
+
+int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr) {
+ v3_vm_cpu_mode_t mode = LONG;
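+  // The mode is fixed to LONG rather than taken from the guest: nested
+  // page tables are walked in the host's paging mode, not the guest's.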
+
+ switch(mode) {
+ case REAL:
+ case PROTECTED:
+ return invalidate_addr_32(info, inv_addr);
+
+ case PROTECTED_PAE:
+ return invalidate_addr_32pae(info, inv_addr);
+
+ case LONG:
+ case LONG_32_COMPAT:
+ return invalidate_addr_64(info, inv_addr);
+
+ default:
+ PrintError("Unknown CPU Mode\n");
+ break;
+ }
return -1;
}
}
+
+
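+// Invalidate the mapping for inv_addr in a 32-bit (non-PAE) page table by
+// clearing the present bit on the PDE (for a large page) or on the PTE.
+// Note this only edits the in-memory tables; the TLB flush is still a TODO.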
+static inline int invalidate_addr_32(struct guest_info * info, addr_t inv_addr) {
+ pde32_t * pde = NULL;
+ pte32_t * pte = NULL;
+
+  // TODO: call INVLPGA to flush the stale hardware TLB entry as well
+
+ // clear the page table entry
+ int pde_index = PDE32_INDEX(inv_addr);
+ int pte_index = PTE32_INDEX(inv_addr);
+
+
+  // Look up the correct PDE address based on the paging mode (shadow vs. direct)
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ pde = CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
+ } else {
+ pde = CR3_TO_PDE32_VA(info->direct_map_pt);
+ }
+
+ if (pde[pde_index].present == 0) {
+ return 0;
+ } else if (pde[pde_index].large_page) {
+ pde[pde_index].present = 0;
+ return 0;
+ }
+
+ pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
+
+ pte[pte_index].present = 0;
+
+ return 0;
+}
+
+
#endif
}
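+// Invalidate the mapping for inv_addr in a 32-bit PAE page table by walking
+// PDPE -> PDE -> PTE and clearing the present bit on the PDE (for a large
+// page) or on the PTE.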
+static inline int invalidate_addr_32pae(struct guest_info * info, addr_t inv_addr) {
+ pdpe32pae_t * pdpe = NULL;
+ pde32pae_t * pde = NULL;
+ pte32pae_t * pte = NULL;
+
+
+  // TODO: call INVLPGA to flush the stale hardware TLB entry as well
+
+ // clear the page table entry
+ int pdpe_index = PDPE32PAE_INDEX(inv_addr);
+ int pde_index = PDE32PAE_INDEX(inv_addr);
+ int pte_index = PTE32PAE_INDEX(inv_addr);
+
+
+  // Look up the correct PDPE address based on the paging mode (shadow vs. direct)
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ pdpe = CR3_TO_PDPE32PAE_VA(info->ctrl_regs.cr3);
+ } else {
+ pdpe = CR3_TO_PDPE32PAE_VA(info->direct_map_pt);
+ }
+
+
+ if (pdpe[pdpe_index].present == 0) {
+ return 0;
+ }
+
+ pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
+
+ if (pde[pde_index].present == 0) {
+ return 0;
+ } else if (pde[pde_index].large_page) {
+ pde[pde_index].present = 0;
+ return 0;
+ }
+
+ pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
+
+ pte[pte_index].present = 0;
+
+ return 0;
+}
+
+
+
#endif
return 0;
}
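+// Invalidate the mapping for inv_addr in a 64-bit (long mode) page table by
+// walking PML4 -> PDPE -> PDE -> PTE and clearing the present bit on the
+// first large-page entry encountered, or on the PTE.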
+static inline int invalidate_addr_64(struct guest_info * info, addr_t inv_addr) {
+ pml4e64_t * pml = NULL;
+ pdpe64_t * pdpe = NULL;
+ pde64_t * pde = NULL;
+ pte64_t * pte = NULL;
+
+
+  // TODO: call INVLPGA to flush the stale hardware TLB entry as well
+
+ // clear the page table entry
+ int pml_index = PML4E64_INDEX(inv_addr);
+ int pdpe_index = PDPE64_INDEX(inv_addr);
+ int pde_index = PDE64_INDEX(inv_addr);
+ int pte_index = PTE64_INDEX(inv_addr);
+
+
+  // Look up the correct PML4 address based on the paging mode (shadow vs. direct)
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ pml = CR3_TO_PML4E64_VA(info->ctrl_regs.cr3);
+ } else {
+ pml = CR3_TO_PML4E64_VA(info->direct_map_pt);
+ }
+
+ if (pml[pml_index].present == 0) {
+ return 0;
+ }
+
+ pdpe = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pml[pml_index].pdp_base_addr));
+
+ if (pdpe[pdpe_index].present == 0) {
+ return 0;
+ } else if (pdpe[pdpe_index].large_page == 1) {
+ pdpe[pdpe_index].present = 0;
+ return 0;
+ }
+
+ pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
+
+ if (pde[pde_index].present == 0) {
+ return 0;
+ } else if (pde[pde_index].large_page == 1) {
+ pde[pde_index].present = 0;
+ return 0;
+ }
+
+ pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
+
+ pte[pte_index].present = 0;
+
+ return 0;
+}
+
+
#endif
#include <palacios/vmm_util.h>
#include <palacios/vmm_emulator.h>
+#include <palacios/vmm_shadow_paging.h>
+#include <palacios/vmm_direct_paging.h>
+
#define MEM_OFFSET_HCALL 0x1000
v3_rb_insert_color(&(region->tree_node), &(info->mem_map.shdw_regions));
+
// flush virtual page tables
// 3 cases shadow, shadow passthrough, and nested
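+    // Under shadow paging, PHYSICAL_MEM means the guest has paging disabled,
+    // so the shadow passthrough tables map the region directly and each page
+    // can be invalidated individually; once the guest enables paging, the
+    // shadow page tables have to be dropped wholesale instead.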
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ v3_vm_mem_mode_t mem_mode = v3_get_mem_mode(info);
+
+ if (mem_mode == PHYSICAL_MEM) {
+ addr_t cur_addr;
+
+ for (cur_addr = region->guest_start;
+ cur_addr < region->guest_end;
+ cur_addr += PAGE_SIZE_4KB) {
+ v3_invalidate_passthrough_addr(info, cur_addr);
+ }
+ } else {
+ v3_invalidate_shadow_pts(info);
+ }
+
+ } else if (info->shdw_pg_mode == NESTED_PAGING) {
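+	// Nested page tables map guest physical addresses directly, so the
+	// region can be invalidated page by page.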
+ addr_t cur_addr;
+
+ for (cur_addr = region->guest_start;
+ cur_addr < region->guest_end;
+ cur_addr += PAGE_SIZE_4KB) {
+
+ v3_invalidate_nested_addr(info, cur_addr);
+ }
+ }
return NULL;
}
void v3_delete_shadow_region(struct guest_info * info, struct v3_shadow_region * reg) {
- if (reg != NULL) {
- v3_rb_erase(&(reg->tree_node), &(info->mem_map.shdw_regions));
+ if (reg == NULL) {
+ return;
+ }
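+    // Flush before unlinking the region: the invalidation loops below still
+    // read reg->guest_start and reg->guest_end.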
- V3_Free(reg);
+ // flush virtual page tables
+ // 3 cases shadow, shadow passthrough, and nested
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ v3_vm_mem_mode_t mem_mode = v3_get_mem_mode(info);
+
+ if (mem_mode == PHYSICAL_MEM) {
+ addr_t cur_addr;
+
+ for (cur_addr = reg->guest_start;
+ cur_addr < reg->guest_end;
+ cur_addr += PAGE_SIZE_4KB) {
+ v3_invalidate_passthrough_addr(info, cur_addr);
+ }
+ } else {
+ v3_invalidate_shadow_pts(info);
+ }
+
+ } else if (info->shdw_pg_mode == NESTED_PAGING) {
+ addr_t cur_addr;
+
+ for (cur_addr = reg->guest_start;
+ cur_addr < reg->guest_end;
+ cur_addr += PAGE_SIZE_4KB) {
+
+ v3_invalidate_nested_addr(info, cur_addr);
+ }
}
+
+ v3_rb_erase(&(reg->tree_node), &(info->mem_map.shdw_regions));
+
+ V3_Free(reg);
+
// flush virtual page tables
// 3 cases shadow, shadow passthrough, and nested
+// This must flush any cached shadow page table state
+// and reset the CR3 value to the correct one
+int v3_invalidate_shadow_pts(struct guest_info * info) {
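+    // Re-activating the shadow page tables installs a fresh top-level table
+    // and reloads CR3, which drops the stale shadow mappings (assuming
+    // v3_activate_shadow_pt builds a new context rather than reusing the old).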
+ return v3_activate_shadow_pt(info);
+}
int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {