From: Jack Lange
Date: Mon, 20 Apr 2009 01:38:51 +0000 (-0500)
Subject: added support for memory remapping
X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?p=palacios.git;a=commitdiff_plain;h=b06d0d0e5e57f5c4163a69efcabe6f96594ad814

added support for memory remapping
---

diff --git a/palacios/include/palacios/vmm_direct_paging.h b/palacios/include/palacios/vmm_direct_paging.h
index 8b3e0e4..d96ceea 100644
--- a/palacios/include/palacios/vmm_direct_paging.h
+++ b/palacios/include/palacios/vmm_direct_paging.h
@@ -35,6 +35,7 @@ int v3_handle_nested_pagefault(struct guest_info * info, addr_t fault_addr, pf_e
 int v3_activate_passthrough_pt(struct guest_info * info);
 
 int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr);
+int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr);
 
 #endif // ! __V3VEE__
 
diff --git a/palacios/include/palacios/vmm_shadow_paging.h b/palacios/include/palacios/vmm_shadow_paging.h
index 1633ec8..e10615a 100644
--- a/palacios/include/palacios/vmm_shadow_paging.h
+++ b/palacios/include/palacios/vmm_shadow_paging.h
@@ -56,7 +56,7 @@ int v3_handle_shadow_invlpg(struct guest_info * info);
 
 int v3_activate_shadow_pt(struct guest_info * info);
 
-
+int v3_invalidate_shadow_pts(struct guest_info * info);
 
 #endif // ! __V3VEE__
 
diff --git a/palacios/src/palacios/vmm_direct_paging.c b/palacios/src/palacios/vmm_direct_paging.c
index 2e86dbe..1f2c6e6 100644
--- a/palacios/src/palacios/vmm_direct_paging.c
+++ b/palacios/src/palacios/vmm_direct_paging.c
@@ -137,6 +137,46 @@ int v3_handle_nested_pagefault(struct guest_info * info, addr_t fault_addr, pf_e
 }
 
 int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr) {
+    v3_vm_cpu_mode_t mode = v3_get_cpu_mode(info);
+
+    switch(mode) {
+        case REAL:
+        case PROTECTED:
+            return invalidate_addr_32(info, inv_addr);
+
+        case PROTECTED_PAE:
+        case LONG:
+        case LONG_32_COMPAT:
+            // Long mode will only use 32PAE page tables...
+            return invalidate_addr_32pae(info, inv_addr);
+
+        default:
+            PrintError("Unknown CPU Mode\n");
+            break;
+    }
+    return -1;
+}
+
+
+int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr) {
+    v3_vm_cpu_mode_t mode = LONG;
+
+    switch(mode) {
+        case REAL:
+        case PROTECTED:
+            return invalidate_addr_32(info, inv_addr);
+
+        case PROTECTED_PAE:
+            return invalidate_addr_32pae(info, inv_addr);
+
+        case LONG:
+        case LONG_32_COMPAT:
+            return invalidate_addr_64(info, inv_addr);
+
+        default:
+            PrintError("Unknown CPU Mode\n");
+            break;
+    }
     return -1;
 }
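Note on the hardcoded mode above: nested page tables are walked in the host's paging format, not the guest's, so v3_invalidate_nested_addr() pins mode to LONG on the assumption of a 64-bit host. A sketch of making that assumption explicit with a build-time guard (__V3_64BIT__ is the macro Palacios uses elsewhere to distinguish host word size; this variant is illustrative, not part of the commit):

    int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr) {
    #ifdef __V3_64BIT__
        // 64-bit host: nested tables are 4-level long-mode tables
        v3_vm_cpu_mode_t mode = LONG;
    #else
        // 32-bit host: nested tables use the 2-level 32-bit format
        v3_vm_cpu_mode_t mode = PROTECTED;
    #endif

        switch(mode) {
            case REAL:
            case PROTECTED:
                return invalidate_addr_32(info, inv_addr);
            case PROTECTED_PAE:
                return invalidate_addr_32pae(info, inv_addr);
            case LONG:
            case LONG_32_COMPAT:
                return invalidate_addr_64(info, inv_addr);
            default:
                PrintError("Unknown CPU Mode\n");
                break;
        }

        return -1;
    }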
diff --git a/palacios/src/palacios/vmm_direct_paging_32.h b/palacios/src/palacios/vmm_direct_paging_32.h
index 42da5b1..c15712b 100644
--- a/palacios/src/palacios/vmm_direct_paging_32.h
+++ b/palacios/src/palacios/vmm_direct_paging_32.h
@@ -107,4 +107,40 @@ static inline int handle_passthrough_pagefault_32(struct guest_info * info,
 }
 
+
+
+static inline int invalidate_addr_32(struct guest_info * info, addr_t inv_addr) {
+    pde32_t * pde = NULL;
+    pte32_t * pte = NULL;
+
+    // TODO:
+    // Call INVLPGA
+
+    // clear the page table entry
+    int pde_index = PDE32_INDEX(inv_addr);
+    int pte_index = PTE32_INDEX(inv_addr);
+
+
+    // Lookup the correct PDE address based on the PAGING MODE
+    if (info->shdw_pg_mode == SHADOW_PAGING) {
+        pde = CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
+    } else {
+        pde = CR3_TO_PDE32_VA(info->direct_map_pt);
+    }
+
+    if (pde[pde_index].present == 0) {
+        return 0;
+    } else if (pde[pde_index].large_page) {
+        pde[pde_index].present = 0;
+        return 0;
+    }
+
+    pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
+
+    pte[pte_index].present = 0;
+
+    return 0;
+}
+
+
 #endif
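invalidate_addr_32() locates the entry to clear with the PDE32_INDEX/PTE32_INDEX macros from the Palacios paging headers. For reference, this is the standard 32-bit x86 address split those macros encode (the EX_-prefixed names below are illustrative stand-ins, not the Palacios definitions):

    /* 32-bit virtual address: | PD index (10) | PT index (10) | page offset (12) | */
    #define EX_PDE32_INDEX(addr)  (((addr) >> 22) & 0x3ff)   /* bits 31..22 */
    #define EX_PTE32_INDEX(addr)  (((addr) >> 12) & 0x3ff)   /* bits 21..12 */

Clearing pde[pde_index].present on a large (4MB) page therefore drops the whole 4MB mapping at once; there is no page table below it to walk.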
diff --git a/palacios/src/palacios/vmm_direct_paging_32pae.h b/palacios/src/palacios/vmm_direct_paging_32pae.h
index d681969..fb97ee4 100644
--- a/palacios/src/palacios/vmm_direct_paging_32pae.h
+++ b/palacios/src/palacios/vmm_direct_paging_32pae.h
@@ -120,4 +120,49 @@ static inline int handle_passthrough_pagefault_32pae(struct guest_info * info,
 }
 
+static inline int invalidate_addr_32pae(struct guest_info * info, addr_t inv_addr) {
+    pdpe32pae_t * pdpe = NULL;
+    pde32pae_t * pde = NULL;
+    pte32pae_t * pte = NULL;
+
+
+    // TODO:
+    // Call INVLPGA
+
+    // clear the page table entry
+    int pdpe_index = PDPE32PAE_INDEX(inv_addr);
+    int pde_index = PDE32PAE_INDEX(inv_addr);
+    int pte_index = PTE32PAE_INDEX(inv_addr);
+
+
+    // Lookup the correct PDPE address based on the PAGING MODE
+    if (info->shdw_pg_mode == SHADOW_PAGING) {
+        pdpe = CR3_TO_PDPE32PAE_VA(info->ctrl_regs.cr3);
+    } else {
+        pdpe = CR3_TO_PDPE32PAE_VA(info->direct_map_pt);
+    }
+
+
+    if (pdpe[pdpe_index].present == 0) {
+        return 0;
+    }
+
+    pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
+
+    if (pde[pde_index].present == 0) {
+        return 0;
+    } else if (pde[pde_index].large_page) {
+        pde[pde_index].present = 0;
+        return 0;
+    }
+
+    pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
+
+    pte[pte_index].present = 0;
+
+    return 0;
+}
+
+
+
 #endif
diff --git a/palacios/src/palacios/vmm_direct_paging_64.h b/palacios/src/palacios/vmm_direct_paging_64.h
index c42499d..1c819ee 100644
--- a/palacios/src/palacios/vmm_direct_paging_64.h
+++ b/palacios/src/palacios/vmm_direct_paging_64.h
@@ -143,5 +143,59 @@ static inline int handle_passthrough_pagefault_64(struct guest_info * info,
     return 0;
 }
 
+static inline int invalidate_addr_64(struct guest_info * info, addr_t inv_addr) {
+    pml4e64_t * pml = NULL;
+    pdpe64_t * pdpe = NULL;
+    pde64_t * pde = NULL;
+    pte64_t * pte = NULL;
+
+
+    // TODO:
+    // Call INVLPGA
+
+    // clear the page table entry
+    int pml_index = PML4E64_INDEX(inv_addr);
+    int pdpe_index = PDPE64_INDEX(inv_addr);
+    int pde_index = PDE64_INDEX(inv_addr);
+    int pte_index = PTE64_INDEX(inv_addr);
+
+
+    // Lookup the correct PML4 address based on the PAGING MODE
+    if (info->shdw_pg_mode == SHADOW_PAGING) {
+        pml = CR3_TO_PML4E64_VA(info->ctrl_regs.cr3);
+    } else {
+        pml = CR3_TO_PML4E64_VA(info->direct_map_pt);
+    }
+
+    if (pml[pml_index].present == 0) {
+        return 0;
+    }
+
+    pdpe = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pml[pml_index].pdp_base_addr));
+
+    if (pdpe[pdpe_index].present == 0) {
+        return 0;
+    } else if (pdpe[pdpe_index].large_page == 1) {
+        pdpe[pdpe_index].present = 0;
+        return 0;
+    }
+
+    pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
+
+    if (pde[pde_index].present == 0) {
+        return 0;
+    } else if (pde[pde_index].large_page == 1) {
+        pde[pde_index].present = 0;
+        return 0;
+    }
+
+    pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
+
+    pte[pte_index].present = 0;
+
+    return 0;
+}
+
+
 #endif
diff --git a/palacios/src/palacios/vmm_mem.c b/palacios/src/palacios/vmm_mem.c
index 92b6c63..659fd27 100644
--- a/palacios/src/palacios/vmm_mem.c
+++ b/palacios/src/palacios/vmm_mem.c
@@ -22,6 +22,9 @@
 #include 
 #include 
 
+#include <palacios/vmm_shadow_paging.h>
+#include <palacios/vmm_direct_paging.h>
+
 
 #define MEM_OFFSET_HCALL 0x1000
 
@@ -187,8 +190,34 @@ struct v3_shadow_region * insert_shadow_region(struct guest_info * info,
 
     v3_rb_insert_color(&(region->tree_node), &(info->mem_map.shdw_regions));
 
+    // flush virtual page tables
+    // 3 cases shadow, shadow passthrough, and nested
+    if (info->shdw_pg_mode == SHADOW_PAGING) {
+        v3_vm_mem_mode_t mem_mode = v3_get_mem_mode(info);
+
+        if (mem_mode == PHYSICAL_MEM) {
+            addr_t cur_addr;
+
+            for (cur_addr = region->guest_start;
+                 cur_addr < region->guest_end;
+                 cur_addr += PAGE_SIZE_4KB) {
+                v3_invalidate_passthrough_addr(info, cur_addr);
+            }
+        } else {
+            v3_invalidate_shadow_pts(info);
+        }
+
+    } else if (info->shdw_pg_mode == NESTED_PAGING) {
+        addr_t cur_addr;
+
+        for (cur_addr = region->guest_start;
+             cur_addr < region->guest_end;
+             cur_addr += PAGE_SIZE_4KB) {
+
+            v3_invalidate_nested_addr(info, cur_addr);
+        }
+    }
 
     return NULL;
 }
 
@@ -286,12 +315,43 @@ struct v3_shadow_region * v3_get_shadow_region(struct guest_info * info, addr_t
 
 void v3_delete_shadow_region(struct guest_info * info, struct v3_shadow_region * reg) {
-    if (reg != NULL) {
-        v3_rb_erase(&(reg->tree_node), &(info->mem_map.shdw_regions));
+    if (reg == NULL) {
+        return;
+    }
 
-        V3_Free(reg);
+    // flush virtual page tables
+    // 3 cases shadow, shadow passthrough, and nested
+    if (info->shdw_pg_mode == SHADOW_PAGING) {
+        v3_vm_mem_mode_t mem_mode = v3_get_mem_mode(info);
+
+        if (mem_mode == PHYSICAL_MEM) {
+            addr_t cur_addr;
+
+            for (cur_addr = reg->guest_start;
+                 cur_addr < reg->guest_end;
+                 cur_addr += PAGE_SIZE_4KB) {
+                v3_invalidate_passthrough_addr(info, cur_addr);
+            }
+        } else {
+            v3_invalidate_shadow_pts(info);
+        }
+
+    } else if (info->shdw_pg_mode == NESTED_PAGING) {
+        addr_t cur_addr;
+
+        for (cur_addr = reg->guest_start;
+             cur_addr < reg->guest_end;
+             cur_addr += PAGE_SIZE_4KB) {
+
+            v3_invalidate_nested_addr(info, cur_addr);
+        }
     }
+
+    v3_rb_erase(&(reg->tree_node), &(info->mem_map.shdw_regions));
+
+    V3_Free(reg);
+
     // flush virtual page tables
     // 3 cases shadow, shadow passthrough, and nested
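The same three-case flush (shadow, shadow passthrough, nested) now appears verbatim in both insert_shadow_region() and v3_delete_shadow_region(). For illustration, the shared logic factors into one helper (flush_region_pts is a hypothetical name, not part of this commit):

    // Flush any cached translations covering [start, end): per-page for
    // passthrough and nested tables, wholesale for full shadow paging.
    static void flush_region_pts(struct guest_info * info, addr_t start, addr_t end) {
        addr_t cur_addr;

        if (info->shdw_pg_mode == SHADOW_PAGING) {
            if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
                // shadow passthrough: clear each 4KB page in the region
                for (cur_addr = start; cur_addr < end; cur_addr += PAGE_SIZE_4KB) {
                    v3_invalidate_passthrough_addr(info, cur_addr);
                }
            } else {
                // full shadow paging: rebuild the shadow tables outright
                v3_invalidate_shadow_pts(info);
            }
        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            // nested paging: clear each 4KB page in the nested tables
            for (cur_addr = start; cur_addr < end; cur_addr += PAGE_SIZE_4KB) {
                v3_invalidate_nested_addr(info, cur_addr);
            }
        }
    }

Both call sites would then reduce to flush_region_pts(info, reg->guest_start, reg->guest_end).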
diff --git a/palacios/src/palacios/vmm_shadow_paging.c b/palacios/src/palacios/vmm_shadow_paging.c
index 559854a..a3c802f 100644
--- a/palacios/src/palacios/vmm_shadow_paging.c
+++ b/palacios/src/palacios/vmm_shadow_paging.c
@@ -98,6 +98,11 @@ int v3_activate_shadow_pt(struct guest_info * info) {
 
 
+// This must flush any caches
+// and reset the cr3 value to the correct value
+int v3_invalidate_shadow_pts(struct guest_info * info) {
+    return v3_activate_shadow_pt(info);
+}
 
 int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
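Taken together, this is what "memory remapping" amounts to: change the region map, then invalidate any cached translations so the guest's next access refaults and is resolved through the new mapping. A hypothetical remap built from this patch's pieces (v3_add_shadow_mem is assumed from the Palacios memory API; it is not shown in this diff):

    // Re-point a guest-physical range at a new host physical region.
    static int remap_guest_range(struct guest_info * info, addr_t guest_start,
                                 addr_t guest_end, addr_t new_host_addr) {
        struct v3_shadow_region * old_reg = v3_get_shadow_region(info, guest_start);

        if (old_reg != NULL) {
            // deletion now flushes stale translations for the old region
            v3_delete_shadow_region(info, old_reg);
        }

        // insertion flushes as well, so the next guest access page-faults
        // and is mapped through to new_host_addr
        return v3_add_shadow_mem(info, guest_start, guest_end, new_host_addr);
    }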