diff --git a/palacios/src/palacios/vmm_shadow_paging.c b/palacios/src/palacios/vmm_shadow_paging.c
index 611e0ea..2475734 100644
--- a/palacios/src/palacios/vmm_shadow_paging.c
+++ b/palacios/src/palacios/vmm_shadow_paging.c
@@ -28,6 +28,8 @@
 
 #include <palacios/vmm_hashtable.h>
 
+#include <palacios/vmm_direct_paging.h>
+
 #ifndef DEBUG_SHADOW_PAGING
 #undef PrintDebug
 #define PrintDebug(fmt, args...)
@@ -39,34 +41,14 @@
  ***/
 
 
-struct guest_table {
-    addr_t cr3;
-    struct list_head link;
-};
-
-
-struct backptr {
-    addr_t ptr;
-    struct list_head link;
-};
-
-
 struct shadow_page_data {
-    addr_t ptr;
-    addr_t guest_addr;
-
-    struct list_head backptrs;
-    struct list_head guest_tables;
+    v3_reg_t cr3;
+    addr_t page_pa;
+
+    struct list_head page_list_node;
 };
 
-
-
-//DEFINE_HASHTABLE_INSERT(add_cr3_to_cache, addr_t, struct hashtable *);
-//DEFINE_HASHTABLE_SEARCH(find_cr3_in_cache, addr_t, struct hashtable *);
-//DEFINE_HASHTABLE_REMOVE(del_cr3_from_cache, addr_t, struct hashtable *, 0);
-
-
 DEFINE_HASHTABLE_INSERT(add_pte_map, addr_t, addr_t);
 DEFINE_HASHTABLE_SEARCH(find_pte_map, addr_t, addr_t);
 //DEFINE_HASHTABLE_REMOVE(del_pte_map, addr_t, addr_t, 0);
 
@@ -81,7 +63,7 @@ static int pte_equals(addr_t key1, addr_t key2) {
     return (key1 == key2);
 }
 
-static addr_t create_new_shadow_pt();
+static struct shadow_page_data * create_new_shadow_pt(struct guest_info * info);
 static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
 static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access);
 
@@ -98,6 +80,8 @@ int v3_init_shadow_page_state(struct guest_info * info) {
     state->guest_cr3 = 0;
     state->guest_cr0 = 0;
 
+    INIT_LIST_HEAD(&(state->page_list));
+
     state->cached_ptes = NULL;
     state->cached_cr3 = 0;
 
@@ -110,7 +94,7 @@ int v3_init_shadow_page_state(struct guest_info * info) {
 // creates new shadow page tables
 // updates the shadow CR3 register to point to the new pts
 int v3_activate_shadow_pt(struct guest_info * info) {
-    switch (info->cpu_mode) {
+    switch (v3_get_cpu_mode(info)) {
 
     case PROTECTED:
         return activate_shadow_pt_32(info);
@@ -121,7 +105,7 @@ int v3_activate_shadow_pt(struct guest_info * info) {
     case LONG_16_COMPAT:
         return activate_shadow_pt_64(info);
     default:
-        PrintError("Invalid CPU mode: %d\n", info->cpu_mode);
+        PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
         return -1;
     }
 
@@ -143,22 +127,24 @@ int v3_activate_passthrough_pt(struct guest_info * info) {
 
 int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
 
-    if (info->mem_mode == PHYSICAL_MEM) {
+    if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
         // If paging is not turned on we need to handle the special cases
-        return handle_special_page_fault(info, fault_addr, fault_addr, error_code);
-    } else if (info->mem_mode == VIRTUAL_MEM) {
+        return v3_handle_passthrough_pagefault(info, fault_addr, error_code);
+    } else if (v3_get_mem_mode(info) == VIRTUAL_MEM) {
 
-        switch (info->cpu_mode) {
+        switch (v3_get_cpu_mode(info)) {
         case PROTECTED:
             return handle_shadow_pagefault_32(info, fault_addr, error_code);
             break;
         case PROTECTED_PAE:
             return handle_shadow_pagefault_32pae(info, fault_addr, error_code);
         case LONG:
+        case LONG_32_COMPAT:
+        case LONG_16_COMPAT:
             return handle_shadow_pagefault_64(info, fault_addr, error_code);
             break;
         default:
-            PrintError("Unhandled CPU Mode\n");
+            PrintError("Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
             return -1;
         }
     } else {
@@ -174,14 +160,14 @@ int v3_handle_shadow_invlpg(struct guest_info * info) {
     int ret = 0;
     addr_t vaddr = 0;
 
-    if (info->mem_mode != VIRTUAL_MEM) {
+    if (v3_get_mem_mode(info) != VIRTUAL_MEM) {
         // Paging must be turned on...
         // should handle with some sort of fault I think
         PrintError("ERROR: INVLPG called in non paged mode\n");
         return -1;
     }
 
-    if (info->mem_mode == PHYSICAL_MEM) {
+    if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
         ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
     } else {
         ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
     }
@@ -208,7 +194,7 @@ int v3_handle_shadow_invlpg(struct guest_info * info) {
 
     info->rip += dec_instr.instr_length;
 
-    switch (info->cpu_mode) {
+    switch (v3_get_cpu_mode(info)) {
     case PROTECTED:
         return handle_shadow_invlpg_32(info, vaddr);
     case PROTECTED_PAE:
@@ -218,7 +204,7 @@ int v3_handle_shadow_invlpg(struct guest_info * info) {
     case LONG_16_COMPAT:
         return handle_shadow_invlpg_64(info, vaddr);
     default:
-        PrintError("Invalid CPU mode: %d\n", info->cpu_mode);
+        PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
         return -1;
     }
 }
@@ -226,13 +212,42 @@ int v3_handle_shadow_invlpg(struct guest_info * info) {
 
 
 
-static addr_t create_new_shadow_pt() {
-    void * host_pde = 0;
+static struct shadow_page_data * create_new_shadow_pt(struct guest_info * info) {
+    struct shadow_page_state * state = &(info->shdw_pg_state);
+    v3_reg_t cur_cr3 = info->ctrl_regs.cr3;
+    struct shadow_page_data * page_tail = NULL;
+    addr_t shdw_page = 0;
 
-    host_pde = V3_VAddr(V3_AllocPages(1));
-    memset(host_pde, 0, PAGE_SIZE);
+    if (!list_empty(&(state->page_list))) {
+        page_tail = list_tail_entry(&(state->page_list), struct shadow_page_data, page_list_node);
+
+        if (page_tail->cr3 != cur_cr3) {
+            PrintDebug("Reusing old shadow Page: %p (cur_CR3=%p)(page_cr3=%p) \n",
+                       (void *)page_tail->page_pa, (void *)cur_cr3, (void *)(page_tail->cr3));
+
+            list_move(&(page_tail->page_list_node), &(state->page_list));
+
+            memset(V3_VAddr((void *)(page_tail->page_pa)), 0, PAGE_SIZE_4KB);
+
+
+            return page_tail;
+        }
+    }
 
-    return (addr_t)host_pde;
+    // else
+
+    page_tail = (struct shadow_page_data *)V3_Malloc(sizeof(struct shadow_page_data));
+    page_tail->page_pa = (addr_t)V3_AllocPages(1);
+
+    PrintDebug("Allocating new shadow Page: %p (cur_cr3=%p)\n", (void *)page_tail->page_pa, (void *)cur_cr3);
+
+    page_tail->cr3 = cur_cr3;
+    list_add(&(page_tail->page_list_node), &(state->page_list));
+
+    shdw_page = (addr_t)V3_VAddr((void *)(page_tail->page_pa));
+    memset((void *)shdw_page, 0, PAGE_SIZE_4KB);
+
+    return page_tail;
 }
 
@@ -248,9 +263,9 @@ static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_erro
 
 static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access) {
 
     /* basically the reasoning is that there can be multiple reasons for a page fault:
-       If there is a permissions failure for a page present in the guest _BUT_ 
-       the reason for the fault was that the page is not present in the shadow, 
-       _THEN_ we have to map the shadow page in and reexecute, this will generate 
+       If there is a permissions failure for a page present in the guest _BUT_
+       the reason for the fault was that the page is not present in the shadow,
+       _THEN_ we have to map the shadow page in and reexecute, this will generate
        a permissions fault which is _THEN_ valid to send to the guest
        _UNLESS_ both the guest and shadow have marked the page as not present
@@ -258,7 +273,7 @@ static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shado
     */
 
     if (guest_access != PT_ACCESS_OK) { // Guest Access Error
-        
+
         if ((shadow_access != PT_ACCESS_NOT_PRESENT) &&
             (guest_access != PT_ACCESS_NOT_PRESENT)) {
             // aka (guest permission error)
@@ -266,7 +281,7 @@ static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shado
         }
 
         if ((shadow_access == PT_ACCESS_NOT_PRESENT) &&
-            (guest_access == PT_ACCESS_NOT_PRESENT)) {      
+            (guest_access == PT_ACCESS_NOT_PRESENT)) {
             // Page tables completely blank, handle guest first
             return 1;
         }
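
The heart of the refactoring above is create_new_shadow_pt(): shadow page tables are no longer handed out as untracked raw pages, but kept on a per-guest list (state->page_list) with the most recently allocated page at the head. On each request, the tail (least recently used) page is recycled, zeroed and moved back to the head, whenever it was allocated under a different guest CR3 than the current one, i.e. it belongs to a stale paging context; otherwise a fresh page is allocated. The sketch below is a minimal, self-contained model of just that policy, not Palacios code: get_shadow_page(), push_front() and the hand-rolled list are hypothetical stand-ins for the vmm_list.h helpers (list_add()/list_move()/list_tail_entry()) and for V3_Malloc()/V3_AllocPages().

/* Standalone model of the recycling policy in create_new_shadow_pt() above.
 * All names here are hypothetical; compile with: cc -o shadow_pool shadow_pool.c */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE_4KB 4096
typedef unsigned long v3_reg_t;

struct shadow_page {
    v3_reg_t cr3;               /* guest CR3 in force when the page was allocated */
    void * page;                /* stands in for the 4KB page at page_pa */
    struct shadow_page * prev;  /* doubly linked list: head = most recently used */
    struct shadow_page * next;
};

static struct shadow_page * head = NULL;
static struct shadow_page * tail = NULL;   /* least recently used: reuse candidate */

static void push_front(struct shadow_page * p) {
    p->prev = NULL;
    p->next = head;
    if (head) { head->prev = p; }
    head = p;
    if (!tail) { tail = p; }
}

static void unlink_page(struct shadow_page * p) {
    if (p->prev) { p->prev->next = p->next; } else { head = p->next; }
    if (p->next) { p->next->prev = p->prev; } else { tail = p->prev; }
}

/* Same decision create_new_shadow_pt() makes: if the oldest page was allocated
 * under a different CR3 it belongs to a stale context, so zero it and move it
 * to the front of the list; otherwise allocate a fresh page. */
static struct shadow_page * get_shadow_page(v3_reg_t cur_cr3) {
    struct shadow_page * p = tail;

    if ((p != NULL) && (p->cr3 != cur_cr3)) {
        unlink_page(p);                      /* together these model list_move() */
        push_front(p);
        memset(p->page, 0, PAGE_SIZE_4KB);   /* pages are always handed back zeroed */
        /* note: p->cr3 is left as recorded at allocation, as in the hunk above */
        return p;
    }

    p = malloc(sizeof(struct shadow_page));  /* V3_Malloc() stand-in */
    p->page = calloc(1, PAGE_SIZE_4KB);      /* V3_AllocPages(1) stand-in */
    p->cr3 = cur_cr3;
    push_front(p);
    return p;
}

int main(void) {
    struct shadow_page * a = get_shadow_page(0x1000);  /* fresh allocation */
    struct shadow_page * b = get_shadow_page(0x1000);  /* tail has same CR3: fresh again */
    struct shadow_page * c = get_shadow_page(0x2000);  /* CR3 switched: recycles 'a' */

    printf("a=%p b=%p c=%p, c %s a\n", (void *)a, (void *)b, (void *)c,
           (c == a) ? "recycled" : "did not recycle");
    return 0;
}

As in the hunk above, only the single least-recently-used entry is ever examined, so the pool keeps growing while the guest stays within one address space and is reclaimed lazily, one page per allocation, once the guest switches CR3.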