X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?p=palacios.git;a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_shadow_paging.c;h=eeb493bc841b5e6380d0bdec559ae14b9667be85;hp=91004f92f3b60d57d61f68a0c37f7bcbb1e76c4d;hb=316c417125ed34aaa60f9fc5486a6d8dec4fb361;hpb=9487b7d59cab801ef521a89187dfd1e33fa36c45

diff --git a/palacios/src/palacios/vmm_shadow_paging.c b/palacios/src/palacios/vmm_shadow_paging.c
index 91004f9..eeb493b 100644
--- a/palacios/src/palacios/vmm_shadow_paging.c
+++ b/palacios/src/palacios/vmm_shadow_paging.c
@@ -30,15 +30,11 @@
 #include 
 
+#ifdef CONFIG_SYMBIOTIC_SWAP
+#include 
+#endif
 
-// set this to 1 if you want us to attempt to
-// fetch multiple entries on a page fault
-#define SPECULATIVE_PAGING 1
-
-#define REGULAR_PAGE_FAULT 0
-#define SPECULATIVE_PAGE_FAULT 1
-
-#ifndef DEBUG_SHADOW_PAGING
+#ifndef CONFIG_DEBUG_SHADOW_PAGING
 #undef PrintDebug
 #define PrintDebug(fmt, args...)
 #endif
@@ -57,22 +53,9 @@ struct shadow_page_data {
 };
 
-DEFINE_HASHTABLE_INSERT(add_pte_map, addr_t, addr_t);
-DEFINE_HASHTABLE_SEARCH(find_pte_map, addr_t, addr_t);
-//DEFINE_HASHTABLE_REMOVE(del_pte_map, addr_t, addr_t, 0);
-
-
-
-static uint_t pte_hash_fn(addr_t key) {
-    return hash_long(key, 32);
-}
-
-static int pte_equals(addr_t key1, addr_t key2) {
-    return (key1 == key2);
-}
 
 static struct shadow_page_data * create_new_shadow_pt(struct guest_info * info);
-static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
+static int inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
 static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access);
@@ -87,11 +70,9 @@ int v3_init_shadow_page_state(struct guest_info * info) {
 
     state->guest_cr3 = 0;
     state->guest_cr0 = 0;
+    state->guest_efer.value = 0x0LL;
 
     INIT_LIST_HEAD(&(state->page_list));
-
-    state->cached_ptes = NULL;
-    state->cached_cr3 = 0;
 
     return 0;
 }
@@ -102,7 +83,7 @@ int v3_init_shadow_page_state(struct guest_info * info) {
 // creates new shadow page tables
 // updates the shadow CR3 register to point to the new pts
 int v3_activate_shadow_pt(struct guest_info * info) {
-    switch (v3_get_cpu_mode(info)) {
+    switch (v3_get_vm_cpu_mode(info)) {
 
     case PROTECTED:
 	return activate_shadow_pt_32(info);
@@ -113,7 +94,7 @@ int v3_activate_shadow_pt(struct guest_info * info) {
     case LONG_16_COMPAT:
 	return activate_shadow_pt_64(info);
     default:
-	PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
+	PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(info)));
 	return -1;
     }
@@ -121,63 +102,34 @@ int v3_activate_shadow_pt(struct guest_info * info) {
 }
 
-int v3_activate_passthrough_pt(struct guest_info * info) {
-    // For now... But we need to change this....
-    // As soon as shadow paging becomes active the passthrough tables are hosed
-    // So this will cause chaos if it is called at that time
-    info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
-    //PrintError("Activate Passthrough Page tables not implemented\n");
-    return 0;
+// This must flush any caches
+// and reset the cr3 value to the correct value
+int v3_invalidate_shadow_pts(struct guest_info * info) {
+    return v3_activate_shadow_pt(info);
 }
 
 
 int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
-    int rc;
 
-    if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
+    if (v3_get_vm_mem_mode(info) == PHYSICAL_MEM) {
 	// If paging is not turned on we need to handle the special cases
 	return v3_handle_passthrough_pagefault(info, fault_addr, error_code);
-    } else if (v3_get_mem_mode(info) == VIRTUAL_MEM) {
+    } else if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
 
-	switch (v3_get_cpu_mode(info)) {
+	switch (v3_get_vm_cpu_mode(info)) {
 	case PROTECTED:
-	    return handle_shadow_pagefault_32(info, fault_addr, error_code);
+	    return handle_shadow_pagefault_32(info, fault_addr, error_code);
 	    break;
 	case PROTECTED_PAE:
	    return handle_shadow_pagefault_32pae(info, fault_addr, error_code);
 	case LONG:
 	case LONG_32_COMPAT:
-	case LONG_16_COMPAT: {
-	    addr_t curr_addr;
-	    addr_t fault_addr_base;
-	    // first, we will handle the actual fault, non-speculatively
-	    rc=handle_shadow_pagefault_64(info, fault_addr, error_code, REGULAR_PAGE_FAULT);
-	    if (rc) {
-		return -1;
-	    }
-	    if (!SPECULATIVE_PAGING) {
-		return 0;
-	    }
-	    fault_addr_base=PAGE_ADDR_4KB(fault_addr);
-	    PrintDebug("Attempting speculative paging around %p\n",(void*)fault_addr_base);
-	    for (curr_addr = (fault_addr_base & (~0x1fffffLL)) ;
-		 curr_addr < (fault_addr_base | (0x1fffffLL)) ;
-		 curr_addr+=PAGE_SIZE) {
-		if (curr_addr!=fault_addr_base) {
-		    rc=handle_shadow_pagefault_64(info, curr_addr, error_code, SPECULATIVE_PAGE_FAULT);
-		    if (rc) {
-			PrintDebug("Speculative page fault handler failed at %p\n",(void*)curr_addr);
-			return -1;
-		    }
-		}
-	    }
-	    return 0;
-	}
+	case LONG_16_COMPAT:
+	    return handle_shadow_pagefault_64(info, fault_addr, error_code);
 	    break;
 	default:
-	    PrintError("Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
+	    PrintError("Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(info)));
 	    return -1;
 	}
     } else {
@@ -193,14 +145,14 @@ int v3_handle_shadow_invlpg(struct guest_info * info) {
     int ret = 0;
     addr_t vaddr = 0;
 
-    if (v3_get_mem_mode(info) != VIRTUAL_MEM) {
+    if (v3_get_vm_mem_mode(info) != VIRTUAL_MEM) {
 	// Paging must be turned on...
 	// should handle with some sort of fault I think
 	PrintError("ERROR: INVLPG called in non paged mode\n");
 	return -1;
     }
 
-    if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
+    if (v3_get_vm_mem_mode(info) == PHYSICAL_MEM) {
 	ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
     } else {
 	ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
     }
@@ -227,7 +179,7 @@ int v3_handle_shadow_invlpg(struct guest_info * info) {
 
     info->rip += dec_instr.instr_length;
 
-    switch (v3_get_cpu_mode(info)) {
+    switch (v3_get_vm_cpu_mode(info)) {
     case PROTECTED:
 	return handle_shadow_invlpg_32(info, vaddr);
     case PROTECTED_PAE:
@@ -237,7 +189,7 @@ int v3_handle_shadow_invlpg(struct guest_info * info) {
     case LONG_16_COMPAT:
 	return handle_shadow_invlpg_64(info, vaddr);
     default:
-	PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
+	PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(info)));
 	return -1;
     }
 }
@@ -288,13 +240,17 @@ static struct shadow_page_data * create_new_shadow_pt(struct guest_info * info)
 }
 
 
-static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+static int inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+
+#ifdef CONFIG_PROFILE_VMM
     if (info->enable_profiler) {
 	info->profiler.guest_pf_cnt++;
     }
+#endif
 
     info->ctrl_regs.cr2 = fault_addr;
-    v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
+
+    return v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
 }
@@ -317,12 +273,19 @@ static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shado
 	return 1;
     }
 
-    if ((shadow_access == PT_ACCESS_NOT_PRESENT) &&
-	(guest_access == PT_ACCESS_NOT_PRESENT)) {
+    /*
+      if ((shadow_access == PT_ACCESS_NOT_PRESENT) &&
+	  (guest_access == PT_ACCESS_NOT_PRESENT)) {
+	  // Page tables completely blank, handle guest first
+	  return 1;
+      }
+    */
+
+    if (guest_access == PT_ACCESS_NOT_PRESENT) {
 	// Page tables completely blank, handle guest first
 	return 1;
     }
-    
+
     // Otherwise we'll handle the guest fault later...?
 }
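The most consequential semantic change in this diff is in is_guest_pf(): previously a fault was deferred to the guest only when BOTH the shadow and the guest page-table entries were not present, while after the patch any not-present guest entry is classified as a guest fault regardless of the shadow state (the old test survives only as a commented-out block). The standalone sketch below contrasts the two rules; the enum and the main() driver are simplified stand-ins for illustration, and only the two conditionals are lifted from the diff:

    #include <stdio.h>

    /* Simplified stand-in for Palacios' pt_access_status_t */
    typedef enum {
        PT_ACCESS_OK,
        PT_ACCESS_NOT_PRESENT,
        PT_ACCESS_WRITE_ERROR
    } pt_access_status_t;

    /* Old rule (now commented out in the patch): defer to the guest only
     * when both the shadow and guest entries are missing. */
    static int is_guest_pf_old(pt_access_status_t guest, pt_access_status_t shadow) {
        return (shadow == PT_ACCESS_NOT_PRESENT) && (guest == PT_ACCESS_NOT_PRESENT);
    }

    /* New rule: a missing guest entry is always the guest's fault to
     * handle, whatever the shadow entry says. */
    static int is_guest_pf_new(pt_access_status_t guest, pt_access_status_t shadow) {
        (void)shadow;  /* no longer consulted for this case */
        return (guest == PT_ACCESS_NOT_PRESENT);
    }

    int main(void) {
        /* The case whose classification changes: guest entry missing but
         * a (possibly stale) shadow entry still present. */
        printf("old=%d new=%d\n",
               is_guest_pf_old(PT_ACCESS_NOT_PRESENT, PT_ACCESS_OK),
               is_guest_pf_new(PT_ACCESS_NOT_PRESENT, PT_ACCESS_OK));  /* old=0 new=1 */
        return 0;
    }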
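For context on what was deleted from v3_handle_shadow_pagefault(): the old 64-bit path, after servicing the real fault, walked every 4KB page in the surrounding 2MB-aligned region and faulted each one in with SPECULATIVE_PAGE_FAULT. A minimal sketch of that address arithmetic, with page_addr_4kb() as a hypothetical reimplementation of Palacios' PAGE_ADDR_4KB macro (the masks and loop bounds are taken from the deleted code):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    /* Hypothetical stand-in for Palacios' PAGE_ADDR_4KB(): 4KB-align an address */
    static uint64_t page_addr_4kb(uint64_t addr) {
        return addr & ~(PAGE_SIZE - 1);
    }

    int main(void) {
        uint64_t fault_addr = 0x7f00deadbULL;       /* arbitrary example fault */
        uint64_t base  = page_addr_4kb(fault_addr);
        uint64_t start = base & ~0x1fffffULL;       /* round down to 2MB boundary */
        uint64_t end   = base |  0x1fffffULL;       /* last byte of the 2MB region */
        unsigned pages = 0;

        for (uint64_t addr = start; addr < end; addr += PAGE_SIZE) {
            if (addr != base) {
                pages++;  /* each of these got a SPECULATIVE_PAGE_FAULT call */
            }
        }
        printf("would prefetch %u sibling pages around %#llx\n",
               pages, (unsigned long long)base);   /* 511 siblings in the 2MB region */
        return 0;
    }

Dropping this prefetch loop (together with the unused pte hash-table cache removed higher up) reduces the 64-bit case to a single handle_shadow_pagefault_64() call per fault, matching the shape of the 32-bit and PAE paths.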