X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_shadow_paging.c;h=a3c802f3da5c83b2ff89bd8fc6acbea47f93f343;hb=70d3ac1e7be22e42fbf8f778367e9bf6d32e5b7f;hp=91004f92f3b60d57d61f68a0c37f7bcbb1e76c4d;hpb=9487b7d59cab801ef521a89187dfd1e33fa36c45;p=palacios-OLD.git

diff --git a/palacios/src/palacios/vmm_shadow_paging.c b/palacios/src/palacios/vmm_shadow_paging.c
index 91004f9..a3c802f 100644
--- a/palacios/src/palacios/vmm_shadow_paging.c
+++ b/palacios/src/palacios/vmm_shadow_paging.c
@@ -30,14 +30,6 @@
 #include 
 
-
-// set this to 1 if you want us to attempt to 
-// fetch multiple entries on a page fault
-#define SPECULATIVE_PAGING 1
-
-#define REGULAR_PAGE_FAULT 0
-#define SPECULATIVE_PAGE_FAULT 1
-
 #ifndef DEBUG_SHADOW_PAGING
 #undef PrintDebug
 #define PrintDebug(fmt, args...)
 
@@ -57,19 +49,6 @@
 };
 
 
-DEFINE_HASHTABLE_INSERT(add_pte_map, addr_t, addr_t);
-DEFINE_HASHTABLE_SEARCH(find_pte_map, addr_t, addr_t);
-//DEFINE_HASHTABLE_REMOVE(del_pte_map, addr_t, addr_t, 0);
-
-
-
-static uint_t pte_hash_fn(addr_t key) {
-    return hash_long(key, 32);
-}
-
-static int pte_equals(addr_t key1, addr_t key2) {
-    return (key1 == key2);
-}
 
 static struct shadow_page_data * create_new_shadow_pt(struct guest_info * info);
 static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
@@ -89,9 +68,6 @@ int v3_init_shadow_page_state(struct guest_info * info) {
     state->guest_cr0 = 0;
 
     INIT_LIST_HEAD(&(state->page_list));
-
-    state->cached_ptes = NULL;
-    state->cached_cr3 = 0;
 
     return 0;
 }
@@ -121,20 +97,15 @@ int v3_activate_shadow_pt(struct guest_info * info) {
 }
 
 
 
-int v3_activate_passthrough_pt(struct guest_info * info) {
-    // For now... But we need to change this....
-    // As soon as shadow paging becomes active the passthrough tables are hosed
-    // So this will cause chaos if it is called at that time
-    info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
-    //PrintError("Activate Passthrough Page tables not implemented\n");
-    return 0;
+// This must flush any caches
+// and reset the cr3 value to the correct value
+int v3_invalidate_shadow_pts(struct guest_info * info) {
+    return v3_activate_shadow_pt(info);
 }
 
-
 
 int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
-    int rc;
 
     if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
 	// If paging is not turned on we need to handle the special cases
@@ -143,38 +114,14 @@ int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_e
 
     switch (v3_get_cpu_mode(info)) {
 	case PROTECTED:
-	    return handle_shadow_pagefault_32(info, fault_addr, error_code);
+	    return handle_shadow_pagefault_32(info, fault_addr, error_code);
 	    break;
 	case PROTECTED_PAE:
 	    return handle_shadow_pagefault_32pae(info, fault_addr, error_code);
 	case LONG:
 	case LONG_32_COMPAT:
-	case LONG_16_COMPAT: {
-	    addr_t curr_addr;
-	    addr_t fault_addr_base;
-	    // first, we will handle the actual fault, non-speculatively
-	    rc=handle_shadow_pagefault_64(info, fault_addr, error_code, REGULAR_PAGE_FAULT);
-	    if (rc) {
-		return -1;
-	    }
-	    if (!SPECULATIVE_PAGING) {
-		return 0;
-	    }
-	    fault_addr_base=PAGE_ADDR_4KB(fault_addr);
-	    PrintDebug("Attempting speculative paging around %p\n",(void*)fault_addr_base);
-	    for (curr_addr = (fault_addr_base & (~0x1fffffLL)) ;
-		 curr_addr < (fault_addr_base | (0x1fffffLL)) ;
-		 curr_addr+=PAGE_SIZE) {
-		if (curr_addr!=fault_addr_base) {
-		    rc=handle_shadow_pagefault_64(info, curr_addr, error_code, SPECULATIVE_PAGE_FAULT);
-		    if (rc) {
-			PrintDebug("Speculative page fault handler failed at %p\n",(void*)curr_addr);
-			return -1;
-		    }
-		}
-	    }
-	    return 0;
-	}
+	case LONG_16_COMPAT:
+	    return handle_shadow_pagefault_64(info, fault_addr, error_code);
 	    break;
 	default:
 	    PrintError("Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));