X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?p=palacios.git;a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_shadow_paging.c;h=0afc383269000e456d57847a5cdfc6c5d86a1973;hp=8074c1689767803b840438ddc0df2d92d87fa35f;hb=0e097100a26bc43eb8964734fa43130fc4c71429;hpb=61d51be2ae5c4ba6d6824204d5a7136f21dd7d38

diff --git a/palacios/src/palacios/vmm_shadow_paging.c b/palacios/src/palacios/vmm_shadow_paging.c
index 8074c16..0afc383 100644
--- a/palacios/src/palacios/vmm_shadow_paging.c
+++ b/palacios/src/palacios/vmm_shadow_paging.c
@@ -30,7 +30,16 @@
 #include 
 
 
-#ifndef DEBUG_SHADOW_PAGING
+
+#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+#include 
+#endif
+
+#ifdef CONFIG_SYMBIOTIC_SWAP
+#include 
+#endif
+
+#ifndef CONFIG_DEBUG_SHADOW_PAGING
 #undef PrintDebug
 #define PrintDebug(fmt, args...)
 #endif
@@ -49,22 +58,9 @@ struct shadow_page_data {
 };
 
 
-DEFINE_HASHTABLE_INSERT(add_pte_map, addr_t, addr_t);
-DEFINE_HASHTABLE_SEARCH(find_pte_map, addr_t, addr_t);
-//DEFINE_HASHTABLE_REMOVE(del_pte_map, addr_t, addr_t, 0);
-
-
-
-static uint_t pte_hash_fn(addr_t key) {
-    return hash_long(key, 32);
-}
-
-static int pte_equals(addr_t key1, addr_t key2) {
-    return (key1 == key2);
-}
 
 static struct shadow_page_data * create_new_shadow_pt(struct guest_info * info);
-static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
+static int inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
 static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access);
 
 
@@ -74,16 +70,28 @@ static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shado
 
 
 
+#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+static void telemetry_cb(struct guest_info * info, void * private_data, char * hdr) {
+    V3_Print("%s Guest Page faults: %d\n", hdr, info->shdw_pg_state.guest_faults);
+}
+#endif
+
+
+
 int v3_init_shadow_page_state(struct guest_info * info) {
     struct shadow_page_state * state = &(info->shdw_pg_state);
 
     state->guest_cr3 = 0;
     state->guest_cr0 = 0;
+    state->guest_efer.value = 0x0LL;
 
     INIT_LIST_HEAD(&(state->page_list));
 
-    state->cached_ptes = NULL;
-    state->cached_cr3 = 0;
-
+#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+    if (info->enable_telemetry) {
+        v3_add_telemetry_cb(info, telemetry_cb, NULL);
+    }
+#endif
+
     return 0;
 }
@@ -94,7 +102,7 @@ int v3_init_shadow_page_state(struct guest_info * info) {
 // creates new shadow page tables
 // updates the shadow CR3 register to point to the new pts
 int v3_activate_shadow_pt(struct guest_info * info) {
-    switch (v3_get_cpu_mode(info)) {
+    switch (v3_get_vm_cpu_mode(info)) {
 
     case PROTECTED:
         return activate_shadow_pt_32(info);
@@ -105,7 +113,7 @@ int v3_activate_shadow_pt(struct guest_info * info) {
     case LONG_16_COMPAT:
         return activate_shadow_pt_64(info);
     default:
-        PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
+        PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(info)));
         return -1;
     }
 
@@ -113,26 +121,22 @@ int v3_activate_shadow_pt(struct guest_info * info) {
 }
 
 
-int v3_activate_passthrough_pt(struct guest_info * info) {
-    // For now... But we need to change this....
-    // As soon as shadow paging becomes active the passthrough tables are hosed
-    // So this will cause chaos if it is called at that time
-    info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
-    //PrintError("Activate Passthrough Page tables not implemented\n");
-    return 0;
+// This must flush any caches
+// and reset the cr3 value to the correct value
+int v3_invalidate_shadow_pts(struct guest_info * info) {
+    return v3_activate_shadow_pt(info);
 }
 
-
 int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
 
-    if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
+    if (v3_get_vm_mem_mode(info) == PHYSICAL_MEM) {
         // If paging is not turned on we need to handle the special cases
         return v3_handle_passthrough_pagefault(info, fault_addr, error_code);
-    } else if (v3_get_mem_mode(info) == VIRTUAL_MEM) {
+    } else if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
 
-        switch (v3_get_cpu_mode(info)) {
+        switch (v3_get_vm_cpu_mode(info)) {
         case PROTECTED:
             return handle_shadow_pagefault_32(info, fault_addr, error_code);
             break;
 
 
@@ -144,7 +148,7 @@ int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_e
             return handle_shadow_pagefault_64(info, fault_addr, error_code);
             break;
         default:
-            PrintError("Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
+            PrintError("Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(info)));
             return -1;
         }
     } else {
@@ -160,14 +164,14 @@ int v3_handle_shadow_invlpg(struct guest_info * info) {
     int ret = 0;
     addr_t vaddr = 0;
 
-    if (v3_get_mem_mode(info) != VIRTUAL_MEM) {
+    if (v3_get_vm_mem_mode(info) != VIRTUAL_MEM) {
         // Paging must be turned on...
         // should handle with some sort of fault I think
         PrintError("ERROR: INVLPG called in non paged mode\n");
         return -1;
     }
 
-    if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
+    if (v3_get_vm_mem_mode(info) == PHYSICAL_MEM) {
         ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
     } else {
         ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
@@ -194,7 +198,7 @@ int v3_handle_shadow_invlpg(struct guest_info * info) {
 
     info->rip += dec_instr.instr_length;
 
-    switch (v3_get_cpu_mode(info)) {
+    switch (v3_get_vm_cpu_mode(info)) {
     case PROTECTED:
         return handle_shadow_invlpg_32(info, vaddr);
     case PROTECTED_PAE:
@@ -204,7 +208,7 @@ int v3_handle_shadow_invlpg(struct guest_info * info) {
     case LONG_16_COMPAT:
         return handle_shadow_invlpg_64(info, vaddr);
     default:
-        PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
+        PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(info)));
         return -1;
     }
 }
@@ -255,13 +259,14 @@ static struct shadow_page_data * create_new_shadow_pt(struct guest_info * info)
 }
 
 
-static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
-    if (info->enable_profiler) {
-        info->profiler.guest_pf_cnt++;
-    }
-
+static int inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
     info->ctrl_regs.cr2 = fault_addr;
-    v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
+
+#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+    info->shdw_pg_state.guest_faults++;
+#endif
+
+    return v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
 }
 
 
@@ -284,12 +289,19 @@ static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shado
         return 1;
     }
 
-    if ((shadow_access == PT_ACCESS_NOT_PRESENT) &&
-        (guest_access == PT_ACCESS_NOT_PRESENT)) {
+    /*
+    if ((shadow_access == PT_ACCESS_NOT_PRESENT) &&
+        (guest_access == PT_ACCESS_NOT_PRESENT)) {
+        // Page tables completely blank, handle guest first
+        return 1;
+    }
+    */
+
+    if (guest_access == PT_ACCESS_NOT_PRESENT) {
         // Page tables completely blank, handle guest first
         return 1;
     }
-
+
     // Otherwise we'll handle the guest fault later...?
 }
 
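
Note on the inject_guest_pf() change above: the function now returns the result of v3_raise_exception_with_error() rather than void, so callers are expected to check the return value and propagate a failed injection instead of ignoring it. A minimal sketch of that calling pattern follows; the wrapper function and its name are illustrative only and do not appear in this patch.

    /* Sketch only: shows a caller consuming the new int return value of
     * inject_guest_pf(). The surrounding function is hypothetical; only
     * inject_guest_pf(), PrintError(), and the argument types come from
     * the code above. */
    static int example_handle_guest_fault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
        /* Inject the #PF into the guest; bail out if the injection itself fails */
        if (inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError("Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
            return -1;
        }
        return 0;
    }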