X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?p=palacios.git;a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_shadow_paging.c;h=0afc383269000e456d57847a5cdfc6c5d86a1973;hp=e6bad8a470ae9a2ab1bca5bd96f58398be430bac;hb=0e097100a26bc43eb8964734fa43130fc4c71429;hpb=e4c0bf63a6e95019f79f7ff9915fd5d360d2c31a

diff --git a/palacios/src/palacios/vmm_shadow_paging.c b/palacios/src/palacios/vmm_shadow_paging.c
index e6bad8a..0afc383 100644
--- a/palacios/src/palacios/vmm_shadow_paging.c
+++ b/palacios/src/palacios/vmm_shadow_paging.c
@@ -28,7 +28,18 @@
 #include <palacios/vmm_ctrl_regs.h>
 
-#ifndef DEBUG_SHADOW_PAGING
+#include <palacios/vmm_direct_paging.h>
+
+
+#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+#include <palacios/vmm_telemetry.h>
+#endif
+
+#ifdef CONFIG_SYMBIOTIC_SWAP
+#include <palacios/vmm_sym_swap.h>
+#endif
+
+#ifndef CONFIG_DEBUG_SHADOW_PAGING
 #undef PrintDebug
 #define PrintDebug(fmt, args...)
 #endif
@@ -40,29 +51,16 @@
 
 struct shadow_page_data {
-  v3_reg_t cr3;
-  addr_t page_pa;
+    v3_reg_t cr3;
+    addr_t page_pa;
 
-  struct list_head page_list_node;
+    struct list_head page_list_node;
 };
 
 
-DEFINE_HASHTABLE_INSERT(add_pte_map, addr_t, addr_t);
-DEFINE_HASHTABLE_SEARCH(find_pte_map, addr_t, addr_t);
-//DEFINE_HASHTABLE_REMOVE(del_pte_map, addr_t, addr_t, 0);
-
-
-
-static uint_t pte_hash_fn(addr_t key) {
-  return hash_long(key, 32);
-}
-
-static int pte_equals(addr_t key1, addr_t key2) {
-  return (key1 == key2);
-}
 
-static addr_t create_new_shadow_pt(struct guest_info * info);
-static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
+static struct shadow_page_data * create_new_shadow_pt(struct guest_info * info);
+static int inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
 static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access);
 
@@ -72,18 +70,30 @@ static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shado
 
 
 
+#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+static void telemetry_cb(struct guest_info * info, void * private_data, char * hdr) {
+    V3_Print("%s Guest Page faults: %d\n", hdr, info->shdw_pg_state.guest_faults);
+}
+#endif
+
+
+
 int v3_init_shadow_page_state(struct guest_info * info) {
-  struct shadow_page_state * state = &(info->shdw_pg_state);
+    struct shadow_page_state * state = &(info->shdw_pg_state);
 
-  state->guest_cr3 = 0;
-  state->guest_cr0 = 0;
+    state->guest_cr3 = 0;
+    state->guest_cr0 = 0;
+    state->guest_efer.value = 0x0LL;
 
-  INIT_LIST_HEAD(&(state->page_list));
+    INIT_LIST_HEAD(&(state->page_list));
 
-  state->cached_ptes = NULL;
-  state->cached_cr3 = 0;
+#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+    if (info->enable_telemetry) {
+        v3_add_telemetry_cb(info, telemetry_cb, NULL);
+    }
+#endif
 
-  return 0;
+    return 0;
 }
 
@@ -92,196 +102,210 @@ int v3_init_shadow_page_state(struct guest_info * info) {
 
 // creates new shadow page tables
 // updates the shadow CR3 register to point to the new pts
 int v3_activate_shadow_pt(struct guest_info * info) {
-  switch (info->cpu_mode) {
-
-  case PROTECTED:
-    return activate_shadow_pt_32(info);
-  case PROTECTED_PAE:
-    return activate_shadow_pt_32pae(info);
-  case LONG:
-  case LONG_32_COMPAT:
-  case LONG_16_COMPAT:
-    return activate_shadow_pt_64(info);
-  default:
-    PrintError("Invalid CPU mode: %d\n", info->cpu_mode);
-    return -1;
-  }
-
-  return 0;
+    switch (v3_get_vm_cpu_mode(info)) {
+
+    case PROTECTED:
+        return activate_shadow_pt_32(info);
+    case PROTECTED_PAE:
+        return activate_shadow_pt_32pae(info);
+    case LONG:
+    case LONG_32_COMPAT:
+    case LONG_16_COMPAT:
+        return activate_shadow_pt_64(info);
+    default:
+        PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(info)));
+        return -1;
+    }
+
+    return 0;
 }
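v3_get_vm_cpu_mode() re-derives the guest's execution mode from architectural state instead of trusting a cached info->cpu_mode field, and the %d error message becomes a readable %s via v3_cpu_mode_to_str(). Below is a minimal standalone sketch of that derivation; the helper names and the simplified mode set are invented for illustration (the LONG_32_COMPAT/LONG_16_COMPAT sub-modes also depend on the CS descriptor's L bit, omitted here).

/* Standalone sketch (not Palacios code): derive the CPU mode from the
 * paging-related control bits on every call, plus a mode_to_str() helper
 * for readable error messages. */
#include <stdio.h>

typedef enum { REAL, PROTECTED, PROTECTED_PAE, LONG } cpu_mode_t;

/* x86 semantics: PE gates protected mode, EFER.LMA signals long mode,
 * CR4.PAE selects PAE paging in 32-bit protected mode. */
static cpu_mode_t get_cpu_mode(int cr0_pe, int cr4_pae, int efer_lma) {
    if (!cr0_pe) {
        return REAL;              /* protection disabled */
    } else if (efer_lma) {
        return LONG;              /* long mode active */
    } else if (cr4_pae) {
        return PROTECTED_PAE;     /* 32-bit with PAE paging */
    }
    return PROTECTED;             /* classic 32-bit protected mode */
}

static const char * cpu_mode_to_str(cpu_mode_t mode) {
    switch (mode) {
        case REAL:          return "REAL";
        case PROTECTED:     return "PROTECTED";
        case PROTECTED_PAE: return "PROTECTED_PAE";
        case LONG:          return "LONG";
        default:            return "UNKNOWN";
    }
}

int main(void) {
    /* e.g. a guest that has enabled PE and PAE but not long mode */
    cpu_mode_t mode = get_cpu_mode(1, 1, 0);
    printf("guest mode: %s\n", cpu_mode_to_str(mode));
    return 0;
}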
 
 
-int v3_activate_passthrough_pt(struct guest_info * info) {
-  // For now... But we need to change this....
-  // As soon as shadow paging becomes active the passthrough tables are hosed
-  // So this will cause chaos if it is called at that time
-  info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
-  //PrintError("Activate Passthrough Page tables not implemented\n");
-  return 0;
+// This must flush any caches
+// and reset the cr3 value to the correct value
+int v3_invalidate_shadow_pts(struct guest_info * info) {
+    return v3_activate_shadow_pt(info);
 }
 
 
 int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
 
-  if (info->mem_mode == PHYSICAL_MEM) {
-    // If paging is not turned on we need to handle the special cases
-    return handle_special_page_fault(info, fault_addr, fault_addr, error_code);
-  } else if (info->mem_mode == VIRTUAL_MEM) {
-
-    switch (info->cpu_mode) {
-    case PROTECTED:
-      return handle_shadow_pagefault_32(info, fault_addr, error_code);
-      break;
-    case PROTECTED_PAE:
-      return handle_shadow_pagefault_32pae(info, fault_addr, error_code);
-    case LONG:
-      return handle_shadow_pagefault_64(info, fault_addr, error_code);
-      break;
-    default:
-      PrintError("Unhandled CPU Mode\n");
-      return -1;
-    }
-  } else {
-    PrintError("Invalid Memory mode\n");
-    return -1;
-  }
+    if (v3_get_vm_mem_mode(info) == PHYSICAL_MEM) {
+        // If paging is not turned on we need to handle the special cases
+        return v3_handle_passthrough_pagefault(info, fault_addr, error_code);
+    } else if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
+
+        switch (v3_get_vm_cpu_mode(info)) {
+        case PROTECTED:
+            return handle_shadow_pagefault_32(info, fault_addr, error_code);
+            break;
+        case PROTECTED_PAE:
+            return handle_shadow_pagefault_32pae(info, fault_addr, error_code);
+        case LONG:
+        case LONG_32_COMPAT:
+        case LONG_16_COMPAT:
+            return handle_shadow_pagefault_64(info, fault_addr, error_code);
+            break;
+        default:
+            PrintError("Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(info)));
+            return -1;
+        }
+    } else {
+        PrintError("Invalid Memory mode\n");
+        return -1;
+    }
 }
 
 
 int v3_handle_shadow_invlpg(struct guest_info * info) {
-  uchar_t instr[15];
-  struct x86_instr dec_instr;
-  int ret = 0;
-  addr_t vaddr = 0;
-
-  if (info->mem_mode != VIRTUAL_MEM) {
-    // Paging must be turned on...
-    // should handle with some sort of fault I think
-    PrintError("ERROR: INVLPG called in non paged mode\n");
-    return -1;
-  }
-
-  if (info->mem_mode == PHYSICAL_MEM) {
-    ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
-  } else {
-    ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
-  }
-
-  if (ret == -1) {
-    PrintError("Could not read instruction into buffer\n");
-    return -1;
-  }
-
-  if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
-    PrintError("Decoding Error\n");
-    return -1;
-  }
+    uchar_t instr[15];
+    struct x86_instr dec_instr;
+    int ret = 0;
+    addr_t vaddr = 0;
+
+    if (v3_get_vm_mem_mode(info) != VIRTUAL_MEM) {
+        // Paging must be turned on...
+        // should handle with some sort of fault I think
+        PrintError("ERROR: INVLPG called in non paged mode\n");
+        return -1;
+    }
+
+    if (v3_get_vm_mem_mode(info) == PHYSICAL_MEM) {
+        ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+    } else {
+        ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+    }
+
+    if (ret == -1) {
+        PrintError("Could not read instruction into buffer\n");
+        return -1;
+    }
+
+    if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
+        PrintError("Decoding Error\n");
+        return -1;
+    }
 
-  if ((dec_instr.op_type != V3_OP_INVLPG) ||
-      (dec_instr.num_operands != 1) ||
-      (dec_instr.dst_operand.type != MEM_OPERAND)) {
-    PrintError("Decoder Error: Not a valid INVLPG instruction...\n");
-    return -1;
-  }
-
-  vaddr = dec_instr.dst_operand.operand;
-
-  info->rip += dec_instr.instr_length;
-
-  switch (info->cpu_mode) {
-  case PROTECTED:
-    return handle_shadow_invlpg_32(info, vaddr);
-  case PROTECTED_PAE:
-    return handle_shadow_invlpg_32pae(info, vaddr);
-  case LONG:
-  case LONG_32_COMPAT:
-  case LONG_16_COMPAT:
-    return handle_shadow_invlpg_64(info, vaddr);
-  default:
-    PrintError("Invalid CPU mode: %d\n", info->cpu_mode);
-    return -1;
-  }
+    if ((dec_instr.op_type != V3_OP_INVLPG) ||
+        (dec_instr.num_operands != 1) ||
+        (dec_instr.dst_operand.type != MEM_OPERAND)) {
+        PrintError("Decoder Error: Not a valid INVLPG instruction...\n");
+        return -1;
+    }
+
+    vaddr = dec_instr.dst_operand.operand;
+
+    info->rip += dec_instr.instr_length;
+
+    switch (v3_get_vm_cpu_mode(info)) {
+    case PROTECTED:
+        return handle_shadow_invlpg_32(info, vaddr);
+    case PROTECTED_PAE:
+        return handle_shadow_invlpg_32pae(info, vaddr);
+    case LONG:
+    case LONG_32_COMPAT:
+    case LONG_16_COMPAT:
+        return handle_shadow_invlpg_64(info, vaddr);
+    default:
+        PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(info)));
+        return -1;
+    }
 }
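v3_handle_shadow_invlpg() follows a fixed emulation pattern: refuse the exit unless paging is on, fetch up to 15 bytes at the guest RIP, decode, verify the instruction really is a single-memory-operand INVLPG, and advance RIP past it before dispatching by paging mode, since the instruction is fully consumed here and must not re-execute on resume. A toy, self-contained model of that control flow follows; all types, the stub decoder, and the helper names are invented for illustration.

/* Toy model (not the Palacios decoder): the INVLPG exit-handling pattern. */
#include <stdio.h>

typedef unsigned long addr_t;

typedef struct {
    int    is_invlpg;     /* stands in for dec_instr.op_type == V3_OP_INVLPG */
    int    num_operands;
    addr_t mem_operand;   /* the virtual address to invalidate */
    int    instr_length;  /* bytes consumed, used to advance RIP */
} toy_instr;

typedef struct {
    addr_t rip;
    int    paging_enabled;
} toy_guest;

/* Fake decode: pretend every buffer is "invlpg [0x1000]", 3 bytes long. */
static int toy_decode(const unsigned char * buf, toy_instr * out) {
    (void)buf;
    out->is_invlpg    = 1;
    out->num_operands = 1;
    out->mem_operand  = 0x1000;
    out->instr_length = 3;
    return 0;
}

static int handle_invlpg_exit(toy_guest * g, const unsigned char * instr_bytes) {
    toy_instr dec;

    if (!g->paging_enabled) {
        fprintf(stderr, "INVLPG in non-paged mode\n");
        return -1;
    }
    if ((toy_decode(instr_bytes, &dec) == -1) ||
        !dec.is_invlpg || (dec.num_operands != 1)) {
        fprintf(stderr, "not a valid INVLPG\n");
        return -1;
    }

    /* Advance RIP first: the instruction is emulated here and must not
     * be re-executed when the guest resumes. */
    g->rip += dec.instr_length;

    printf("invalidate shadow mapping for %#lx, resume at rip=%#lx\n",
           dec.mem_operand, g->rip);
    return 0;
}

int main(void) {
    toy_guest g = { .rip = 0x400000, .paging_enabled = 1 };
    unsigned char bytes[15] = { 0x0f, 0x01, 0x38 };  /* invlpg-style encoding */
    return handle_invlpg_exit(&g, bytes);
}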
 
 
-static addr_t create_new_shadow_pt(struct guest_info * info) {
-  struct shadow_page_state * state = &(info->shdw_pg_state);
-  v3_reg_t cur_cr3 = info->ctrl_regs.cr3;
-  struct shadow_page_data * page_tail = NULL;
-  addr_t shdw_page = 0;
+static struct shadow_page_data * create_new_shadow_pt(struct guest_info * info) {
+    struct shadow_page_state * state = &(info->shdw_pg_state);
+    v3_reg_t cur_cr3 = info->ctrl_regs.cr3;
+    struct shadow_page_data * page_tail = NULL;
+    addr_t shdw_page = 0;
 
-  if (!list_empty(&(state->page_list))) {
-    page_tail = list_tail_entry(&(state->page_list), struct shadow_page_data, page_list_node);
+    if (!list_empty(&(state->page_list))) {
+        page_tail = list_tail_entry(&(state->page_list), struct shadow_page_data, page_list_node);
 
-    if (page_tail->cr3 != cur_cr3) {
-      page_tail->cr3 = cur_cr3;
-      list_move(&(page_tail->page_list_node), &(state->page_list));
+        if (page_tail->cr3 != cur_cr3) {
+            PrintDebug("Reusing old shadow Page: %p (cur_CR3=%p)(page_cr3=%p) \n",
+                       (void *)(addr_t)page_tail->page_pa,
+                       (void *)(addr_t)cur_cr3,
+                       (void *)(addr_t)(page_tail->cr3));
+
+            list_move(&(page_tail->page_list_node), &(state->page_list));
 
-      memset(V3_VAddr((void *)(page_tail->page_pa)), 0, PAGE_SIZE_4KB);
-      PrintDebug("Reusing old shadow Page\n");
+            memset(V3_VAddr((void *)(page_tail->page_pa)), 0, PAGE_SIZE_4KB);
 
-      return (addr_t)V3_VAddr((void *)(page_tail->page_pa));
+
+            return page_tail;
+        }
     }
-  }
 
-  // else
+    // else
+
+    page_tail = (struct shadow_page_data *)V3_Malloc(sizeof(struct shadow_page_data));
+    page_tail->page_pa = (addr_t)V3_AllocPages(1);
 
-  page_tail = (struct shadow_page_data *)V3_Malloc(sizeof(struct shadow_page_data));
-  page_tail->page_pa = (addr_t)V3_AllocPages(1);
+    PrintDebug("Allocating new shadow Page: %p (cur_cr3=%p)\n",
+               (void *)(addr_t)page_tail->page_pa,
+               (void *)(addr_t)cur_cr3);
 
-  page_tail->cr3 = cur_cr3;
-  list_add(&(page_tail->page_list_node), &(state->page_list));
+    page_tail->cr3 = cur_cr3;
+    list_add(&(page_tail->page_list_node), &(state->page_list));
 
-  shdw_page = (addr_t)V3_VAddr((void *)(page_tail->page_pa));
-  memset((void *)shdw_page, 0, PAGE_SIZE_4KB);
+    shdw_page = (addr_t)V3_VAddr((void *)(page_tail->page_pa));
+    memset((void *)shdw_page, 0, PAGE_SIZE_4KB);
 
-  return shdw_page;
+    return page_tail;
 }
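create_new_shadow_pt() keeps every shadow page-table page on a most-recently-used list, tagged with the guest CR3 it was handed out under. The tail (least recently used) page is zeroed and recycled to the front whenever it belongs to a different CR3 context; if even the tail page belongs to the current context, the whole list is live and a fresh page is allocated. A self-contained sketch of that policy follows; plain malloc/calloc stand in for V3_Malloc/V3_AllocPages, and the sketch retags the reused page explicitly.

/* Standalone sketch (illustrative, not the Palacios list code): CR3-tagged
 * MRU recycling of shadow page-table pages. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct shadow_page {
    unsigned long cr3;        /* context this page was handed out under */
    unsigned char * mem;      /* backing storage for the page table     */
    struct shadow_page * prev, * next;
};

static struct shadow_page * head = NULL, * tail = NULL;

static void push_front(struct shadow_page * p) {
    p->prev = NULL;
    p->next = head;
    if (head) head->prev = p; else tail = p;
    head = p;
}

static void unlink_node(struct shadow_page * p) {
    if (p->prev) p->prev->next = p->next; else head = p->next;
    if (p->next) p->next->prev = p->prev; else tail = p->prev;
}

static struct shadow_page * get_shadow_page(unsigned long cur_cr3) {
    /* Reuse the LRU page if it was allocated under another CR3. */
    if (tail && (tail->cr3 != cur_cr3)) {
        struct shadow_page * p = tail;
        unlink_node(p);
        memset(p->mem, 0, PAGE_SIZE);  /* wipe stale translations */
        p->cr3 = cur_cr3;
        push_front(p);                 /* now most recently used */
        return p;
    }

    /* Otherwise grow the pool (error checking omitted for brevity). */
    struct shadow_page * p = malloc(sizeof(*p));
    p->mem = calloc(1, PAGE_SIZE);
    p->cr3 = cur_cr3;
    push_front(p);
    return p;
}

int main(void) {
    get_shadow_page(0x1000);                          /* fresh allocation */
    get_shadow_page(0x1000);                          /* same CR3: allocate again */
    struct shadow_page * p = get_shadow_page(0x2000); /* new CR3: reuses LRU */
    printf("page %p now tagged cr3=%#lx\n", (void *)p, p->cr3);
    return 0;
}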
 
 
-static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
-  if (info->enable_profiler) {
-    info->profiler.guest_pf_cnt++;
-  }
+static int inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+    info->ctrl_regs.cr2 = fault_addr;
 
-  info->ctrl_regs.cr2 = fault_addr;
-  v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
+#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+    info->shdw_pg_state.guest_faults++;
+#endif
+
+    return v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
 }
 
 
 static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access) {
-  /* basically the reasoning is that there can be multiple reasons for a page fault:
-     If there is a permissions failure for a page present in the guest _BUT_
-     the reason for the fault was that the page is not present in the shadow,
-     _THEN_ we have to map the shadow page in and reexecute, this will generate
-     a permissions fault which is _THEN_ valid to send to the guest
-     _UNLESS_ both the guest and shadow have marked the page as not present
-
-     whew...
-  */
-  if (guest_access != PT_ACCESS_OK) {
-    // Guest Access Error
-
-    if ((shadow_access != PT_ACCESS_NOT_PRESENT) &&
-        (guest_access != PT_ACCESS_NOT_PRESENT)) {
-      // aka (guest permission error)
-      return 1;
-    }
+    /* basically the reasoning is that there can be multiple reasons for a page fault:
+       If there is a permissions failure for a page present in the guest _BUT_
+       the reason for the fault was that the page is not present in the shadow,
+       _THEN_ we have to map the shadow page in and reexecute, this will generate
+       a permissions fault which is _THEN_ valid to send to the guest
+       _UNLESS_ both the guest and shadow have marked the page as not present
+
+       whew...
+    */
+    if (guest_access != PT_ACCESS_OK) {
+        // Guest Access Error
+
+        if ((shadow_access != PT_ACCESS_NOT_PRESENT) &&
+            (guest_access != PT_ACCESS_NOT_PRESENT)) {
+            // aka (guest permission error)
+            return 1;
+        }
+
+        /*
+        if ((shadow_access == PT_ACCESS_NOT_PRESENT) &&
+            (guest_access == PT_ACCESS_NOT_PRESENT)) {
+            // Page tables completely blank, handle guest first
+            return 1;
+        }
+        */
+
+        if (guest_access == PT_ACCESS_NOT_PRESENT) {
+            // Page tables completely blank, handle guest first
+            return 1;
+        }
+
+        // Otherwise we'll handle the guest fault later...?
+    }
 
-    if ((shadow_access == PT_ACCESS_NOT_PRESENT) &&
-        (guest_access == PT_ACCESS_NOT_PRESENT)) {
-      // Page tables completely blank, handle guest first
-      return 1;
-    }
-
-    // Otherwise we'll handle the guest fault later...?
-  }
-
-  return 0;
+    return 0;
 }
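is_guest_pf() decides who owns a fault: if the guest's own page tables would also have faulted, through a genuine permission error or a page the guest never mapped, the fault is injected into the guest; if only the shadow tables objected, the VMM repairs them silently and the guest never sees it. Below is the same logic as a pure function, with stand-in enum values in place of Palacios' pt_access_status_t, plus a truth table over every guest/shadow combination.

/* Standalone sketch: the fault-ownership decision, reduced to a pure
 * function and exercised over all input combinations. */
#include <stdio.h>

typedef enum { PT_ACCESS_OK, PT_ACCESS_NOT_PRESENT, PT_ACCESS_WRITE_ERROR } pt_access_t;

static int is_guest_fault(pt_access_t guest, pt_access_t shadow) {
    if (guest != PT_ACCESS_OK) {
        /* guest permission error on a page present in both guest and shadow */
        if ((shadow != PT_ACCESS_NOT_PRESENT) && (guest != PT_ACCESS_NOT_PRESENT)) {
            return 1;
        }
        /* page not present in the guest at all: guest must handle it first */
        if (guest == PT_ACCESS_NOT_PRESENT) {
            return 1;
        }
    }
    /* guest tables are fine: the shadow is just stale, fix it up ourselves */
    return 0;
}

static const char * name(pt_access_t a) {
    return (a == PT_ACCESS_OK) ? "OK" :
           (a == PT_ACCESS_NOT_PRESENT) ? "NOT_PRESENT" : "WRITE_ERROR";
}

int main(void) {
    pt_access_t vals[] = { PT_ACCESS_OK, PT_ACCESS_NOT_PRESENT, PT_ACCESS_WRITE_ERROR };
    for (int g = 0; g < 3; g++) {
        for (int s = 0; s < 3; s++) {
            printf("guest=%-11s shadow=%-11s -> %s\n",
                   name(vals[g]), name(vals[s]),
                   is_guest_fault(vals[g], vals[s]) ? "inject into guest"
                                                    : "VMM fixes shadow");
        }
    }
    return 0;
}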