diff --git a/palacios/src/palacios/vmm_shadow_paging.c b/palacios/src/palacios/vmm_shadow_paging.c
index 70a2c77..9e70bb2 100644
--- a/palacios/src/palacios/vmm_shadow_paging.c
+++ b/palacios/src/palacios/vmm_shadow_paging.c
@@ -3,11 +3,31 @@
 #include
 #include
 
+#include
+
+#ifndef DEBUG_SHADOW_PAGING
+#undef PrintDebug
+#define PrintDebug(fmt, args...)
+#endif
+
+/***
+ *** There be dragons
+ ***/
+
-int init_shadow_page_state(struct shadow_page_state * state) {
+
+static int handle_shadow_pte32_fault(struct guest_info * info,
+                                     addr_t fault_addr,
+                                     pf_error_t error_code,
+                                     pte32_t * shadow_pte,
+                                     pte32_t * guest_pte);
+
+static int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
+
+int init_shadow_page_state(struct guest_info * info) {
+  struct shadow_page_state * state = &(info->shdw_pg_state);
   state->guest_mode = PDE32;
   state->shadow_mode = PDE32;
@@ -18,115 +38,277 @@ int init_shadow_page_state(struct shadow_page_state * state) {
 }
 
 int handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
-  if (info->cpu_mode == PROTECTED_PG) {
-    return handle_shadow_pagefault32(info, fault_addr, error_code);
+
+  if (info->mem_mode == PHYSICAL_MEM) {
+    // If paging is not turned on we need to handle the special cases
+    return handle_special_page_fault(info, fault_addr, fault_addr, error_code);
+  } else if (info->mem_mode == VIRTUAL_MEM) {
+
+    switch (info->cpu_mode) {
+    case PROTECTED:
+      return handle_shadow_pagefault32(info, fault_addr, error_code);
+      break;
+    case PROTECTED_PAE:
+    case LONG:
+    default:
+      PrintError("Unhandled CPU Mode\n");
+      return -1;
+    }
   } else {
+    PrintError("Invalid Memory mode\n");
     return -1;
   }
 }
 
+addr_t create_new_shadow_pt32() {
+  void * host_pde = 0;
 
-int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
-  pde32_t * guest_pde = NULL;
-  pde32_t * shadow_pde = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
-  addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);
-  pt_access_status_t guest_pde_access;
-  pt_access_status_t shadow_pde_access;
-  pde32_t * guest_pde_entry = NULL;
-  pde32_t * shadow_pde_entry = (pde32_t *)&(shadow_pde[PDE32_INDEX(fault_addr)]);
+  host_pde = V3_AllocPages(1);
+  memset(host_pde, 0, PAGE_SIZE);
 
-  if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pde) == -1) {
-    PrintDebug("Invalid Guest PDE Address: 0x%x\n", guest_cr3);
-    return -1;
-  }
+  return (addr_t)host_pde;
+}
 
-  guest_pde_entry = (pde32_t *)&(guest_pde[PDE32_INDEX(fault_addr)]);
+static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+  info->ctrl_regs.cr2 = fault_addr;
+  v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
+}
 
-  // Check the guest page permissions
-  guest_pde_access = can_access_pde32(guest_pde, fault_addr, error_code);
 
-  if (guest_pde_access != PT_ACCESS_OK) {
+static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access) {
+  /* Basically the reasoning is that there can be multiple reasons for a page fault:
+     If there is a permissions failure for a page present in the guest _BUT_
+     the reason for the fault was that the page is not present in the shadow,
+     _THEN_ we have to map the shadow page in and reexecute. This will generate
+     a permissions fault, which is _THEN_ valid to send to the guest,
+     _UNLESS_ both the guest and shadow have marked the page as not present.
 
-    //
-    // inject page fault to the guest (Guest PDE fault)
-    //
+     whew...
+  */
+  if (guest_access != PT_ACCESS_OK) {
+    // Guest Access Error
+
+    if ((shadow_access != PT_ENTRY_NOT_PRESENT) &&
+        (guest_access != PT_ENTRY_NOT_PRESENT)) {
+      // aka (guest permission error)
+      return 1;
+    }
 
-    PrintDebug("Guest Page fault (currently not handled)\n");
-    return -1;
+    if ((shadow_access == PT_ENTRY_NOT_PRESENT) &&
+        (guest_access == PT_ENTRY_NOT_PRESENT)) {
+      // Page tables completely blank, handle guest first
+      return 1;
+    }
+
+    // Otherwise we'll handle the guest fault later...?
+  }
 
-  shadow_pde_access = can_access_pde32(shadow_pde, fault_addr, error_code);
+  return 0;
+}
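/* The verdicts of is_guest_pf() above, tabulated; each row is derived
 * directly from the two checks in the function body:
 *
 *   guest access    shadow access        is_guest_pf
 *   -------------   ------------------   -----------
 *   OK              anything             0   fault belongs to the VMM
 *   perm error      OK or perm error     1   inject into the guest
 *   NOT_PRESENT     NOT_PRESENT          1   guest tables are blank
 *   perm error      NOT_PRESENT          0   map shadow page, re-execute
 *   NOT_PRESENT     OK or perm error     0   handle shadow fault first
 */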
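/* For reference: inject_guest_pf() above reinterprets the pf_error_t bitfield
 * as a raw 32-bit error code via *(uint_t *)&error_code. The bit layout is
 * fixed by the IA-32 architecture; the field names below are only a sketch of
 * how such a bitfield could be declared (Palacios' real pf_error_t lives in a
 * header that is not part of this diff).
 */
struct pf_error_sketch {
  uint_t present     : 1;   // 0: fault on a not-present page; 1: protection violation
  uint_t write       : 1;   // 0: faulting access was a read; 1: a write
  uint_t user        : 1;   // 0: supervisor-mode access; 1: user-mode access
  uint_t rsvd_access : 1;   // 1: a reserved bit was set in a paging structure
  uint_t ifetch      : 1;   // 1: fault during an instruction fetch
  uint_t rsvd        : 27;  // remaining bits are reserved
};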
 
-  if (shadow_pde_access == PT_ENTRY_NOT_PRESENT) {
-    pte32_t * shadow_pte = NULL;
-    V3_AllocPages(shadow_pte, 1);
-    memset(shadow_pte, 0, PAGE_SIZE);
-    shadow_pde_entry->pt_base_addr = PD32_BASE_ADDR(shadow_pte);
-
+/* The guest status checks have already been done,
+ * only special case shadow checks remain
+ */
+static int handle_large_pagefault32(struct guest_info * info,
+                                    addr_t fault_addr, pf_error_t error_code,
+                                    pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde)
+{
+  pt_access_status_t shadow_pte_access = can_access_pte32(shadow_pt, fault_addr, error_code);
+  pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
+
+  if (shadow_pte_access == PT_ACCESS_OK) {
+    // Inconsistent state...
+    // Guest re-entry will flush tables and everything should now work
+    PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
+    return 0;
+  }
 
-    shadow_pde_entry->present = 1;
-    shadow_pde_entry->user_page = guest_pde_entry->user_page;
-
-    // VMM Specific options
-    shadow_pde_entry->write_through = 0;
-    shadow_pde_entry->cache_disable = 0;
-    shadow_pde_entry->global_page = 0;
-    //
+
+  if (shadow_pte_access == PT_ENTRY_NOT_PRESENT) {
+    // Get the guest physical address of the fault
+    addr_t guest_fault_pa = PDE32_4MB_T_ADDR(*large_guest_pde) + PD32_4MB_PAGE_OFFSET(fault_addr);
+    host_region_type_t host_page_type = get_shadow_addr_type(info, guest_fault_pa);
 
-    guest_pde_entry->accessed = 1;
+    if (host_page_type == HOST_REGION_INVALID) {
+      // Inject a machine check in the guest
+      PrintDebug("Invalid Guest Address in page table (0x%x)\n", guest_fault_pa);
+      v3_raise_exception(info, MC_EXCEPTION);
+      return 0;
+    }
 
-    if (guest_pde_entry->large_page == 0) {
-      shadow_pde_entry->writable = guest_pde_entry->writable;
-    } else {
-      /*
-       * Check the Intel manual because we are ignoring Large Page issues here
+    if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
+      addr_t shadow_pa = get_shadow_addr(info, guest_fault_pa);
+
+      shadow_pte->page_base_addr = PT32_BASE_ADDR(shadow_pa);
+
+      shadow_pte->present = 1;
+
+      /* We are assuming that the PDE entry has precedence,
+       * so the shadow PDE will mirror the guest PDE settings
+       * and we don't have to worry about them here.
+       * Allow everything.
        */
+      shadow_pte->user_page = 1;
+      shadow_pte->writable = 1;
+
+      // set according to VMM policy
+      shadow_pte->write_through = 0;
+      shadow_pte->cache_disable = 0;
+      shadow_pte->global_page = 0;
+      //
+
+    } else {
+      // Handle hooked pages as well as other special pages
+      if (handle_special_page_fault(info, fault_addr, PT32_PAGE_ADDR(guest_fault_pa), error_code) == -1) {
+        PrintError("Special Page Fault handler returned error for address: %x\n", fault_addr);
+        return -1;
+      }
     }
+  } else {
+    PrintError("Error in large page fault handler...\n");
+    PrintError("This case should have been handled at the top level handler\n");
+    return -1;
+  }
 
-  } else if (shadow_pde_access == PT_WRITE_ERROR) {
+  PrintDebug("Returning from large page fault handler\n");
+  return 0;
+}
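/* A sketch of the guest_fault_pa computation used above, assuming the
 * conventional definitions of PDE32_4MB_T_ADDR() and PD32_4MB_PAGE_OFFSET()
 * (the actual macros live in the paging headers, not in this diff). For a
 * 4MB page, PDE bits 31:22 give the frame base and the low 22 bits of the
 * faulting address are the offset into that frame:
 */
static addr_t guest_fault_pa_sketch(pde32_4MB_t * large_guest_pde, addr_t fault_addr) {
  addr_t base   = ((addr_t)large_guest_pde->page_base_addr) << 22; // frame base, bits 31:22
  addr_t offset = fault_addr & ((1UL << 22) - 1);                  // offset within the 4MB frame
  return base + offset;
}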
 
-    //
-    // Page Directory Entry marked read-only
-    //
-    PrintDebug("Shadow Paging Write Error\n");
-    return -1;
-  } else if (shadow_pde_access == PT_USER_ERROR) {
+static int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+  pde32_t * guest_pd = NULL;
+  pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
+  addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);
+  pt_access_status_t guest_pde_access;
+  pt_access_status_t shadow_pde_access;
+  pde32_t * guest_pde = NULL;
+  pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(fault_addr)]);
 
-    //
-    // Page Directory Entry marked non-user
-    //
-
-    PrintDebug("Shadow Paging User access error\n");
+  PrintDebug("Shadow page fault handler\n");
+
+  if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
+    PrintError("Invalid Guest PDE Address: 0x%x\n", guest_cr3);
     return -1;
-  } else if (shadow_pde_access == PT_ACCESS_OK) {
-    pte32_t * shadow_pte = (pte32_t *)PDE32_T_ADDR((*shadow_pde_entry));
-    pte32_t * guest_pte = NULL;
+  }
 
-    // Page Table entry fault
-
-    if (guest_pa_to_host_va(info, PDE32_T_ADDR((*guest_pde_entry)), (addr_t*)&guest_pte) == -1) {
-      PrintDebug("Invalid Guest PTE Address: 0x%x\n", PDE32_T_ADDR((*guest_pde_entry)));
-      return -1;
-    }
+  guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(fault_addr)]);
 
-    if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pte, guest_pte) == -1) {
-      PrintDebug("Error handling Page fault caused by PTE\n");
-      return -1;
-    }
+  // Check the guest page permissions
+  guest_pde_access = can_access_pde32(guest_pd, fault_addr, error_code);
 
-  } else {
-    PrintDebug("Unknown Error\n");
-    return -1;
+  // Check the shadow page permissions
+  shadow_pde_access = can_access_pde32(shadow_pd, fault_addr, error_code);
+
+  /* Was the page fault caused by the Guest's page tables? */
+  if (is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
+    PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n",
+               guest_pde_access, *(uint_t *)&error_code);
+    inject_guest_pf(info, fault_addr, error_code);
+    return 0;
   }
 
-  PrintDebugPageTables(shadow_pde);
+
+  if (shadow_pde_access == PT_ENTRY_NOT_PRESENT)
+  {
+    pte32_t * shadow_pt = (pte32_t *)create_new_shadow_pt32();
+
+    shadow_pde->present = 1;
+    shadow_pde->user_page = guest_pde->user_page;
+    // shadow_pde->large_page = guest_pde->large_page;
+    shadow_pde->large_page = 0;
+
+    // VMM Specific options
+    shadow_pde->write_through = 0;
+    shadow_pde->cache_disable = 0;
+    shadow_pde->global_page = 0;
+    //
+
+    guest_pde->accessed = 1;
+
+    shadow_pde->pt_base_addr = PD32_BASE_ADDR(shadow_pt);
+
+    if (guest_pde->large_page == 0) {
+      shadow_pde->writable = guest_pde->writable;
+    } else {
+      ((pde32_4MB_t *)guest_pde)->dirty = 0;
+      shadow_pde->writable = 0;
+    }
+  }
+  else if (shadow_pde_access == PT_ACCESS_OK)
+  {
+    //
+    // PTE fault
+    //
+    pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde));
+
+    if (guest_pde->large_page == 0) {
+      pte32_t * guest_pt = NULL;
+      if (guest_pa_to_host_va(info, PDE32_T_ADDR((*guest_pde)), (addr_t*)&guest_pt) == -1) {
+        // Machine check the guest
+        PrintDebug("Invalid Guest PTE Address: 0x%x\n", PDE32_T_ADDR((*guest_pde)));
+        v3_raise_exception(info, MC_EXCEPTION);
+        return 0;
+      }
+
+      if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
+        PrintError("Error handling Page fault caused by PTE\n");
+        return -1;
+      }
+    } else if (guest_pde->large_page == 1) {
+      if (handle_large_pagefault32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) {
+        PrintError("Error handling large pagefault\n");
+        return -1;
+      }
+    }
+  }
+  else if ((shadow_pde_access == PT_WRITE_ERROR) &&
+           (guest_pde->large_page == 1) &&
+           (((pde32_4MB_t *)guest_pde)->dirty == 0))
+  {
+    //
+    // Page Directory Entry marked read-only
+    // It's a large page and we need to update the dirty bit in the guest
+    //
+    PrintDebug("Large page write error... Setting dirty bit and returning\n");
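/* The index arithmetic assumed by the PDE32_INDEX()/PTE32_INDEX() macros used
 * throughout this handler. Under non-PAE 32-bit paging a linear address
 * splits 10/10/12: ten bits of directory index, ten bits of table index, and
 * a twelve-bit page offset:
 */
static uint_t pde32_index_sketch(addr_t addr) { return (addr >> 22) & 0x3ff; } // bits 31:22
static uint_t pte32_index_sketch(addr_t addr) { return (addr >> 12) & 0x3ff; } // bits 21:12
// The low 12 bits (addr & 0xfff) are the byte offset within the 4KB page.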
+    ((pde32_4MB_t *)guest_pde)->dirty = 1;
+    shadow_pde->writable = guest_pde->writable;
+    return 0;
+  }
+  else if (shadow_pde_access == PT_USER_ERROR)
+  {
+    //
+    // Page Directory Entry marked non-user
+    //
+    PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
+               shadow_pde_access, guest_pde_access);
+    inject_guest_pf(info, fault_addr, error_code);
+    return 0;
+  }
+  else
+  {
+    // inject page fault in guest
+    inject_guest_pf(info, fault_addr, error_code);
+    PrintDebug("Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
+    PrintDebug("Manual Says to inject page fault into guest\n");
+#ifdef DEBUG_SHADOW_PAGING
+    PrintDebug("Guest PDE: (access=%d)\n\t", guest_pde_access);
+    PrintPDE32(fault_addr, guest_pde);
+    PrintDebug("Shadow PDE: (access=%d)\n\t", shadow_pde_access);
+    PrintPDE32(fault_addr, shadow_pde);
+#endif
+
+    return 0;
+  }
+
+  PrintDebug("Returning end of PDE function (rip=%x)\n", info->rip);
   return 0;
 }
 
 
@@ -135,189 +317,346 @@ int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
 
 /*
  * We assume that the guest pte pointer has already been translated to a host virtual address
  */
-int handle_shadow_pte32_fault(struct guest_info* info,
-                              addr_t fault_addr,
-                              pf_error_t error_code,
-                              pte32_t * shadow_pte,
-                              pte32_t * guest_pte) {
+static int handle_shadow_pte32_fault(struct guest_info * info,
+                                     addr_t fault_addr,
+                                     pf_error_t error_code,
+                                     pte32_t * shadow_pt,
+                                     pte32_t * guest_pt) {
 
   pt_access_status_t guest_pte_access;
   pt_access_status_t shadow_pte_access;
-  pte32_t * guest_pte_entry = (pte32_t *)&(guest_pte[PTE32_INDEX(fault_addr)]);;
-  pte32_t * shadow_pte_entry = (pte32_t *)&(shadow_pte[PTE32_INDEX(fault_addr)]);
+  pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);
+  pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
 
   // Check the guest page permissions
-  guest_pte_access = can_access_pte32(guest_pte, fault_addr, error_code);
+  guest_pte_access = can_access_pte32(guest_pt, fault_addr, error_code);
 
-  if (guest_pte_access != PT_ACCESS_OK) {
-
-    //
-    // Inject page fault into the guest
-    //
-
-    PrintDebug("Guest Page fault (currently not handled)\n");
-    return -1;
+  // Check the shadow page permissions
+  shadow_pte_access = can_access_pte32(shadow_pt, fault_addr, error_code);
+
+#ifdef DEBUG_SHADOW_PAGING
+  PrintDebug("Guest PTE: (access=%d)\n\t", guest_pte_access);
+  PrintPTE32(fault_addr, guest_pte);
+  PrintDebug("Shadow PTE: (access=%d)\n\t", shadow_pte_access);
+  PrintPTE32(fault_addr, shadow_pte);
+#endif
+
+  /* Was the page fault caused by the Guest's page tables? */
+  if (is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
+    PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
+               guest_pte_access, *(uint_t*)&error_code);
+    inject_guest_pf(info, fault_addr, error_code);
+    return 0;
+  }
+
+  if (shadow_pte_access == PT_ACCESS_OK) {
+    // Inconsistent state...
+    // Guest re-entry will flush page tables and everything should now work
+    PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
+    return 0;
   }
 
   if (shadow_pte_access == PT_ENTRY_NOT_PRESENT) {
-    addr_t shadow_pa;
-    addr_t guest_pa = PTE32_T_ADDR((*guest_pte_entry));
+
+    addr_t guest_pa = PTE32_T_ADDR((*guest_pte));
 
     // Page Table Entry Not Present
 
-    if (get_shadow_addr_type(info, guest_pa) == HOST_REGION_INVALID) {
+    host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);
 
-      //
+    if (host_page_type == HOST_REGION_INVALID) {
       // Inject a machine check in the guest
-      //
       PrintDebug("Invalid Guest Address in page table (0x%x)\n", guest_pa);
-      return -1;
+      v3_raise_exception(info, MC_EXCEPTION);
+      return 0;
     }
 
-    shadow_pa = get_shadow_addr(info, guest_pa);
-
-    shadow_pte_entry->page_base_addr = PT32_BASE_ADDR(shadow_pa);
+    // else...
+
+    if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
+      addr_t shadow_pa = get_shadow_addr(info, guest_pa);
+
+      shadow_pte->page_base_addr = PT32_BASE_ADDR(shadow_pa);
+
+      shadow_pte->present = guest_pte->present;
+      shadow_pte->user_page = guest_pte->user_page;
+
+      // set according to VMM policy
+      shadow_pte->write_through = 0;
+      shadow_pte->cache_disable = 0;
+      shadow_pte->global_page = 0;
+      //
+
+      guest_pte->accessed = 1;
+
+      if (guest_pte->dirty == 1) {
+        shadow_pte->writable = guest_pte->writable;
+      } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
+        shadow_pte->writable = guest_pte->writable;
+        guest_pte->dirty = 1;
+      } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
+        shadow_pte->writable = 0;
+      }
+    } else {
+      // Page fault handled by hook functions
+      if (handle_special_page_fault(info, fault_addr, guest_pa, error_code) == -1) {
+        PrintError("Special Page fault handler returned error for address: %x\n", fault_addr);
+        return -1;
+      }
+    }
 
-    shadow_pte_entry->present = guest_pte_entry->present;
-    shadow_pte_entry->user_page = guest_pte_entry->user_page;
+  } else if ((shadow_pte_access == PT_WRITE_ERROR) &&
+             (guest_pte->dirty == 0)) {
 
-    //set according to VMM policy
-    shadow_pte_entry->write_through = 0;
-    shadow_pte_entry->cache_disable = 0;
-    shadow_pte_entry->global_page = 0;
-    //
+    PrintDebug("Shadow PTE Write Error\n");
+    guest_pte->dirty = 1;
+    shadow_pte->writable = guest_pte->writable;
+    return 0;
 
-    guest_pte_entry->accessed = 1;
+  } else {
+    // Inject page fault into the guest
+    inject_guest_pf(info, fault_addr, error_code);
+    PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
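/* The dirty-bit emulation policy used by handle_shadow_pte32_fault() above,
 * restated as a small helper (a sketch of the policy only; the real code
 * updates guest_pte in place). The shadow PTE is made writable only once the
 * guest's dirty bit is set, so the first write through a clean mapping always
 * traps into the VMM, where the guest dirty bit can be updated:
 */
static int shadow_writable_sketch(pte32_t * guest_pte, pf_error_t error_code) {
  if (guest_pte->dirty == 1) {
    return guest_pte->writable;   // already dirty: mirror the guest's writable bit
  } else if (error_code.write == 1) {
    guest_pte->dirty = 1;         // first write: set the guest dirty bit now
    return guest_pte->writable;
  } else {
    return 0;                     // clean page, read access: stay read-only to catch the first write
  }
}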
+    PrintError("Manual Says to inject page fault into guest\n");
+    return -1;
+  }
 
-    if (guest_pte_entry->dirty == 1) {
-      shadow_pte_entry->writable = guest_pte_entry->writable;
-    } else if ((guest_pte_entry->dirty == 0) && (error_code.write == 1)) {
-      shadow_pte_entry->writable = guest_pte_entry->writable;
-      guest_pte_entry->dirty = 1;
-    } else if ((guest_pte_entry->dirty = 0) && (error_code.write == 0)) {
-      shadow_pte_entry->writable = 0;
-    }
+  PrintDebug("Returning end of function\n");
+  return 0;
+}
 
-  } else if (shadow_pte_access == PT_WRITE_ERROR) {
-    //
-    // Page Table Entry marked read-only
-    //
-    PrintDebug("Shadow Paging Write Error\n");
-    return -1;
-  } else if (shadow_pte_access == PT_USER_ERROR) {
-    //
-    // Page Table Entry marked non-user
-    //
-    PrintDebug("Shadow Paging User access error\n");
-    return -1;
-  } else if (shadow_pte_access == PT_ACCESS_OK) {
-    PrintDebug("Page Fault occurred for No Reason\n");
-    return -1;
-  } else {
-    PrintDebug("Unknown Error\n");
+/* Currently does not work with Segmentation!!! */
+int handle_shadow_invlpg(struct guest_info * info) {
+  if (info->mem_mode != VIRTUAL_MEM) {
+    // Paging must be turned on...
+    // should handle with some sort of fault I think
+    PrintError("ERROR: INVLPG called in non paged mode\n");
     return -1;
   }
-  return 0;
-}
 
+  if (info->cpu_mode == PROTECTED) {
+    char instr[15];
+    int ret;
+    int index = 0;
+
+    ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+    if (ret != 15) {
+      PrintError("Could not read instruction 0x%x (ret=%d)\n", info->rip, ret);
+      return -1;
+    }
+
+    /* Can INVLPG work with Segments?? */
+    while (is_prefix_byte(instr[index])) {
+      index++;
+    }
+
+    if ((instr[index] == (uchar_t)0x0f) &&
+        (instr[index + 1] == (uchar_t)0x01)) {
 
-addr_t create_new_shadow_pt32(struct guest_info * info) {
-  void * host_pde = 0;
+      addr_t first_operand;
+      addr_t second_operand;
+      operand_type_t addr_type;
+      addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);
 
-  V3_AllocPages(host_pde, 1);
-  memset(host_pde, 0, PAGE_SIZE);
+      pde32_t * guest_pd = NULL;
 
-  return (addr_t)host_pde;
-}
+      if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
+        PrintError("Invalid Guest PDE Address: 0x%x\n", guest_cr3);
+        return -1;
+      }
+
+      index += 2;
 
-addr_t setup_shadow_pt32(struct guest_info * info, addr_t virt_cr3) {
-  addr_t cr3_guest_addr = CR3_TO_PDE32(virt_cr3);
-  pde32_t * guest_pde;
-  pde32_t * host_pde = NULL;
-  int i;
-
-  // Setup up guest_pde to point to the PageDir in host addr
-  if (guest_pa_to_host_va(info, cr3_guest_addr, (addr_t*)&guest_pde) == -1) {
-    return 0;
-  }
-
-  V3_AllocPages(host_pde, 1);
-  memset(host_pde, 0, PAGE_SIZE);
+      addr_type = decode_operands32(&(info->vm_regs), instr + index, &index, &first_operand, &second_operand, REG32);
 
-  for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
-    if (guest_pde[i].present == 1) {
-      addr_t pt_host_addr;
-      addr_t host_pte;
+      if (addr_type == MEM_OPERAND) {
+        pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
+        pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(first_operand)];
+        pde32_t * guest_pde;
 
-      if (guest_pa_to_host_va(info, PDE32_T_ADDR(guest_pde[i]), &pt_host_addr) == -1) {
-        return 0;
-      }
+        //PrintDebug("PDE Index=%d\n", PDE32_INDEX(first_operand));
+        //PrintDebug("FirstOperand = %x\n", first_operand);
 
-      if ((host_pte = setup_shadow_pte32(info, pt_host_addr)) == 0) {
-        return 0;
-      }
+        PrintDebug("Invalidating page for %x\n", first_operand);
 
-      host_pde[i].present = 1;
-      host_pde[i].pt_base_addr = PD32_BASE_ADDR(host_pte);
+        guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(first_operand)]);
 
-      //
-      // Set Page DIR flags
-      //
+        if (guest_pde->large_page == 1) {
+          shadow_pde->present = 0;
+          PrintDebug("Invalidating Large Page\n");
+        } else {
+
+          if (shadow_pde->present == 1) {
+            pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde));
+            pte32_t * shadow_pte = (pte32_t *)&shadow_pt[PTE32_INDEX(first_operand)];
+
+#ifdef DEBUG_SHADOW_PAGING
+            PrintDebug("Setting not present\n");
+            PrintPTE32(first_operand, shadow_pte);
+#endif
+
+            shadow_pte->present = 0;
+          }
+        }
+
+        info->rip += index;
+
+      } else {
+        PrintError("Invalid Operand type\n");
+        return -1;
+      }
+    } else {
+      PrintError("Invalid Instruction Opcode\n");
+      PrintTraceMemDump(instr, 15);
+      return -1;
+    }
   }
 
-  PrintDebugPageTables(host_pde);
-
-  return (addr_t)host_pde;
+  return 0;
 }
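/* The byte matching above targets the INVLPG encoding, 0F 01 /7: a two-byte
 * opcode followed by a ModRM byte whose reg field is 7 and which selects a
 * memory operand. The code above leaves the /7 and operand checks to
 * decode_operands32(); a stricter standalone check would look like this
 * sketch:
 */
static int is_invlpg_sketch(uchar_t * instr) {
  uchar_t modrm = instr[2];
  return (instr[0] == (uchar_t)0x0f) &&
         (instr[1] == (uchar_t)0x01) &&
         (((modrm >> 3) & 0x7) == 7) &&  // ModRM reg field must be /7 for INVLPG
         (((modrm >> 6) & 0x3) != 3);    // mod == 3 (register operand) is not INVLPG
}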
 
+/*
 
-addr_t setup_shadow_pte32(struct guest_info * info, addr_t pt_host_addr) {
-  pte32_t * guest_pte = (pte32_t *)pt_host_addr;
-  pte32_t * host_pte = NULL;
-  int i;
 
-  V3_AllocPages(host_pte, 1);
-  memset(host_pte, 0, PAGE_SIZE);
+
+static int create_pd32_nonaligned_4MB_page(struct guest_info * info, pte32_t * pt, addr_t guest_addr, pde32_4MB_t * large_shadow_pde) {
+  uint_t i = 0;
+  pte32_t * pte_cursor;
+  addr_t guest_pa = 0;
 
-  for (i = 0; i < MAX_PTE32_ENTRIES; i++) {
-    if (guest_pte[i].present == 1) {
-      addr_t guest_pa = PTE32_T_ADDR(guest_pte[i]);
-      shadow_mem_type_t page_type;
-      addr_t host_pa = 0;
+  for (i = 0; i < 1024; i++) {
+    guest_pa = guest_addr + (PAGE_SIZE * i);
+    host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);
+
+    pte_cursor = &(pt[i]);
 
-      page_type = get_shadow_addr_type(info, guest_pa);
+    if (host_page_type == HOST_REGION_INVALID) {
+      // Currently we don't support this, but in theory we could
+      PrintError("Invalid Host Memory Type\n");
+      return -1;
+    } else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
+      addr_t shadow_pa = get_shadow_addr(info, guest_pa);
 
-      if (page_type == HOST_REGION_PHYSICAL_MEMORY) {
-        host_pa = get_shadow_addr(info, guest_pa);
-      } else {
-        //
-        // Setup various memory types
-        //
-      }
-
-      host_pte[i].page_base_addr = PT32_BASE_ADDR(host_pa);
-      host_pte[i].present = 1;
+      pte_cursor->page_base_addr = PT32_BASE_ADDR(shadow_pa);
+      pte_cursor->present = 1;
+      pte_cursor->writable = large_shadow_pde->writable;
+      pte_cursor->user_page = large_shadow_pde->user_page;
+      pte_cursor->write_through = 0;
+      pte_cursor->cache_disable = 0;
+      pte_cursor->global_page = 0;
+
+    } else {
+      PrintError("Unsupported Host Memory Type\n");
+      return -1;
+    }
   }
 
-  return (addr_t)host_pte;
+  return 0;
 }
 
+static int handle_large_pagefault32(struct guest_info * info,
+                                    pde32_t * guest_pde, pde32_t * shadow_pde,
+                                    addr_t fault_addr, pf_error_t error_code) {
+  struct shadow_region * mem_reg;
+  pde32_4MB_t * large_guest_pde = (pde32_4MB_t *)guest_pde;
+  pde32_4MB_t * large_shadow_pde = (pde32_4MB_t *)shadow_pde;
+  host_region_type_t host_page_type;
+  addr_t guest_start_addr = PDE32_4MB_T_ADDR(*large_guest_pde);
+  // addr_t guest_end_addr = guest_start_addr + PAGE_SIZE_4MB; // start address + 4MB
+
+  // Check that the Guest PDE entry points to valid memory,
+  // else Machine Check the guest
+  PrintDebug("Large Page: Page Base Addr=%x\n", guest_start_addr);
+
+  host_page_type = get_shadow_addr_type(info, guest_start_addr);
+
+  if (host_page_type == HOST_REGION_INVALID) {
+    PrintError("Invalid guest address in large page (0x%x)\n", guest_start_addr);
+    v3_raise_exception(info, MC_EXCEPTION);
+    return -1;
+  }
+
+  // else...
+
+  if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
+
+    addr_t host_start_addr = 0;
+    addr_t region_end_addr = 0;
+
+    // Check for a large enough region in host memory
+    mem_reg = get_shadow_region_by_addr(&(info->mem_map), guest_start_addr);
+    PrintDebug("Host region: host_addr=%x (guest_start=%x, end=%x)\n",
+               mem_reg->host_addr, mem_reg->guest_start, mem_reg->guest_end);
+    host_start_addr = mem_reg->host_addr + (guest_start_addr - mem_reg->guest_start);
+    region_end_addr = mem_reg->host_addr + (mem_reg->guest_end - mem_reg->guest_start);
+
+    PrintDebug("Host Start Addr=%x; Region End Addr=%x\n", host_start_addr, region_end_addr);
+
+    // 4f
+    if (large_guest_pde->dirty == 1) { // dirty
+      large_shadow_pde->writable = guest_pde->writable;
+    } else if (error_code.write == 1) { // not dirty, access is write
+      large_shadow_pde->writable = guest_pde->writable;
+      large_guest_pde->dirty = 1;
+    } else { // not dirty, access is read
+      large_shadow_pde->writable = 0;
+    }
+
+    // Check if the region is at least an additional 4MB
+
+    // 4b.
+    if ((PD32_4MB_PAGE_OFFSET(host_start_addr) == 0) &&
+        (region_end_addr >= host_start_addr + PAGE_SIZE_4MB)) { // if 4MB boundary
+      large_shadow_pde->page_base_addr = PD32_4MB_BASE_ADDR(host_start_addr);
+    } else { // else generate 4k pages
+      pte32_t * shadow_pt = NULL;
+      PrintDebug("Handling non aligned large page\n");
+
+      shadow_pde->large_page = 0;
+
+      shadow_pt = (pte32_t *)create_new_shadow_pt32();
+
+      if (create_pd32_nonaligned_4MB_page(info, shadow_pt, guest_start_addr, large_shadow_pde) == -1) {
+        PrintError("Non Aligned Large Page Error\n");
+        V3_Free(shadow_pt);
+        return -1;
+      }
+
+#ifdef DEBUG_SHADOW_PAGING
+      PrintDebug("non-aligned Shadow PT\n");
+      PrintPT32(PT32_PAGE_ADDR(fault_addr), shadow_pt);
+#endif
+      shadow_pde->pt_base_addr = PD32_BASE_ADDR(shadow_pt);
+    }
+
+  } else {
+    // Handle hooked pages as well as other special pages
+    if (handle_special_page_fault(info, fault_addr, guest_start_addr, error_code) == -1) {
+      PrintError("Special Page Fault handler returned error for address: %x\n", fault_addr);
+      return -1;
+    }
+  }
+
+  return 0;
+}
+*/
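/* A worked example of the alignment test in the commented-out large-page path
 * above: a guest 4MB page can be backed by a single host 4MB mapping only if
 * the backing host address is 4MB aligned and the host region extends at
 * least 4MB past it. Assuming PAGE_SIZE_4MB == 0x400000:
 *
 *   host_start_addr = 0x0c400000 -> PD32_4MB_PAGE_OFFSET = 0        -> one 4MB shadow PDE
 *   host_start_addr = 0x0c500000 -> PD32_4MB_PAGE_OFFSET = 0x100000 -> splinter into 1024 4KB PTEs
 *                                                                      via create_pd32_nonaligned_4MB_page()
 */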