+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
+ * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Jack Lange <jarusl@cs.northwestern.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+
#include <palacios/vmm_shadow_paging.h>
#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
+#include <palacios/vmm_decoder.h>
+#include <palacios/vmm_ctrl_regs.h>
+#include <palacios/vmm_hashtable.h>
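+
+// With shadow paging debugging disabled, compile the PrintDebug calls in this file away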
+#ifndef DEBUG_SHADOW_PAGING
+#undef PrintDebug
+#define PrintDebug(fmt, args...)
+#endif
-int init_shadow_page_state(struct shadow_page_state * state) {
- state->guest_mode = PDE32;
- state->shadow_mode = PDE32;
-
- state->guest_cr3 = 0;
- state->shadow_cr3 = 0;
+/***
+ *** There be dragons
+ ***/
- return 0;
-}
-int handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
- if (info->cpu_mode == PROTECTED_PG) {
- return handle_shadow_pagefault32(info, fault_addr, error_code);
- } else {
- return -1;
- }
-}
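+/* Bookkeeping structures for shadow page tables: each shadow_page_data tracks
+ * the guest page it shadows, back-pointers to entries referencing it, and the
+ * guest tables it appears in.  (These appear to be groundwork for a shadow
+ * page cache; they are not yet used elsewhere in this file.)
+ */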
+struct guest_table {
+ addr_t cr3;
+ struct list_head link;
+};
-int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
- pde32_t * guest_pde = NULL;
- pde32_t * shadow_pde = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
- addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);
- pt_access_status_t guest_pde_access;
- pt_access_status_t shadow_pde_access;
- pde32_t * guest_pde_entry = NULL;
- pde32_t * shadow_pde_entry = (pde32_t *)&(shadow_pde[PDE32_INDEX(fault_addr)]);
+struct backptr {
+ addr_t ptr;
+ struct list_head link;
+};
- if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pde) == -1) {
- PrintDebug("Invalid Guest PDE Address: 0x%x\n", guest_cr3);
- return -1;
- }
+struct shadow_page_data {
+ addr_t ptr;
+ addr_t guest_addr;
- guest_pde_entry = (pde32_t *)&(guest_pde[PDE32_INDEX(fault_addr)]);
+ struct list_head backptrs;
+ struct list_head guest_tables;
+};
- // Check the guest page permissions
- guest_pde_access = can_access_pde32(guest_pde, fault_addr, error_code);
- if (guest_pde_access != PT_ACCESS_OK) {
- //
- // inject page fault to the guest (Guest PDE fault)
- //
- PrintDebug("Guest Page fault (currently not handled)\n");
- return -1;
- }
-
- shadow_pde_access = can_access_pde32(shadow_pde, fault_addr, error_code);
+//DEFINE_HASHTABLE_INSERT(add_cr3_to_cache, addr_t, struct hashtable *);
+//DEFINE_HASHTABLE_SEARCH(find_cr3_in_cache, addr_t, struct hashtable *);
+//DEFINE_HASHTABLE_REMOVE(del_cr3_from_cache, addr_t, struct hashtable *, 0);
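+// The pte map is intended to cache guest page table addresses -> their shadow
+// copies (see state->cached_ptes below); the cr3 cache variants above are
+// stubbed out for now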
- if (shadow_pde_access == PT_ENTRY_NOT_PRESENT) {
- pte32_t * shadow_pte = NULL;
+DEFINE_HASHTABLE_INSERT(add_pte_map, addr_t, addr_t);
+DEFINE_HASHTABLE_SEARCH(find_pte_map, addr_t, addr_t);
+//DEFINE_HASHTABLE_REMOVE(del_pte_map, addr_t, addr_t, 0);
- V3_AllocPages(shadow_pte, 1);
- memset(shadow_pte, 0, PAGE_SIZE);
- shadow_pde_entry->pt_base_addr = PD32_BASE_ADDR(shadow_pte);
-
- shadow_pde_entry->present = 1;
- shadow_pde_entry->user_page = guest_pde_entry->user_page;
-
- // VMM Specific options
- shadow_pde_entry->write_through = 0;
- shadow_pde_entry->cache_disable = 0;
- shadow_pde_entry->global_page = 0;
- //
-
- guest_pde_entry->accessed = 1;
-
- if (guest_pde_entry->large_page == 0) {
- shadow_pde_entry->writable = guest_pde_entry->writable;
- } else {
- /*
- * Check the Intel manual because we are ignoring Large Page issues here
- */
- }
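+// Hash and equality callbacks for the pte map hashtable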
+static uint_t pte_hash_fn(addr_t key) {
+ return hash_long(key, 32);
+}
- } else if (shadow_pde_access == PT_WRITE_ERROR) {
+static int pte_equals(addr_t key1, addr_t key2) {
+ return (key1 == key2);
+}
- //
- // Page Directory Entry marked read-only
- //
+static addr_t create_new_shadow_pt();
+static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
+static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access);
- PrintDebug("Shadow Paging Write Error\n");
- return -1;
- } else if (shadow_pde_access == PT_USER_ERROR) {
- //
- // Page Directory Entry marked non-user
- //
-
- PrintDebug("Shadow Paging User access error\n");
- return -1;
- } else if (shadow_pde_access == PT_ACCESS_OK) {
- pte32_t * shadow_pte = (pte32_t *)PDE32_T_ADDR((*shadow_pde_entry));
- pte32_t * guest_pte = NULL;
+#include "vmm_shadow_paging_32.h"
+#include "vmm_shadow_paging_32pae.h"
+#include "vmm_shadow_paging_64.h"
- // Page Table entry fault
-
- if (guest_pa_to_host_va(info, PDE32_T_ADDR((*guest_pde_entry)), (addr_t*)&guest_pte) == -1) {
- PrintDebug("Invalid Guest PTE Address: 0x%x\n", PDE32_T_ADDR((*guest_pde_entry)));
- return -1;
- }
- if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pte, guest_pte) == -1) {
- PrintDebug("Error handling Page fault caused by PTE\n");
- return -1;
- }
-
- } else {
- PrintDebug("Unknown Error\n");
- return -1;
- }
+int v3_init_shadow_page_state(struct guest_info * info) {
+ struct shadow_page_state * state = &(info->shdw_pg_state);
+
+ state->guest_cr3 = 0;
+ state->guest_cr0 = 0;
- PrintDebugPageTables(shadow_pde);
+ state->cached_ptes = NULL;
return 0;
}
-/*
- * We assume the the guest pte pointer has already been translated to a host virtual address
- */
-int handle_shadow_pte32_fault(struct guest_info* info,
- addr_t fault_addr,
- pf_error_t error_code,
- pte32_t * shadow_pte,
- pte32_t * guest_pte) {
-
- pt_access_status_t guest_pte_access;
- pt_access_status_t shadow_pte_access;
- pte32_t * guest_pte_entry = (pte32_t *)&(guest_pte[PTE32_INDEX(fault_addr)]);;
- pte32_t * shadow_pte_entry = (pte32_t *)&(shadow_pte[PTE32_INDEX(fault_addr)]);
- // Check the guest page permissions
- guest_pte_access = can_access_pte32(guest_pte, fault_addr, error_code);
- if (guest_pte_access != PT_ACCESS_OK) {
- //
- // Inject page fault into the guest
- //
+// Reads the guest CR3 register,
+// creates a new set of shadow page tables,
+// and updates the shadow CR3 register to point to the new tables
+int v3_activate_shadow_pt(struct guest_info * info) {
+ switch (info->cpu_mode) {
- PrintDebug("Guest Page fault (currently not handled)\n");
+ case PROTECTED:
+ return activate_shadow_pt_32(info);
+ case PROTECTED_PAE:
+ return activate_shadow_pt_32pae(info);
+ case LONG:
+ case LONG_32_COMPAT:
+ case LONG_16_COMPAT:
+ return activate_shadow_pt_64(info);
+ default:
+ PrintError("Invalid CPU mode: %d\n", info->cpu_mode);
return -1;
}
+ return 0;
+}
- shadow_pte_access = can_access_pte32(shadow_pte, fault_addr, error_code);
- if (shadow_pte_access == PT_ENTRY_NOT_PRESENT) {
- addr_t shadow_pa;
- addr_t guest_pa = PTE32_T_ADDR((*guest_pte_entry));
+int v3_activate_passthrough_pt(struct guest_info * info) {
+  // For now... but we need to change this:
+  // as soon as shadow paging becomes active the passthrough tables are hosed,
+  // so this will cause chaos if it is called at that point
- // Page Table Entry Not Present
+ info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
+ //PrintError("Activate Passthrough Page tables not implemented\n");
+ return 0;
+}
- if (get_shadow_addr_type(info, guest_pa) == HOST_REGION_INVALID) {
- //
- // Inject a machine check in the guest
- //
- PrintDebug("Invalid Guest Address in page table (0x%x)\n", guest_pa);
+int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+
+ if (info->mem_mode == PHYSICAL_MEM) {
+ // If paging is not turned on we need to handle the special cases
+
+#ifdef DEBUG_SHADOW_PAGING
+ PrintHostPageTree(info->cpu_mode, fault_addr, info->ctrl_regs.cr3);
+ PrintGuestPageTree(info, fault_addr, info->shdw_pg_state.guest_cr3);
+#endif
+
+ return handle_special_page_fault(info, fault_addr, fault_addr, error_code);
+ } else if (info->mem_mode == VIRTUAL_MEM) {
+
+ switch (info->cpu_mode) {
+      case PROTECTED:
+	return handle_shadow_pagefault_32(info, fault_addr, error_code);
+      case PROTECTED_PAE:
+	return handle_shadow_pagefault_32pae(info, fault_addr, error_code);
+      case LONG:
+	return handle_shadow_pagefault_64(info, fault_addr, error_code);
+ default:
+ PrintError("Unhandled CPU Mode\n");
return -1;
}
-
- shadow_pa = get_shadow_addr(info, guest_pa);
-
- shadow_pte_entry->page_base_addr = PT32_BASE_ADDR(shadow_pa);
-
- shadow_pte_entry->present = guest_pte_entry->present;
- shadow_pte_entry->user_page = guest_pte_entry->user_page;
-
- //set according to VMM policy
- shadow_pte_entry->write_through = 0;
- shadow_pte_entry->cache_disable = 0;
- shadow_pte_entry->global_page = 0;
- //
-
- guest_pte_entry->accessed = 1;
-
- if (guest_pte_entry->dirty == 1) {
- shadow_pte_entry->writable = guest_pte_entry->writable;
- } else if ((guest_pte_entry->dirty == 0) && (error_code.write == 1)) {
- shadow_pte_entry->writable = guest_pte_entry->writable;
- guest_pte_entry->dirty = 1;
- } else if ((guest_pte_entry->dirty = 0) && (error_code.write == 0)) {
- shadow_pte_entry->writable = 0;
- }
-
- } else if (shadow_pte_access == PT_WRITE_ERROR) {
-
- //
- // Page Table Entry marked read-only
- //
-
- PrintDebug("Shadow Paging Write Error\n");
- return -1;
- } else if (shadow_pte_access == PT_USER_ERROR) {
-
- //
- // Page Table Entry marked non-user
- //
-
- PrintDebug("Shadow Paging User access error\n");
- return -1;
- } else if (shadow_pte_access == PT_ACCESS_OK) {
-
- PrintDebug("Page Fault occurred for No Reason\n");
- return -1;
} else {
- PrintDebug("Unknown Error\n");
+ PrintError("Invalid Memory mode\n");
return -1;
}
-
-
- return 0;
}
-addr_t create_new_shadow_pt32(struct guest_info * info) {
+static addr_t create_new_shadow_pt(void) {
void * host_pde = 0;
- V3_AllocPages(host_pde, 1);
+ host_pde = V3_VAddr(V3_AllocPages(1));
memset(host_pde, 0, PAGE_SIZE);
return (addr_t)host_pde;
}
-
-
-addr_t setup_shadow_pt32(struct guest_info * info, addr_t virt_cr3) {
- addr_t cr3_guest_addr = CR3_TO_PDE32(virt_cr3);
- pde32_t * guest_pde;
- pde32_t * host_pde = NULL;
- int i;
-
- // Setup up guest_pde to point to the PageDir in host addr
- if (guest_pa_to_host_va(info, cr3_guest_addr, (addr_t*)&guest_pde) == -1) {
- return 0;
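+// Deliver a page fault to the guest: load the faulting address into the
+// guest's CR2 and raise #PF with the given error code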
+static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+ if (info->enable_profiler) {
+ info->profiler.guest_pf_cnt++;
}
-
- V3_AllocPages(host_pde, 1);
- memset(host_pde, 0, PAGE_SIZE);
- for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
- if (guest_pde[i].present == 1) {
- addr_t pt_host_addr;
- addr_t host_pte;
+ info->ctrl_regs.cr2 = fault_addr;
+ v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
+}
- if (guest_pa_to_host_va(info, PDE32_T_ADDR(guest_pde[i]), &pt_host_addr) == -1) {
- return 0;
- }
- if ((host_pte = setup_shadow_pte32(info, pt_host_addr)) == 0) {
- return 0;
- }
+static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access) {
+  /* The reasoning: there can be multiple causes for a single page fault.
+     If there is a permissions failure for a page that is present in the guest,
+     _but_ the fault was taken because the page is not present in the shadow,
+     _then_ we have to map the shadow page in and re-execute.  The re-execution
+     will generate a permissions fault, which is _then_ valid to forward to
+     the guest -- _unless_ both the guest and the shadow have the page marked
+     not present, in which case the guest must handle its fault first.
+   */
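+  /* Case summary (matching the checks below):
+   *   guest access OK                       -> 0 (shadow-only fault)
+   *   guest and shadow both report an error
+   *     other than not-present              -> 1 (real guest fault)
+   *   guest and shadow both not present     -> 1 (guest must map it first)
+   *   anything else                         -> 0 (fix the shadow, re-execute)
+   */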
+ if (guest_access != PT_ACCESS_OK) {
+ // Guest Access Error
+
+ if ((shadow_access != PT_ACCESS_NOT_PRESENT) &&
+ (guest_access != PT_ACCESS_NOT_PRESENT)) {
+ // aka (guest permission error)
+ return 1;
+ }
- //
- // Set Page DIR flags
- //
+ if ((shadow_access == PT_ACCESS_NOT_PRESENT) &&
+ (guest_access == PT_ACCESS_NOT_PRESENT)) {
+ // Page tables completely blank, handle guest first
+ return 1;
}
- }
- PrintDebugPageTables(host_pde);
+    // Otherwise (guest error but shadow not present): fix the shadow first;
+    // re-execution will re-raise the fault as a genuine guest fault
+ }
- return (addr_t)host_pde;
+ return 0;
}
-addr_t setup_shadow_pte32(struct guest_info * info, addr_t pt_host_addr) {
- pte32_t * guest_pte = (pte32_t *)pt_host_addr;
- pte32_t * host_pte = NULL;
- int i;
- V3_AllocPages(host_pte, 1);
- memset(host_pte, 0, PAGE_SIZE);
- for (i = 0; i < MAX_PTE32_ENTRIES; i++) {
- if (guest_pte[i].present == 1) {
- addr_t guest_pa = PTE32_T_ADDR(guest_pte[i]);
- shadow_mem_type_t page_type;
- addr_t host_pa = 0;
- page_type = get_shadow_addr_type(info, guest_pa);
- if (page_type == HOST_REGION_PHYSICAL_MEMORY) {
- host_pa = get_shadow_addr(info, guest_pa);
- } else {
-
- //
- // Setup various memory types
- //
- }
- host_pte[i].page_base_addr = PT32_BASE_ADDR(host_pa);
- host_pte[i].present = 1;
- }
+
+
+
+
+/* Currently does not work with segmentation!!! */
+int v3_handle_shadow_invlpg(struct guest_info * info) {
+ if (info->mem_mode != VIRTUAL_MEM) {
+    // Paging must be turned on for INVLPG to make sense;
+    // this should probably be reflected back to the guest as a fault of some kind
+    PrintError("ERROR: INVLPG called in non-paged mode\n");
+ return -1;
+ }
+
+
+ if (info->cpu_mode != PROTECTED) {
+ PrintError("Unsupported CPU mode (mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode));
+ return -1;
+ }
+
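+  // 15 bytes is the maximum length of an x86 instruction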
+ uchar_t instr[15];
+ int index = 0;
+
+ int ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ if (ret != 15) {
+ PrintError("Could not read instruction 0x%p (ret=%d)\n", (void *)(addr_t)(info->rip), ret);
+ return -1;
+ }
+
+
+ /* Can INVLPG work with Segments?? */
+ while (is_prefix_byte(instr[index])) {
+ index++;
+ }
+
+
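+  // INVLPG is the two-byte opcode 0x0f 0x01 (with /7 in the ModRM reg field,
+  // which is not verified here)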
+ if( (instr[index + 0] != (uchar_t) 0x0f) ||
+ (instr[index + 1] != (uchar_t) 0x01) ) {
+ PrintError("invalid Instruction Opcode\n");
+ PrintTraceMemDump(instr, 15);
+ return -1;
+ }
+
+ addr_t first_operand;
+ addr_t second_operand;
+ addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
+
+ pde32_t * guest_pd = NULL;
+
+ if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
+ PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
+ return -1;
}
+
+ index += 2;
- return (addr_t)host_pte;
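+  // Decode the memory operand to get the linear address being invalidated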
+ v3_operand_type_t addr_type = decode_operands32(&(info->vm_regs), instr + index, &index, &first_operand, &second_operand, REG32);
+
+ if (addr_type != MEM_OPERAND) {
+ PrintError("Invalid Operand type\n");
+ return -1;
+ }
+
+ pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
+ pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(first_operand)];
+ pde32_t * guest_pde;
+
+ //PrintDebug("PDE Index=%d\n", PDE32_INDEX(first_operand));
+ //PrintDebug("FirstOperand = %x\n", first_operand);
+
+ PrintDebug("Invalidating page for %p\n", (void *)first_operand);
+
+ guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(first_operand)]);
+
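+  // A large (4MB) page is mapped directly by the PDE, so invalidate the whole
+  // directory entry; otherwise clear just the one shadow PTE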
+ if (guest_pde->large_page == 1) {
+ shadow_pde->present = 0;
+ PrintDebug("Invalidating Large Page\n");
+  } else if (shadow_pde->present == 1) {
+ pte32_t * shadow_pt = (pte32_t *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr);
+ pte32_t * shadow_pte = (pte32_t *) V3_VAddr( (void*) &shadow_pt[PTE32_INDEX(first_operand)] );
+
+#ifdef DEBUG_SHADOW_PAGING
+ PrintDebug("Setting not present\n");
+ PrintPTEntry(PAGE_PT32, first_operand, shadow_pte);
+#endif
+
+ shadow_pte->present = 0;
+ }
+
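+  // Advance the guest RIP past the emulated INVLPG instruction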
+ info->rip += index;
+
+ return 0;
}