# Makefile for GeekOS kernel, userspace, and tools
# Copyright (c) 2004,2005 David H. Hovemeyer <daveho@cs.umd.edu>
-# $Revision: 1.19 $
+# $Revision: 1.20 $
# This is free software. You are permitted to use,
# redistribute, and modify it as specified in the file "COPYING".
bget.c malloc.c \
synch.c kthread.c \
serial.c reboot.c \
- paging.c vmx.c vmcs_gen.c vmcs.c \
+ paging.c vm_guest.c \
svm.c svm_handler.c vmm.c vmm_util.c vmm_stubs.c svm_ctrl_regs.c \
- vmcb.c vmm_mem.c vm_guest.c vmm_paging.c vmm_io.c vmm_debug.c \
+ vmcb.c vmm_mem.c vmm_paging.c vmm_io.c vmm_debug.c \
vmm_shadow_paging.c vm_guest_mem.c \
- debug.c\
+ debug.c vmx.c vmcs_gen.c vmcs.c\
main.c
# Kernel object files built from C source files
vmcb_t * Allocate_VMCB();
-void Init_VMCB(vmcb_t * vmcb, guest_info_t vm_info);
-void Init_VMCB_pe(vmcb_t * vmcb, guest_info_t vm_info);
+void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info);
+void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info);
int init_svm_guest(struct guest_info *info);
int start_svm_guest(struct guest_info * info);
-int handle_cr0_write(guest_info_t * info, ullong_t * new_cr0);
+int handle_cr0_write(struct guest_info * info, ullong_t * new_cr0);
};
-int handle_svm_io(guest_info_t * info);
-int handle_shadow_paging(guest_info_t * info);
+int handle_svm_io(struct guest_info * info);
+int handle_shadow_paging(struct guest_info * info);
-int handle_svm_exit(guest_info_t * info);
+int handle_svm_exit(struct guest_info * info);
#endif
#include <geekos/ktypes.h>
#include <geekos/vmm_io.h>
//#include <geekos/vmm_paging.h>
-#include <geekos/vmm_shadow_paging.h>
+
+struct guest_info;
+
+
+#include <geekos/vmm_shadow_paging.h>
+
struct guest_gprs {
ullong_t rbx;
ullong_t rcx;
typedef enum {SHADOW_PAGING, NESTED_PAGING} vm_page_mode_t;
typedef enum {REAL, PROTECTED, PROTECTED_PG, PROTECTED_PAE, PROTECTED_PAE_PG, LONG, LONG_PG} vm_cpu_mode_t;
-typedef struct guest_info {
+struct guest_info {
ullong_t rip;
ullong_t rsp;
vm_page_mode_t page_mode;
- shadow_page_state_t shadow_page_state;
+ struct shadow_page_state shdw_pg_state;
// nested_paging_t nested_page_state;
struct guest_gprs vm_regs;
void * vmm_data;
-} guest_info_t;
+};
#include <geekos/vmm_mem.h>
-int guest_va_to_guest_pa(guest_info_t * guest_info, addr_t guest_va, addr_t * guest_pa);
-int guest_pa_to_guest_va(guest_info_t * guest_info, addr_t guest_pa, addr_t * guest_va);
-int guest_va_to_host_va(guest_info_t * guest_info, addr_t guest_va, addr_t * host_va);
-int guest_pa_to_host_pa(guest_info_t * guest_info, addr_t guest_pa, addr_t * host_pa);
-int guest_pa_to_host_va(guest_info_t * guest_info, addr_t guest_pa, addr_t * host_va);
+/* These functions are ordered such that they can only call the functions defined in a lower order group */
+/* This is to avoid infinite lookup loops */
-int host_va_to_guest_pa(guest_info_t * guest_info, addr_t host_va, addr_t * guest_pa);
-int host_pa_to_guest_va(guest_info_t * guest_info, addr_t host_pa, addr_t * guest_va);
+/**********************************/
+/* GROUP 0 */
+/**********************************/
+/* Fundamental converters */
+// Call out to OS
int host_va_to_host_pa(addr_t host_va, addr_t * host_pa);
int host_pa_to_host_va(addr_t host_pa, addr_t * host_va);
+// guest_pa -> (shadow map) -> host_pa
+int guest_pa_to_host_pa(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_pa);
+
+/* !! Currently not implemented !! */
+// host_pa -> (shadow_map) -> guest_pa
+int host_pa_to_guest_pa(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_pa);
+
+
+/**********************************/
+/* GROUP 1 */
+/**********************************/
+
+
+/* !! Currently not implemented !! */
+// host_va -> host_pa -> guest_pa
+int host_va_to_guest_pa(struct guest_info * guest_info, addr_t host_va, addr_t * guest_pa);
+
+
+// guest_pa -> host_pa -> host_va
+int guest_pa_to_host_va(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_va);
+
+
+// Look up the address in the guests page tables.. This can cause multiple calls that translate
+// ------------------------------------------------
+// | |
+// --> guest_pa -> host_pa -> host_va -> (read table) --> guest_pa
+int guest_va_to_guest_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * guest_pa);
+
+
+
+/* !! Currently not implemented !! */
+// A page table walker in the guest's address space
+// ------------------------------------------------
+// | |
+// --> guest_pa -> host_pa -> host_va -> (read table) --> guest_va
+int guest_pa_to_guest_va(struct guest_info * guest_info, addr_t guest_pa, addr_t * guest_va);
+
+
+
+/**********************************/
+/* GROUP 2 */
+/**********************************/
+// guest_va -> guest_pa -> host_pa
+int guest_va_to_host_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * host_pa);
+
+
+/* !! Currently not implemented !! */
+// host_pa -> guest_pa -> guest_va
+int host_pa_to_guest_va(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_va);
+
+// guest_va -> guest_pa -> host_pa -> host_va
+int guest_va_to_host_va(struct guest_info * guest_info, addr_t guest_va, addr_t * host_va);
+
+
+/* !! Currently not implemented !! */
+// host_va -> host_pa -> guest_pa -> guest_va
+int host_va_to_guest_va(struct guest_info * guest_info, addr_t host_va, addr_t * guest_va);
+
+
+
+
+
+
+
+
+
+int read_guest_va_memory(struct guest_info * guest_info, addr_t guest_va, int count, char * dest);
+int read_guest_pa_memory(struct guest_info * guest_info, addr_t guest_pa, int count, char * dest);
+
-int read_guest_va_memory(guest_info_t * guest_info, addr_t guest_va, int count, char * dest);
-int read_guest_pa_memory(guest_info_t * guest_info, addr_t guest_pa, int count, char * dest);
shadow_region_t * get_shadow_region_by_index(shadow_map_t * map, uint_t index);
-/*
-int guest_paddr_to_host_paddr(shadow_region_t * entry,
- addr_t guest_addr,
- addr_t * host_addr);
-*/
-
host_region_type_t lookup_shadow_map_addr(shadow_map_t * map, addr_t guest_addr, addr_t * host_addr);
#include <geekos/ktypes.h>
-#include <geekos/vm_guest.h>
+
#include <geekos/vmm_mem.h>
#include <geekos/vmm_util.h>
-pde32_t * create_passthrough_pde32_pts(guest_info_t * guest_info);
-
void delete_page_tables_pde32(pde32_t * pde);
+#include <geekos/vm_guest.h>
+
+pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info);
+
#include <geekos/vmm_util.h>
-typedef struct shadow_page_state {
+
+struct shadow_page_state {
// these two reflect the top-level page directory
// of the guest page table
reg_ex_t shadow_cr3;
-} shadow_page_state_t;
+};
+
+
+
+
+
+
+#include <geekos/vm_guest.h>
+struct guest_info;
-int init_shadow_page_state(shadow_page_state_t * state);
+int init_shadow_page_state(struct shadow_page_state * state);
// This function will cause the shadow page table to be deleted
// and rewritten to reflect the guest page table and the shadow map
-int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t * mem_map);
+int wholesale_update_shadow_page_state(struct guest_info * guest_info);
* Copyright (c) 2001,2003,2004 David H. Hovemeyer <daveho@cs.umd.edu>
* Copyright (c) 2003, Jeffrey K. Hollingsworth <hollings@cs.umd.edu>
* Copyright (c) 2004, Iulian Neamtiu <neamtiu@cs.umd.edu>
- * $Revision: 1.24 $
+ * $Revision: 1.25 $
*
* This is free software. You are permitted to use,
* redistribute, and modify it as specified in the file "COPYING".
{
struct vmm_os_hooks os_hooks;
struct vmm_ctrl_ops vmm_ops;
- guest_info_t vm_info;
+ struct guest_info vm_info;
addr_t rsp;
addr_t rip;
memset(&os_hooks, 0, sizeof(struct vmm_os_hooks));
memset(&vmm_ops, 0, sizeof(struct vmm_ctrl_ops));
- memset(&vm_info, 0, sizeof(guest_info_t));
+ memset(&vm_info, 0, sizeof(struct guest_info));
os_hooks.print_debug = &PrintBoth;
os_hooks.print_info = &Print;
Init_VMM(&os_hooks, &vmm_ops);
init_shadow_map(&(vm_info.mem_map));
- init_shadow_page_state(&(vm_info.shadow_page_state));
+ init_shadow_page_state(&(vm_info.shdw_pg_state));
vm_info.page_mode = SHADOW_PAGING;
vm_info.cpu_mode = REAL;
#include <geekos/svm_handler.h>
#include <geekos/vmm_debug.h>
+#include <geekos/vm_guest_mem.h>
/* TEMPORARY BECAUSE SVM IS WEIRD */
}
-void Init_VMCB_Real(vmcb_t * vmcb, guest_info_t vm_info) {
+void Init_VMCB_Real(vmcb_t * vmcb, struct guest_info vm_info) {
vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
uint_t i;
if (vm_info.page_mode == SHADOW_PAGING) {
PrintDebug("Creating initial shadow page table\n");
- vm_info.shadow_page_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&(vm_info.mem_map)) & ~0xfff);
+ vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
PrintDebug("Created\n");
- guest_state->cr3 = vm_info.shadow_page_state.shadow_cr3.r_reg;
+ guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;
ctrl_area->cr_reads.crs.cr3 = 1;
ctrl_area->cr_writes.crs.cr3 = 1;
}
-void Init_VMCB(vmcb_t * vmcb, guest_info_t vm_info) {
+void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
uint_t i;
if (vm_info.page_mode == SHADOW_PAGING) {
PrintDebug("Creating initial shadow page table\n");
- vm_info.shadow_page_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&(vm_info.mem_map)) & ~0xfff);
+ vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
PrintDebug("Created\n");
- guest_state->cr3 = vm_info.shadow_page_state.shadow_cr3.r_reg;
+ guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;
ctrl_area->cr_reads.crs.cr3 = 1;
ctrl_area->cr_writes.crs.cr3 = 1;
}
-void Init_VMCB_pe(vmcb_t *vmcb, guest_info_t vm_info) {
+void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
uint_t i = 0;
#include <geekos/vmm_emulate.h>
-int handle_cr0_write(guest_info_t * info, ullong_t * new_cr0) {
- // vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
- //vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
+int handle_cr0_write(struct guest_info * info, ullong_t * new_cr0) {
+ vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
+ vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
+ char instr[15];
-
- /*
+
if (info->cpu_mode == REAL) {
- addr_t host_addr;
- shadow_region_t * region = get_shadow_region_by_addr(&(info->mem_map), (addr_t)(info->rip));
- if (!region || (region->host_type != HOST_REGION_PHYSICAL_MEMORY)) {
- //PANIC
+ read_guest_pa_memory(info, (addr_t)guest_state->rip, 15, instr);
+ int index = 0;
+
+ while (is_prefix_byte(instr[index])) {
+ index++;
+ }
+
+ if ((instr[index] == cr_access_byte) &&
+ (instr[index + 1] == lmsw_byte)) {
+ // LMSW
+ // decode mod/RM
+
+ } else if ((instr[index] == cr_access_byte) &&
+ (instr[index + 1] == clts_byte)) {
+ // CLTS
+ } else {
+ // unsupported instruction, GPF the guest
return -1;
}
- guest_paddr_to_host_paddr(region, (addr_t)(info->rip), &host_addr);
- // pa to va
+
+ }
- PrintDebug("Instr: %.4x\n", *(ushort_t*)host_addr);
-
- if ((*(ushort_t*)host_addr) == LMSW_EAX) {
- PrintDebug("lmsw from eax (0x%x)\n", guest_state->rax);
- }
- }*/
return 0;
}
#include <geekos/svm_ctrl_regs.h>
-int handle_svm_exit(guest_info_t * info) {
+int handle_svm_exit(struct guest_info * info) {
vmcb_ctrl_t * guest_ctrl = 0;
vmcb_saved_state_t * guest_state = 0;
ulong_t exit_code = 0;
// This should package up an IO request and call vmm_handle_io
-int handle_svm_io(guest_info_t * info) {
+int handle_svm_io(struct guest_info * info) {
vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
}
-int handle_shadow_paging(guest_info_t * info) {
+int handle_shadow_paging(struct guest_info * info) {
vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
// vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
-#include <geekos/vm_guest_mem.c>
-
+#include <geekos/vm_guest_mem.h>
+#include <geekos/vmm.h>
#include <geekos/vmm_paging.h>
extern struct vmm_os_hooks * os_hooks;
+/**********************************/
+/* GROUP 0 */
+/**********************************/
+
+int host_va_to_host_pa(addr_t host_va, addr_t * host_pa) {
+ if ((os_hooks) && (os_hooks)->vaddr_to_paddr) {
+
+ *host_pa = (addr_t)(os_hooks)->vaddr_to_paddr((void *)host_va);
+
+ if (*host_pa == 0) {
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+ return 0;
+}
+
+
+int host_pa_to_host_va(addr_t host_pa, addr_t * host_va) {
+ if ((os_hooks) && (os_hooks)->paddr_to_vaddr) {
+
+ *host_va = (addr_t)(os_hooks)->paddr_to_vaddr((void *)host_pa);
+
+ if (*host_va == 0) {
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+ return 0;
+}
+
+
+
+int guest_pa_to_host_pa(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_pa) {
+ // we use the shadow map here...
+ if (lookup_shadow_map_addr(&(guest_info->mem_map), guest_pa, host_pa) != HOST_REGION_PHYSICAL_MEMORY) {
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/* !! Currently not implemented !! */
+// This is a scan of the shadow map
+// For now we ignore it
+//
+int host_pa_to_guest_pa(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_pa) {
+ *guest_pa = 0;
+
+ return -1;
+}
+
+
+
+/**********************************/
+/* GROUP 1 */
+/**********************************/
+
+
+/* !! Currently not implemented !! */
+// This will return negative until we implement host_pa_to_guest_pa()
+int host_va_to_guest_pa(struct guest_info * guest_info, addr_t host_va, addr_t * guest_pa) {
+ addr_t host_pa;
+ *guest_pa = 0;
+
+ if (host_va_to_host_pa(host_va, &host_pa) != 0) {
+ return -1;
+ }
+
+ if (host_pa_to_guest_pa(guest_info, host_pa, guest_pa) != 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+
+
+
+int guest_pa_to_host_va(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_va) {
+ addr_t host_pa;
+
+ *host_va = 0;
+
+ if (guest_pa_to_host_pa(guest_info, guest_pa, &host_pa) != 0) {
+ return -1;
+ }
+
+ if (host_pa_to_host_va(host_pa, host_va) != 0) {
+ return -1;
+ }
+
+ return 0;
+}
-int guest_va_to_guest_pa(guest_info_t * guest_info, addr_t guest_va, addr_t * guest_pa) {
+
+int guest_va_to_guest_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * guest_pa) {
if (guest_info->page_mode == SHADOW_PAGING) {
switch (guest_info->cpu_mode) {
case REAL:
{
addr_t tmp_pa;
pde32_t * pde;
- addr_t guest_pde = CR3_TO_PDE32(guest_info->shadow_page_state.guest_cr3);
+ addr_t guest_pde = CR3_TO_PDE32(guest_info->shdw_pg_state.guest_cr3.r_reg);
if (guest_pa_to_host_va(guest_info, guest_pde, (addr_t *)&pde) == -1) {
return -1;
switch (pde32_lookup(pde, guest_va, &tmp_pa)) {
case NOT_PRESENT:
- *guest_page = 0;
+ *guest_pa = 0;
return -1;
case LARGE_PAGE:
*guest_pa = tmp_pa;
return -1;
}
- return 0;
+ return 0;
}
default:
return -1;
+/* !! Currently not implemented !! */
+/* This will be a real pain.... its your standard page table walker in guest memory
+ *
+ * For now we ignore it...
+ */
+int guest_pa_to_guest_va(struct guest_info * guest_info, addr_t guest_pa, addr_t * guest_va) {
+ *guest_va = 0;
+ return -1;
+}
+/**********************************/
+/* GROUP 2 */
+/**********************************/
-int guest_pa_to_host_va(guest_info_t * guest_info, addr_t guest_pa, addr_t * host_va) {
- addr_t host_pa;
+int guest_va_to_host_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * host_pa) {
+ addr_t guest_pa;
- if (guest_pa_to_host_pa(guest_info, guest_pa, &host_pa) != 0) {
+ *host_pa = 0;
+
+ if (guest_va_to_guest_pa(guest_info, guest_va, &guest_pa) != 0) {
return -1;
}
- if (host_pa_to_host_va(host_pa, host_va) != 0) {
+ if (guest_pa_to_host_pa(guest_info, guest_pa, host_pa) != 0) {
return -1;
}
return 0;
}
+/* !! Currently not implemented !! */
+int host_pa_to_guest_va(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_va) {
+ addr_t guest_pa;
-int guest_pa_to_host_pa(guest_info_t * guest_info, addr_t guest_pa, addr_t * host_pa) {
- // we use the shadow map here...
- if (lookup_shadow_map_addr(guest_info->shadow_map, guest_pa, host_pa) != HOST_REGION_PHYSICAL_MEMORY) {
+ *guest_va = 0;
+
+ if (host_pa_to_guest_pa(guest_info, host_pa, &guest_pa) != 0) {
return -1;
}
-
+
+ if (guest_pa_to_guest_va(guest_info, guest_pa, guest_va) != 0) {
+ return -1;
+ }
+
return 0;
}
-int host_va_to_host_pa(addr_t host_va, addr_t * host_pa) {
- *host_pa = os_hooks->vaddr_to_paddr(host_va);
-
- if (*host_pa == 0) {
+// guest_va -> guest_pa -> host_pa -> host_va
+// Returns 0 on success with the translated address in *host_va,
+// -1 if any stage of the translation fails (*host_va left 0).
+int guest_va_to_host_va(struct guest_info * guest_info, addr_t guest_va, addr_t * host_va) {
+  addr_t guest_pa;
+  addr_t host_pa;
+
+  *host_va = 0;
+
+  if (guest_va_to_guest_pa(guest_info, guest_va, &guest_pa) != 0) {
+    return -1;
+  }
+
+  if (guest_pa_to_host_pa(guest_info, guest_pa, &host_pa) != 0) {
+    return -1;
+  }
+
+  if (host_pa_to_host_va(host_pa, host_va) != 0) {
    return -1;
  }
+
+  // BUG FIX: the function previously fell off the end without returning a
+  // value (undefined behavior in a non-void function); callers such as
+  // read_guest_va_memory() compare the result against 0, so the success
+  // path must return 0 explicitly.
+  return 0;
}
-int host_pa_to_host_va(addr_t host_pa, addr_t * host_va) {
- *host_va = os_hooks->paddr_to_vaddr(host_pa);
+/* !! Currently not implemented !! */
+int host_va_to_guest_va(struct guest_info * guest_info, addr_t host_va, addr_t * guest_va) {
+ addr_t host_pa;
+ addr_t guest_pa;
+
+ *guest_va = 0;
+
+ if (host_va_to_host_pa(host_va, &host_pa) != 0) {
+ return -1;
+ }
+
+ if (host_pa_to_guest_pa(guest_info, host_pa, &guest_pa) != 0) {
+ return -1;
+ }
- if (*host_va == 0) {
+ if (guest_pa_to_guest_va(guest_info, guest_pa, guest_va) != 0) {
return -1;
}
return 0;
}
+
+
+
+
+
+
+/* Copies `count` bytes starting at guest virtual address `guest_va` into
+ * the host buffer `dest`, translating one page at a time so that reads
+ * crossing page boundaries are handled correctly.
+ * Returns 0 on success, -1 if any page's translation fails.
+ */
+int read_guest_va_memory(struct guest_info * guest_info, addr_t guest_va, int count, char * dest) {
+  addr_t cursor = guest_va;
+
+  while (count > 0) {
+    // BUG FIX: PAGE_OFFSET() yields the offset within the page (see its
+    // use in pde32_lookup/pte32_lookup), so the distance to the next page
+    // boundary is PAGE_SIZE - PAGE_OFFSET(cursor); the old expression
+    // (PAGE_OFFSET(cursor) + PAGE_SIZE) - cursor went negative for any
+    // cursor beyond the first page.
+    int dist_to_pg_edge = PAGE_SIZE - PAGE_OFFSET(cursor);
+    int bytes_to_copy = (dist_to_pg_edge > count) ? count : dist_to_pg_edge;
+    addr_t host_addr;
+
+    if (guest_va_to_host_va(guest_info, cursor, &host_addr) != 0) {
+      return -1;
+    }
+
+    // BUG FIX: copy from the *translated* host address, not from the raw
+    // guest virtual address, and advance the destination pointer so each
+    // page lands after the previous one instead of overwriting it.
+    memcpy(dest, (void*)host_addr, bytes_to_copy);
+
+    count -= bytes_to_copy;
+    cursor += bytes_to_copy;
+    dest += bytes_to_copy;
+  }
+
+  return 0;
+}
+
+
+
+
+
+
+/* Copies `count` bytes starting at guest physical address `guest_pa` into
+ * the host buffer `dest`, translating one page at a time so that reads
+ * crossing page boundaries are handled correctly.
+ * Returns 0 on success, -1 if any page's translation fails.
+ */
+int read_guest_pa_memory(struct guest_info * guest_info, addr_t guest_pa, int count, char * dest) {
+  addr_t cursor = guest_pa;
+
+  while (count > 0) {
+    // BUG FIX: PAGE_OFFSET() yields the offset within the page (see its
+    // use in pde32_lookup/pte32_lookup), so the distance to the next page
+    // boundary is PAGE_SIZE - PAGE_OFFSET(cursor); the old expression
+    // (PAGE_OFFSET(cursor) + PAGE_SIZE) - cursor went negative for any
+    // cursor beyond the first page.
+    int dist_to_pg_edge = PAGE_SIZE - PAGE_OFFSET(cursor);
+    int bytes_to_copy = (dist_to_pg_edge > count) ? count : dist_to_pg_edge;
+    addr_t host_addr;
+
+    if (guest_pa_to_host_va(guest_info, cursor, &host_addr) != 0) {
+      return -1;
+    }
+
+    // BUG FIX: copy from the *translated* host address, not from the raw
+    // guest physical address, and advance the destination pointer so each
+    // page lands after the previous one instead of overwriting it.
+    memcpy(dest, (void*)host_addr, bytes_to_copy);
+
+    count -= bytes_to_copy;
+    cursor += bytes_to_copy;
+    dest += bytes_to_copy;
+  }
+
+  return 0;
+}
+
}
}
-/*
-int guest_paddr_to_host_paddr(shadow_region_t * entry,
- addr_t guest_addr,
- addr_t * host_addr) {
-
- if (!((guest_addr >= entry->guest_start) &&
- (guest_addr < entry->guest_end))) {
- return -1;
- }
-
- switch (entry->host_type) {
- case HOST_REGION_PHYSICAL_MEMORY:
- case HOST_REGION_MEMORY_MAPPED_DEVICE:
- case HOST_REGION_UNALLOCATED:
- *host_addr = (guest_addr-entry->guest_start) + entry->host_addr.phys_addr.host_start;
- return 0;
- break;
- default:
- return -1;
- break;
- }
-}
-
-*/
-
void print_shadow_map(shadow_map_t * map) {
shadow_region_t * cur = map->head;
+
+
+
+
+/* We can't do a full lookup because we don't know what context the page tables are in...
+ * The entry addresses could be pointing to either guest physical memory or host physical memory
+ * Instead we just return the entry address, and a flag to show if it points to a pte or a large page...
+ */
+pde32_entry_type_t pde32_lookup(pde32_t * pde, addr_t addr, addr_t * entry) {
+ pde32_t * pde_entry = &(pde[PDE32_INDEX(addr)]);
+
+ if (!pde_entry->present) {
+ *entry = 0;
+ return NOT_PRESENT;
+ } else {
+ *entry = PAGE_ADDR(pde_entry->pt_base_addr);
+
+ if (pde_entry->large_pages) {
+ *entry += PAGE_OFFSET(addr);
+ return LARGE_PAGE;
+ } else {
+ return PTE32;
+ }
+ }
+ return NOT_PRESENT;
+}
+
+
+// Translates `addr` through one second-level (PTE32) page table.
+// On success writes the full physical address (page base + page offset)
+// through *entry and returns 0; if the page is not present, sets
+// *entry = 0 and returns -1.
+int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry) {
+  pte32_t * pte_entry = &(pte[PTE32_INDEX(addr)]);
+
+  if (!pte_entry->present) {
+    *entry = 0;
+    return -1;
+  } else {
+    // Base address of the mapped page, plus the offset within it.
+    *entry = PAGE_ADDR(pte_entry->page_base_addr);
+    *entry += PAGE_OFFSET(addr);
+    return 0;
+  }
+
+  // Unreachable: both branches above return.
+  return -1;
+}
+
+
+
+
+
+
+
+
/* We generate a page table to correspond to a given memory layout
* pulling pages from the mem_list when necessary
* If there are any gaps in the layout, we add them as unmapped pages
*/
-pde32_t * create_passthrough_pde32_pts(guest_info_t * guest_info) {
+pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info) {
ullong_t current_page_addr = 0;
int i, j;
- shadow_map_t * map = guest_info->mem_map;
+ shadow_map_t * map = &(guest_info->mem_map);
pde32_t * pde = os_hooks->allocate_pages(1);
-/* We can't do a full lookup because we don't know what context the page tables are in...
- * The entry addresses could be pointing to either guest physical memory or host physical memory
- * Instead we just return the entry address, and a flag to show if it points to a pte or a large page...
- */
-pde32_entry_type_t pde32_lookup(pde32_t * pde, addr_t addr, addr_t * entry) {
- pde32_t * pde_entry = pde[PDE32_INDEX(addr)];
-
- if (!pde_entry->present) {
- *entry = 0;
- return NOT_PRESENT;
- } else {
- *entry = PAGE_ADDR(pde_entry->pt_base_addr);
-
- if (pde_entry->large_pages) {
- *entry += PAGE_OFFSET(addr);
- return LARGE_PAGE;
- } else {
- return PTE32;
- }
- }
- return NOT_PRESENT;
-}
-
-
-int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry) {
- pte32_t * pte_entry = pte[PTE32_INDEX(addr)];
-
- if (!pte_entry->present) {
- *entry = 0;
- return -1;
- } else {
- *entry = PAGE_ADDR(pte_entry->page_base_addr);
- *entry += PAGE_OFFSET(addr);
- return 0;
- }
-
- return -1;
-}
-
-
-
-
-
-
-
-
-
void PrintPDE32(void * virtual_address, pde32_t * pde)
{
PrintDebug("PDE %p -> %p : present=%x, flags=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
#include <geekos/vmm_shadow_paging.h>
#include <geekos/vmm.h>
+#include <geekos/vm_guest_mem.h>
extern struct vmm_os_hooks * os_hooks;
-int init_shadow_page_state(shadow_page_state_t * state) {
+int init_shadow_page_state(struct shadow_page_state * state) {
state->guest_mode = PDE32;
state->shadow_mode = PDE32;
}
-int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t * mem_map) {
+int wholesale_update_shadow_page_state(struct guest_info * guest_info) {
unsigned i, j;
- vmm_pde_t * guest_pde;
- vmm_pde_t * shadow_pde;
+ pde32_t * guest_pde;
+ pde32_t * shadow_pde;
+
+ struct shadow_page_state * state = &(guest_info->shdw_pg_state);
// For now, we'll only work with PDE32
return -1;
}
-
-
-  shadow_pde = (vmm_pde_t *)(CR3_TO_PDE(state->shadow_cr3.e_reg.low));
-  guest_pde = (vmm_pde_t *)(os_hooks->paddr_to_vaddr((void*)CR3_TO_PDE(state->guest_cr3.e_reg.low)));
+  shadow_pde = (pde32_t *)(CR3_TO_PDE32(state->shadow_cr3.e_reg.low));
+
+  // BUG FIX: host_pa_to_host_va() returns an int status code and writes the
+  // translated address through its second (addr_t *) argument -- casting its
+  // return value to a pointer yields 0 or -1, not an address. Call it with
+  // an out-parameter and propagate failure.
+  if (host_pa_to_host_va((addr_t)CR3_TO_PDE32(state->guest_cr3.e_reg.low), (addr_t *)&guest_pde) != 0) {
+    return -1;
+  }
// Delete the current page table
delete_page_tables_pde32(shadow_pde);
shadow_pde = os_hooks->allocate_pages(1);
-
state->shadow_cr3.e_reg.low = (addr_t)shadow_pde;
state->shadow_mode = PDE32;
-
for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
shadow_pde[i] = guest_pde[i];
addr_t host_addr;
shadow_region_t * ent;
- ent = get_shadow_region_by_addr(mem_map, guest_addr);
+ ent = get_shadow_region_by_addr(&(guest_info->mem_map), guest_addr);
if (!ent) {
// FIXME Panic here - guest is trying to map to physical memory
case HOST_REGION_PHYSICAL_MEMORY:
// points into currently allocated physical memory, so we just
// set up the shadow to point to the mapped location
- if (guest_paddr_to_host_paddr(ent, guest_addr, &host_addr)) {
+ if (guest_pa_to_host_pa(guest_info, guest_addr, &host_addr)) {
// Panic here
return -1;
}
break;
}
} else {
- vmm_pte_t * guest_pte;
- vmm_pte_t * shadow_pte;
+ pte32_t * guest_pte;
+ pte32_t * shadow_pte;
addr_t guest_addr;
addr_t guest_pte_host_addr;
shadow_region_t * ent;
// make our first level page table in the shadow point to it
shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(shadow_pte);
- ent = get_shadow_region_by_addr(mem_map, guest_addr);
+ ent = get_shadow_region_by_addr(&(guest_info->mem_map), guest_addr);
/* JRL: This is bad.... */
}
// Address of the relevant second level page table in the guest
- if (guest_paddr_to_host_paddr(ent, guest_addr, &guest_pte_host_addr)) {
+ if (guest_pa_to_host_pa(guest_info, guest_addr, &guest_pte_host_addr)) {
// Panic here
return -1;
}
shadow_region_t * ent;
- ent = get_shadow_region_by_addr(mem_map, guest_addr);
+ ent = get_shadow_region_by_addr(&(guest_info->mem_map), guest_addr);
if (!ent) {
// FIXME Panic here - guest is trying to map to physical memory
// points into currently allocated physical memory, so we just
// set up the shadow to point to the mapped location
- if (guest_paddr_to_host_paddr(ent, guest_addr, &host_addr)) {
+ if (guest_pa_to_host_pa(guest_info, guest_addr, &host_addr)) {
// Panic here
return -1;
}