Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


added memory conversions and copies for the guest/host contexts
Jack Lange [Tue, 1 Apr 2008 00:56:55 +0000 (00:56 +0000)]
17 files changed:
palacios/build/Makefile
palacios/include/geekos/svm.h
palacios/include/geekos/svm_ctrl_regs.h
palacios/include/geekos/svm_handler.h
palacios/include/geekos/vm_guest.h
palacios/include/geekos/vm_guest_mem.h
palacios/include/geekos/vmm_mem.h
palacios/include/geekos/vmm_paging.h
palacios/include/geekos/vmm_shadow_paging.h
palacios/src/geekos/main.c
palacios/src/geekos/svm.c
palacios/src/geekos/svm_ctrl_regs.c
palacios/src/geekos/svm_handler.c
palacios/src/geekos/vm_guest_mem.c
palacios/src/geekos/vmm_mem.c
palacios/src/geekos/vmm_paging.c
palacios/src/geekos/vmm_shadow_paging.c

index a8f76d7..7a42b22 100644 (file)
@@ -1,6 +1,6 @@
 # Makefile for GeekOS kernel, userspace, and tools
 # Copyright (c) 2004,2005 David H. Hovemeyer <daveho@cs.umd.edu>
-# $Revision: 1.19 $
+# $Revision: 1.20 $
 
 # This is free software.  You are permitted to use,
 # redistribute, and modify it as specified in the file "COPYING".
@@ -84,11 +84,11 @@ KERNEL_C_SRCS := idt.c int.c trap.c irq.c io.c \
        bget.c malloc.c \
        synch.c kthread.c \
        serial.c  reboot.c \
-        paging.c vmx.c vmcs_gen.c vmcs.c \
+        paging.c  vm_guest.c \
         svm.c svm_handler.c vmm.c vmm_util.c vmm_stubs.c svm_ctrl_regs.c \
-       vmcb.c vmm_mem.c vm_guest.c vmm_paging.c vmm_io.c vmm_debug.c \
+       vmcb.c vmm_mem.c vmm_paging.c vmm_io.c vmm_debug.c \
        vmm_shadow_paging.c vm_guest_mem.c \
-       debug.c\
+       debug.c  vmx.c vmcs_gen.c vmcs.c\
        main.c
 
 # Kernel object files built from C source files
index 0b91f45..ddd3414 100644 (file)
@@ -61,8 +61,8 @@ int is_svm_capable();
 
 
 vmcb_t * Allocate_VMCB();
-void Init_VMCB(vmcb_t * vmcb, guest_info_t vm_info);
-void Init_VMCB_pe(vmcb_t * vmcb, guest_info_t vm_info);
+void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info);
+void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info);
 
 int init_svm_guest(struct guest_info *info);
 int start_svm_guest(struct guest_info * info);
index f710159..d16e46f 100644 (file)
@@ -17,7 +17,7 @@ static const uchar_t mov_from_cr_byte = 0x20;
 
 
 
-int handle_cr0_write(guest_info_t * info, ullong_t * new_cr0);
+int handle_cr0_write(struct guest_info * info, ullong_t * new_cr0);
 
 
 
index e52222a..b09ac60 100644 (file)
@@ -186,9 +186,9 @@ struct svm_io_info {
 };
 
 
-int handle_svm_io(guest_info_t * info);
-int handle_shadow_paging(guest_info_t * info);
+int handle_svm_io(struct guest_info * info);
+int handle_shadow_paging(struct guest_info * info);
 
-int handle_svm_exit(guest_info_t * info);
+int handle_svm_exit(struct guest_info * info);
 
 #endif
index 2595de1..aee6182 100644 (file)
@@ -5,9 +5,14 @@
 #include <geekos/ktypes.h>
 #include <geekos/vmm_io.h>
 //#include <geekos/vmm_paging.h>
-#include <geekos/vmm_shadow_paging.h>
 
 
+
+struct guest_info;
+
+
+#include <geekos/vmm_shadow_paging.h>
+
 struct guest_gprs {
   ullong_t rbx;
   ullong_t rcx;
@@ -22,7 +27,7 @@ struct guest_gprs {
 typedef enum {SHADOW_PAGING, NESTED_PAGING} vm_page_mode_t;
 typedef enum {REAL, PROTECTED, PROTECTED_PG, PROTECTED_PAE, PROTECTED_PAE_PG, LONG, LONG_PG} vm_cpu_mode_t;
 
-typedef struct guest_info {
+struct guest_info {
   ullong_t rip;
   ullong_t rsp;
 
@@ -30,7 +35,7 @@ typedef struct guest_info {
 
   
   vm_page_mode_t page_mode;
-  shadow_page_state_t  shadow_page_state;
+  struct shadow_page_state  shdw_pg_state;
   // nested_paging_t nested_page_state;
 
 
@@ -43,7 +48,7 @@ typedef struct guest_info {
   struct guest_gprs vm_regs;
 
   void * vmm_data;
-} guest_info_t;
+};
 
 
 
index 9d9c9e0..f738736 100644 (file)
@@ -5,22 +5,89 @@
 #include <geekos/vmm_mem.h>
 
 
-int guest_va_to_guest_pa(guest_info_t * guest_info, addr_t guest_va, addr_t * guest_pa);
-int guest_pa_to_guest_va(guest_info_t * guest_info, addr_t guest_pa, addr_t * guest_va);
-int guest_va_to_host_va(guest_info_t * guest_info, addr_t guest_va, addr_t * host_va);
-int guest_pa_to_host_pa(guest_info_t * guest_info, addr_t guest_pa, addr_t * host_pa);
-int guest_pa_to_host_va(guest_info_t * guest_info, addr_t guest_pa, addr_t * host_va);
+/* These functions are ordered such that they can only call the functions defined in a lower order group */
+/* This is to avoid infinite lookup loops */
 
-int host_va_to_guest_pa(guest_info_t * guest_info, addr_t host_va, addr_t * guest_pa);
-int host_pa_to_guest_va(guest_info_t * guest_info, addr_t host_pa, addr_t * guest_va);
+/**********************************/
+/* GROUP 0                        */
+/**********************************/
 
+/* Fundamental converters */
+// Call out to OS
 int host_va_to_host_pa(addr_t host_va, addr_t * host_pa);
 int host_pa_to_host_va(addr_t host_pa, addr_t * host_va);
 
+// guest_pa -> (shadow map) -> host_pa
+int guest_pa_to_host_pa(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_pa);
+
+/* !! Currently not implemented !! */
+// host_pa -> (shadow_map) -> guest_pa
+int host_pa_to_guest_pa(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_pa);
+
+
+/**********************************/
+/* GROUP 1                        */
+/**********************************/
+
+
+/* !! Currently not implemented !! */
+// host_va -> host_pa -> guest_pa
+int host_va_to_guest_pa(struct guest_info * guest_info, addr_t host_va, addr_t * guest_pa);
+
+
+// guest_pa -> host_pa -> host_va
+int guest_pa_to_host_va(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_va);
+
+
+// Look up the address in the guests page tables.. This can cause multiple calls that translate
+//     ------------------------------------------------
+//     |                                              |
+//     -->   guest_pa -> host_pa -> host_va ->   (read table) --> guest_pa
+int guest_va_to_guest_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * guest_pa);
+
+
+
+/* !! Currently not implemented !! */
+//   A page table walker in the guest's address space
+//     ------------------------------------------------
+//     |                                              |
+//     -->   guest_pa -> host_pa -> host_va ->   (read table) --> guest_va
+int guest_pa_to_guest_va(struct guest_info * guest_info, addr_t guest_pa, addr_t * guest_va);
+
+
+
+/**********************************/
+/* GROUP 2                        */
+/**********************************/
+// guest_va -> guest_pa -> host_pa
+int guest_va_to_host_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * host_pa);
+
+
+/* !! Currently not implemented !! */
+// host_pa -> guest_pa -> guest_va
+int host_pa_to_guest_va(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_va);
+
+// guest_va -> guest_pa -> host_pa -> host_va
+int guest_va_to_host_va(struct guest_info * guest_info, addr_t guest_va, addr_t * host_va);
+
+
+/* !! Currently not implemented !! */
+// host_va -> host_pa -> guest_pa -> guest_va
+int host_va_to_guest_va(struct guest_info * guest_info, addr_t host_va, addr_t  * guest_va);
+
+
+
+
+
+
+
+
+
+int read_guest_va_memory(struct guest_info * guest_info, addr_t guest_va, int count, char * dest);
+int read_guest_pa_memory(struct guest_info * guest_info, addr_t guest_pa, int count, char * dest);
+
 
 
-int read_guest_va_memory(guest_info_t * guest_info, addr_t guest_va, int count, char * dest);
-int read_guest_pa_memory(guest_info_t * guest_info, addr_t guest_pa, int count, char * dest);
 
 
 
index 2eb8157..e52ae7c 100644 (file)
@@ -82,12 +82,6 @@ shadow_region_t * get_shadow_region_by_addr(shadow_map_t * map, addr_t guest_add
 
 shadow_region_t * get_shadow_region_by_index(shadow_map_t * map, uint_t index);
 
-/*
-int guest_paddr_to_host_paddr(shadow_region_t * entry, 
-                             addr_t guest_addr,
-                             addr_t * host_addr);
-*/
-
 host_region_type_t lookup_shadow_map_addr(shadow_map_t * map, addr_t guest_addr, addr_t * host_addr);
 
 
index 5c7952b..7a0ef4e 100644 (file)
@@ -4,7 +4,7 @@
 
 #include <geekos/ktypes.h>
 
-#include <geekos/vm_guest.h>
+
 
 #include <geekos/vmm_mem.h>
 #include <geekos/vmm_util.h>
@@ -216,8 +216,6 @@ typedef enum { PDE32 } paging_mode_t;
 
 
 
-pde32_t * create_passthrough_pde32_pts(guest_info_t * guest_info);
-
 
 void delete_page_tables_pde32(pde32_t * pde);
 
@@ -228,6 +226,10 @@ int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry);
 
 
 
+#include <geekos/vm_guest.h>
+
+pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info);
+
 
 
 
index 5fd9083..cbda937 100644 (file)
@@ -6,7 +6,8 @@
 
 #include <geekos/vmm_util.h>
 
-typedef struct shadow_page_state {
+
+struct shadow_page_state {
 
   // these two reflect the top-level page directory
   // of the guest page table
@@ -22,15 +23,23 @@ typedef struct shadow_page_state {
   reg_ex_t                shadow_cr3;
 
 
-} shadow_page_state_t;
+};
+
+
+
+
+
+
 
 
+#include <geekos/vm_guest.h>
+struct guest_info;
 
-int init_shadow_page_state(shadow_page_state_t * state);
+int init_shadow_page_state(struct shadow_page_state * state);
 
 // This function will cause the shadow page table to be deleted
 // and rewritten to reflect the guest page table and the shadow map
-int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t * mem_map);
+int wholesale_update_shadow_page_state(struct guest_info * guest_info);
 
 
 
index dce6c85..575aa70 100644 (file)
@@ -3,7 +3,7 @@
  * Copyright (c) 2001,2003,2004 David H. Hovemeyer <daveho@cs.umd.edu>
  * Copyright (c) 2003, Jeffrey K. Hollingsworth <hollings@cs.umd.edu>
  * Copyright (c) 2004, Iulian Neamtiu <neamtiu@cs.umd.edu>
- * $Revision: 1.24 $
+ * $Revision: 1.25 $
  * 
  * This is free software.  You are permitted to use,
  * redistribute, and modify it as specified in the file "COPYING".
@@ -286,13 +286,13 @@ void Main(struct Boot_Info* bootInfo)
   {
     struct vmm_os_hooks os_hooks;
     struct vmm_ctrl_ops vmm_ops;
-    guest_info_t vm_info;
+    struct guest_info vm_info;
     addr_t rsp;
     addr_t rip;
 
     memset(&os_hooks, 0, sizeof(struct vmm_os_hooks));
     memset(&vmm_ops, 0, sizeof(struct vmm_ctrl_ops));
-    memset(&vm_info, 0, sizeof(guest_info_t));
+    memset(&vm_info, 0, sizeof(struct guest_info));
 
     os_hooks.print_debug = &PrintBoth;
     os_hooks.print_info = &Print;
@@ -309,7 +309,7 @@ void Main(struct Boot_Info* bootInfo)
     Init_VMM(&os_hooks, &vmm_ops);
   
     init_shadow_map(&(vm_info.mem_map));
-    init_shadow_page_state(&(vm_info.shadow_page_state));
+    init_shadow_page_state(&(vm_info.shdw_pg_state));
     vm_info.page_mode = SHADOW_PAGING;
 
     vm_info.cpu_mode = REAL;
index ab2a406..18ddcb5 100644 (file)
@@ -7,6 +7,7 @@
 #include <geekos/svm_handler.h>
 
 #include <geekos/vmm_debug.h>
+#include <geekos/vm_guest_mem.h>
 
 
 /* TEMPORARY BECAUSE SVM IS WEIRD */
@@ -160,7 +161,7 @@ vmcb_t * Allocate_VMCB() {
 }
 
 
-void Init_VMCB_Real(vmcb_t * vmcb, guest_info_t vm_info) {
+void Init_VMCB_Real(vmcb_t * vmcb, struct guest_info vm_info) {
   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
   uint_t i;
@@ -267,10 +268,10 @@ void Init_VMCB_Real(vmcb_t * vmcb, guest_info_t vm_info) {
 
   if (vm_info.page_mode == SHADOW_PAGING) {
     PrintDebug("Creating initial shadow page table\n");
-    vm_info.shadow_page_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&(vm_info.mem_map)) & ~0xfff);
+    vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
     PrintDebug("Created\n");
 
-    guest_state->cr3 = vm_info.shadow_page_state.shadow_cr3.r_reg;
+    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;
 
     ctrl_area->cr_reads.crs.cr3 = 1;
     ctrl_area->cr_writes.crs.cr3 = 1;
@@ -306,7 +307,7 @@ void Init_VMCB_Real(vmcb_t * vmcb, guest_info_t vm_info) {
 }
 
 
-void Init_VMCB(vmcb_t * vmcb, guest_info_t vm_info) {
+void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
   uint_t i;
@@ -388,10 +389,10 @@ void Init_VMCB(vmcb_t * vmcb, guest_info_t vm_info) {
 
   if (vm_info.page_mode == SHADOW_PAGING) {
     PrintDebug("Creating initial shadow page table\n");
-    vm_info.shadow_page_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&(vm_info.mem_map)) & ~0xfff);
+    vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
     PrintDebug("Created\n");
 
-    guest_state->cr3 = vm_info.shadow_page_state.shadow_cr3.r_reg;
+    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;
 
     ctrl_area->cr_reads.crs.cr3 = 1;
     ctrl_area->cr_writes.crs.cr3 = 1;
@@ -426,7 +427,7 @@ void Init_VMCB(vmcb_t * vmcb, guest_info_t vm_info) {
 
 }
 
-void Init_VMCB_pe(vmcb_t *vmcb, guest_info_t vm_info) {
+void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
   uint_t i = 0;
index e822e63..656bb9e 100644 (file)
@@ -5,32 +5,39 @@
 #include <geekos/vmm_emulate.h>
 
 
-int handle_cr0_write(guest_info_t * info, ullong_t * new_cr0) {
-  // vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
-  //vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
+int handle_cr0_write(struct guest_info * info, ullong_t * new_cr0) {
+  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
+  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
+  char instr[15];
   
-
   
-  /*
+
 
   if (info->cpu_mode == REAL) {
-    addr_t host_addr;
-    shadow_region_t * region = get_shadow_region_by_addr(&(info->mem_map), (addr_t)(info->rip));
-    if (!region || (region->host_type != HOST_REGION_PHYSICAL_MEMORY)) {
-      //PANIC
+    read_guest_pa_memory(info, (addr_t)guest_state->rip, 15, instr);
+    int index = 0;
+
+    while (is_prefix_byte(instr[index])) {
+      index++; 
+    }
+    
+    if ((instr[index] == cr_access_byte) && 
+       (instr[index + 1] == lmsw_byte)) {
+      // LMSW
+      // decode mod/RM
+
+    } else if ((instr[index] == cr_access_byte) && 
+              (instr[index + 1] == clts_byte)) {
+      // CLTS
+    } else {
+      // unsupported instruction, GPF the guest
       return -1;
     }
 
-    guest_paddr_to_host_paddr(region, (addr_t)(info->rip), &host_addr);
-    // pa to va
+
+  }
 
 
-    PrintDebug("Instr: %.4x\n", *(ushort_t*)host_addr);
-    
-    if ((*(ushort_t*)host_addr) == LMSW_EAX) {
-      PrintDebug("lmsw from eax (0x%x)\n", guest_state->rax);
-    }
-    }*/
   return 0;
 }
 
index 964aeac..e5ec493 100644 (file)
@@ -3,7 +3,7 @@
 #include <geekos/svm_ctrl_regs.h>
 
 
-int handle_svm_exit(guest_info_t * info) {
+int handle_svm_exit(struct guest_info * info) {
   vmcb_ctrl_t * guest_ctrl = 0;
   vmcb_saved_state_t * guest_state = 0;
   ulong_t exit_code = 0;
@@ -62,7 +62,7 @@ int handle_svm_exit(guest_info_t * info) {
 
 
 // This should package up an IO request and call vmm_handle_io
-int handle_svm_io(guest_info_t * info) {
+int handle_svm_io(struct guest_info * info) {
   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
 
@@ -85,7 +85,7 @@ int handle_svm_io(guest_info_t * info) {
 }
 
 
-int handle_shadow_paging(guest_info_t * info) {
+int handle_shadow_paging(struct guest_info * info) {
   vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
   //  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
 
index 4993ed2..2978ae7 100644 (file)
-#include <geekos/vm_guest_mem.c>
-
+#include <geekos/vm_guest_mem.h>
+#include <geekos/vmm.h>
 #include <geekos/vmm_paging.h>
 
 extern struct vmm_os_hooks * os_hooks;
 
 
+/**********************************/
+/* GROUP 0                        */
+/**********************************/
+
+int host_va_to_host_pa(addr_t host_va, addr_t * host_pa) {
+  if ((os_hooks) && (os_hooks)->vaddr_to_paddr) {
+
+    *host_pa = (addr_t)(os_hooks)->vaddr_to_paddr((void *)host_va);
+  
+    if (*host_pa == 0) {
+      return -1;
+    }
+  } else {
+    return -1;
+  }
+  return 0;
+}
+
+
+int host_pa_to_host_va(addr_t host_pa, addr_t * host_va) {
+  if ((os_hooks) && (os_hooks)->paddr_to_vaddr) {
+
+    *host_va = (addr_t)(os_hooks)->paddr_to_vaddr((void *)host_pa);
+    
+    if (*host_va == 0) {
+      return -1;
+    }
+  } else {
+    return -1;
+  }
+  return 0;
+}
+
+
+
+int guest_pa_to_host_pa(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_pa) {
+  // we use the shadow map here...
+  if (lookup_shadow_map_addr(&(guest_info->mem_map), guest_pa, host_pa) != HOST_REGION_PHYSICAL_MEMORY) {
+    return -1;
+  }
+
+  return 0;
+}
+
+
+/* !! Currently not implemented !! */
+// This is a scan of the shadow map
+// For now we ignore it
+// 
+int host_pa_to_guest_pa(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_pa) {
+  *guest_pa = 0;
+
+  return -1;
+}
+
+
+
+/**********************************/
+/* GROUP 1                        */
+/**********************************/
+
+
+/* !! Currently not implemented !! */
+// This will return negative until we implement host_pa_to_guest_pa()
+int host_va_to_guest_pa(struct guest_info * guest_info, addr_t host_va, addr_t * guest_pa) {
+  addr_t host_pa;
+  *guest_pa = 0;
+
+  if (host_va_to_host_pa(host_va, &host_pa) != 0) {
+    return -1;
+  }
+
+  if (host_pa_to_guest_pa(guest_info, host_pa, guest_pa) != 0) {
+    return -1;
+  }
+
+  return 0;
+}
+
+
+
+
+int guest_pa_to_host_va(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_va) {
+  addr_t host_pa;
+
+  *host_va = 0;
+
+  if (guest_pa_to_host_pa(guest_info, guest_pa, &host_pa) != 0) {
+    return -1;
+  }
+  
+  if (host_pa_to_host_va(host_pa, host_va) != 0) {
+    return -1;
+  }
+
+  return 0;
+}
 
-int guest_va_to_guest_pa(guest_info_t * guest_info, addr_t guest_va, addr_t * guest_pa) {
+
+int guest_va_to_guest_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * guest_pa) {
   if (guest_info->page_mode == SHADOW_PAGING) {
     switch (guest_info->cpu_mode) {
     case REAL:
@@ -20,7 +118,7 @@ int guest_va_to_guest_pa(guest_info_t * guest_info, addr_t guest_va, addr_t * gu
       {
        addr_t tmp_pa;
        pde32_t * pde;
-       addr_t guest_pde = CR3_TO_PDE32(guest_info->shadow_page_state.guest_cr3);
+       addr_t guest_pde = CR3_TO_PDE32(guest_info->shdw_pg_state.guest_cr3.r_reg);
 
        if (guest_pa_to_host_va(guest_info, guest_pde, (addr_t *)&pde) == -1) {
          return -1;
@@ -28,7 +126,7 @@ int guest_va_to_guest_pa(guest_info_t * guest_info, addr_t guest_va, addr_t * gu
 
        switch (pde32_lookup(pde, guest_va, &tmp_pa)) {
        case NOT_PRESENT: 
-         *guest_page = 0;
+         *guest_pa = 0;
          return -1;
        case LARGE_PAGE:
          *guest_pa = tmp_pa;
@@ -45,7 +143,7 @@ int guest_va_to_guest_pa(guest_info_t * guest_info, addr_t guest_va, addr_t * gu
              return -1;
            }
 
-           return 0;       
+           return 0;
          }
        default:
          return -1;
@@ -76,41 +174,73 @@ int guest_va_to_guest_pa(guest_info_t * guest_info, addr_t guest_va, addr_t * gu
 
 
 
+/* !! Currently not implemented !! */
+/* This will be a real pain.... its your standard page table walker in guest memory
+ * 
+ * For now we ignore it...
+ */
+int guest_pa_to_guest_va(struct guest_info * guest_info, addr_t guest_pa, addr_t * guest_va) {
+  *guest_va = 0;
+  return -1;
+}
 
 
+/**********************************/
+/* GROUP 2                        */
+/**********************************/
 
 
-int guest_pa_to_host_va(guest_info_t * guest_info, addr_t guest_pa, addr_t * host_va) {
-  addr_t host_pa;
+int guest_va_to_host_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * host_pa) {
+  addr_t guest_pa;
 
-  if (guest_pa_to_host_pa(guest_info, guest_pa, &host_pa) != 0) {
+  *host_pa = 0;
+
+  if (guest_va_to_guest_pa(guest_info, guest_va, &guest_pa) != 0) {
     return -1;
   }
   
-  if (host_pa_to_host_va(host_pa, host_va) != 0) {
+  if (guest_pa_to_host_pa(guest_info, guest_pa, host_pa) != 0) {
     return -1;
   }
 
   return 0;
 }
 
+/* !! Currently not implemented !! */
+int host_pa_to_guest_va(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_va) {
+  addr_t guest_pa;
 
-int guest_pa_to_host_pa(guest_info_t * guest_info, addr_t guest_pa, addr_t * host_pa) {
-  // we use the shadow map here...
-  if (lookup_shadow_map_addr(guest_info->shadow_map, guest_pa, host_pa) != HOST_REGION_PHYSICAL_MEMORY) {
+  *guest_va = 0;
+
+  if (host_pa_to_guest_pa(guest_info, host_pa, &guest_pa) != 0) {
     return -1;
   }
-                                 
+
+  if (guest_pa_to_guest_va(guest_info, guest_pa, guest_va) != 0) {
+    return -1;
+  }
+
   return 0;
 }
 
 
 
 
-int host_va_to_host_pa(addr_t host_va, addr_t * host_pa) {
-  *host_pa = os_hooks->vaddr_to_paddr(host_va);
-  
-  if (*host_pa == 0) {
+int guest_va_to_host_va(struct guest_info * guest_info, addr_t guest_va, addr_t * host_va) {
+  addr_t guest_pa;
+  addr_t host_pa;
+
+  *host_va = 0;
+
+  if (guest_va_to_guest_pa(guest_info, guest_va, &guest_pa) != 0) {
+    return -1;
+  }
+
+  if (guest_pa_to_host_pa(guest_info, guest_pa, &host_pa) != 0) {
+    return -1;
+  }
+
+  if (host_pa_to_host_va(host_pa, host_va) != 0) {
     return -1;
   }
 
@@ -118,12 +248,83 @@ int host_va_to_host_pa(addr_t host_va, addr_t * host_pa) {
 }
 
 
-int host_pa_to_host_va(addr_t host_pa, addr_t * host_va) {
-  *host_va = os_hooks->paddr_to_vaddr(host_pa);
+/* !! Currently not implemented !! */
+int host_va_to_guest_va(struct guest_info * guest_info, addr_t host_va, addr_t * guest_va) {
+  addr_t host_pa;
+  addr_t guest_pa;
+
+  *guest_va = 0;
+
+  if (host_va_to_host_pa(host_va, &host_pa) != 0) {
+    return -1;
+  }
+
+  if (host_pa_to_guest_pa(guest_info, host_pa, &guest_pa) != 0) {
+    return -1;
+  }
 
-  if (*host_va == 0) {
+  if (guest_pa_to_guest_va(guest_info, guest_pa, guest_va) != 0) {
     return -1;
   }
 
   return 0;
 }
+
+
+
+
+
+
+/* This is a straight address conversion + copy, 
+ *   except for the tiny little issue of crossing page boundries.....
+ */
+int read_guest_va_memory(struct guest_info * guest_info, addr_t guest_va, int count, char * dest) {
+  addr_t cursor = guest_va;
+
+  while (count > 0) {
+    int dist_to_pg_edge = (PAGE_OFFSET(cursor) + PAGE_SIZE) - cursor;
+    int bytes_to_copy = (dist_to_pg_edge > count) ? count : dist_to_pg_edge;
+    addr_t host_addr;
+
+    if (guest_va_to_host_va(guest_info, cursor, &host_addr) != 0) {
+      return -1;
+    }
+
+    memcpy(dest, (void*)cursor, bytes_to_copy);
+
+    count -= bytes_to_copy;
+    cursor += bytes_to_copy;    
+  }
+
+  return 0;
+}
+
+
+
+
+
+
+/* This is a straight address conversion + copy, 
+ *   except for the tiny little issue of crossing page boundries.....
+ */
+int read_guest_pa_memory(struct guest_info * guest_info, addr_t guest_pa, int count, char * dest) {
+  addr_t cursor = guest_pa;
+
+  while (count > 0) {
+    int dist_to_pg_edge = (PAGE_OFFSET(cursor) + PAGE_SIZE) - cursor;
+    int bytes_to_copy = (dist_to_pg_edge > count) ? count : dist_to_pg_edge;
+    addr_t host_addr;
+
+    if (guest_pa_to_host_va(guest_info, cursor, &host_addr) != 0) {
+      return -1;
+    }
+
+    memcpy(dest, (void*)cursor, bytes_to_copy);
+
+    count -= bytes_to_copy;
+    cursor += bytes_to_copy;    
+  }
+
+  return 0;
+}
+
index b740788..0551675 100644 (file)
@@ -173,31 +173,6 @@ host_region_type_t lookup_shadow_map_addr(shadow_map_t * map, addr_t guest_addr,
   }
 }
 
-/*
-int guest_paddr_to_host_paddr(shadow_region_t * entry, 
-                       addr_t guest_addr,
-                       addr_t * host_addr) {
-
-  if (!((guest_addr >= entry->guest_start) && 
-       (guest_addr < entry->guest_end))) { 
-    return -1;
-  }
-
-  switch (entry->host_type) { 
-  case HOST_REGION_PHYSICAL_MEMORY:
-  case HOST_REGION_MEMORY_MAPPED_DEVICE:
-  case HOST_REGION_UNALLOCATED:
-    *host_addr = (guest_addr-entry->guest_start) + entry->host_addr.phys_addr.host_start;
-    return 0;
-    break;
-  default:
-    return -1;
-    break;
-  }
-}
-
-*/
-
 
 void print_shadow_map(shadow_map_t * map) {
   shadow_region_t * cur = map->head;
index d73d0f1..eb54e44 100644 (file)
@@ -33,14 +33,64 @@ void delete_page_tables_pde32(pde32_t * pde) {
 
 
 
+
+
+
+
+/* We can't do a full lookup because we don't know what context the page tables are in...
+ * The entry addresses could be pointing to either guest physical memory or host physical memory
+ * Instead we just return the entry address, and a flag to show if it points to a pte or a large page...
+ */
+pde32_entry_type_t pde32_lookup(pde32_t * pde, addr_t addr, addr_t * entry) {
+  pde32_t * pde_entry = &(pde[PDE32_INDEX(addr)]);
+
+  if (!pde_entry->present) {
+    *entry = 0;
+    return NOT_PRESENT;
+  } else  {
+    *entry = PAGE_ADDR(pde_entry->pt_base_addr);
+    
+    if (pde_entry->large_pages) {
+      *entry += PAGE_OFFSET(addr);
+      return LARGE_PAGE;
+    } else {
+      return PTE32;
+    }
+  }  
+  return NOT_PRESENT;
+}
+
+
+int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry) {
+  pte32_t * pte_entry = &(pte[PTE32_INDEX(addr)]);
+
+  if (!pte_entry->present) {
+    *entry = 0;
+    return -1;
+  } else {
+    *entry = PAGE_ADDR(pte_entry->page_base_addr);
+    *entry += PAGE_OFFSET(addr);
+    return 0;
+  }
+
+  return -1;
+}
+
+
+
+
+
+
+
+
 /* We generate a page table to correspond to a given memory layout
  * pulling pages from the mem_list when necessary
  * If there are any gaps in the layout, we add them as unmapped pages
  */
-pde32_t * create_passthrough_pde32_pts(guest_info_t * guest_info) {
+pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info) {
   ullong_t current_page_addr = 0;
   int i, j;
-  shadow_map_t * map = guest_info->mem_map;
+  shadow_map_t * map = &(guest_info->mem_map);
 
 
   pde32_t * pde = os_hooks->allocate_pages(1);
@@ -124,53 +174,6 @@ pde32_t * create_passthrough_pde32_pts(guest_info_t * guest_info) {
 
 
 
-/* We can't do a full lookup because we don't know what context the page tables are in...
- * The entry addresses could be pointing to either guest physical memory or host physical memory
- * Instead we just return the entry address, and a flag to show if it points to a pte or a large page...
- */
-pde32_entry_type_t pde32_lookup(pde32_t * pde, addr_t addr, addr_t * entry) {
-  pde32_t * pde_entry = pde[PDE32_INDEX(addr)];
-
-  if (!pde_entry->present) {
-    *entry = 0;
-    return NOT_PRESENT;
-  } else  {
-    *entry = PAGE_ADDR(pde_entry->pt_base_addr);
-    
-    if (pde_entry->large_pages) {
-      *entry += PAGE_OFFSET(addr);
-      return LARGE_PAGE;
-    } else {
-      return PTE32;
-    }
-  }  
-  return NOT_PRESENT;
-}
-
-
-int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry) {
-  pte32_t * pte_entry = pte[PTE32_INDEX(addr)];
-
-  if (!pte_entry->present) {
-    *entry = 0;
-    return -1;
-  } else {
-    *entry = PAGE_ADDR(pte_entry->page_base_addr);
-    *entry += PAGE_OFFSET(addr);
-    return 0;
-  }
-
-  return -1;
-}
-
-
-
-
-
-
-
-
-
 void PrintPDE32(void * virtual_address, pde32_t * pde)
 {
   PrintDebug("PDE %p -> %p : present=%x, flags=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
index 188956c..3de10c7 100644 (file)
@@ -1,11 +1,12 @@
 #include <geekos/vmm_shadow_paging.h>
 
 #include <geekos/vmm.h>
+#include <geekos/vm_guest_mem.h>
 
 extern struct vmm_os_hooks * os_hooks;
 
 
-int init_shadow_page_state(shadow_page_state_t * state) {
+int init_shadow_page_state(struct shadow_page_state * state) {
   state->guest_mode = PDE32;
   state->shadow_mode = PDE32;
   
@@ -16,10 +17,12 @@ int init_shadow_page_state(shadow_page_state_t * state) {
 }
   
 
-int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t * mem_map) {
+int wholesale_update_shadow_page_state(struct guest_info * guest_info) {
   unsigned i, j;
-  vmm_pde_t * guest_pde;
-  vmm_pde_t * shadow_pde;
+  pde32_t * guest_pde;
+  pde32_t * shadow_pde;
+
+  struct shadow_page_state * state = &(guest_info->shdw_pg_state);
 
 
   // For now, we'll only work with PDE32
@@ -27,22 +30,18 @@ int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t
     return -1;
   }
 
-
-  
-  shadow_pde = (vmm_pde_t *)(CR3_TO_PDE(state->shadow_cr3.e_reg.low));  
-  guest_pde = (vmm_pde_t *)(os_hooks->paddr_to_vaddr((void*)CR3_TO_PDE(state->guest_cr3.e_reg.low)));
+  shadow_pde = (pde32_t *)(CR3_TO_PDE32(state->shadow_cr3.e_reg.low));  
+  guest_pde = (pde32_t *)(host_pa_to_host_va((void*)CR3_TO_PDE32(state->guest_cr3.e_reg.low)));
 
   // Delete the current page table
   delete_page_tables_pde32(shadow_pde);
 
   shadow_pde = os_hooks->allocate_pages(1);
 
-
   state->shadow_cr3.e_reg.low = (addr_t)shadow_pde;
 
   state->shadow_mode = PDE32;
 
-  
   for (i = 0; i < MAX_PDE32_ENTRIES; i++) { 
     shadow_pde[i] = guest_pde[i];
 
@@ -57,7 +56,7 @@ int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t
       addr_t host_addr;
       shadow_region_t * ent;
 
-      ent = get_shadow_region_by_addr(mem_map, guest_addr);
+      ent = get_shadow_region_by_addr(&(guest_info->mem_map), guest_addr);
       
       if (!ent) { 
        // FIXME Panic here - guest is trying to map to physical memory
@@ -71,7 +70,7 @@ int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t
       case HOST_REGION_PHYSICAL_MEMORY:
        // points into currently allocated physical memory, so we just
        // set up the shadow to point to the mapped location
-       if (guest_paddr_to_host_paddr(ent, guest_addr, &host_addr)) { 
+       if (guest_pa_to_host_pa(guest_info, guest_addr, &host_addr)) { 
          // Panic here
          return -1;
        }
@@ -102,8 +101,8 @@ int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t
        break;
       }
     } else {
-      vmm_pte_t * guest_pte;
-      vmm_pte_t * shadow_pte;
+      pte32_t * guest_pte;
+      pte32_t * shadow_pte;
       addr_t guest_addr;
       addr_t guest_pte_host_addr;
       shadow_region_t * ent;
@@ -119,7 +118,7 @@ int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t
       // make our first level page table in the shadow point to it
       shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(shadow_pte);
       
-      ent = get_shadow_region_by_addr(mem_map, guest_addr);
+      ent = get_shadow_region_by_addr(&(guest_info->mem_map), guest_addr);
       
 
       /* JRL: This is bad.... */
@@ -133,7 +132,7 @@ int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t
       }
 
       // Address of the relevant second level page table in the guest
-      if (guest_paddr_to_host_paddr(ent, guest_addr, &guest_pte_host_addr)) { 
+      if (guest_pa_to_host_pa(guest_info, guest_addr, &guest_pte_host_addr)) { 
        // Panic here
        return -1;
       }
@@ -152,7 +151,7 @@ int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t
        
        shadow_region_t * ent;
 
-       ent = get_shadow_region_by_addr(mem_map, guest_addr);
+       ent = get_shadow_region_by_addr(&(guest_info->mem_map), guest_addr);
       
        if (!ent) { 
          // FIXME Panic here - guest is trying to map to physical memory
@@ -167,7 +166,7 @@ int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t
            
            // points into currently allocated physical memory, so we just
            // set up the shadow to point to the mapped location
-           if (guest_paddr_to_host_paddr(ent, guest_addr, &host_addr)) { 
+           if (guest_pa_to_host_pa(guest_info, guest_addr, &host_addr)) { 
              // Panic here
              return -1;
            }