Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way, as shown below.
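
For instance, to list the remote branches and track a release branch (the release branch name here is illustrative):

  git branch -r
  git checkout --track -b release-1.0 origin/release-1.0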


Initial shadow paging support: add a per-guest page mode (SHADOW_PAGING/NESTED_PAGING) and memory map, rename shadow_map_entry_t to shadow_region_t, rename the physical/virtual conversion hooks to paddr_to_vaddr/vaddr_to_paddr, fix the INVLPG/INVLPGA intercept spellings, and stub out handle_shadow_paging()
Jack Lange [Wed, 26 Mar 2008 20:15:58 +0000 (20:15 +0000)]
15 files changed:
palacios/build/vm_kernel
palacios/include/geekos/svm_handler.h
palacios/include/geekos/vm_guest.h
palacios/include/geekos/vmcb.h
palacios/include/geekos/vmm.h
palacios/include/geekos/vmm_mem.h
palacios/include/geekos/vmm_paging.h
palacios/include/geekos/vmm_stubs.h
palacios/src/geekos/main.c
palacios/src/geekos/svm.c
palacios/src/geekos/svm_handler.c
palacios/src/geekos/vmcb.c
palacios/src/geekos/vmm_mem.c
palacios/src/geekos/vmm_paging.c
palacios/src/geekos/vmm_stubs.c

index 9898214..cba609a 100755 (executable)
Binary files a/palacios/build/vm_kernel and b/palacios/build/vm_kernel differ
index 0afbe64..e52222a 100644 (file)
@@ -187,6 +187,7 @@ struct svm_io_info {
 
 
 int handle_svm_io(guest_info_t * info);
+int handle_shadow_paging(guest_info_t * info);
 
 int handle_svm_exit(guest_info_t * info);
 
index e18f1f0..fe3bd1f 100644 (file)
@@ -18,15 +18,20 @@ struct guest_gprs {
 };
 
 
+typedef enum {SHADOW_PAGING, NESTED_PAGING} vm_page_mode_t;
+
 
 typedef struct guest_info {
   ullong_t rip;
   ullong_t rsp;
 
-  shadow_paging_state_t  shadow_paging_state;
+  shadow_map_t mem_map;
+
+  
+  vm_page_mode_t page_mode;
+  shadow_page_state_t  shadow_page_state;
+  // nested_paging_t nested_page_state;
 
-  //  vmm_mem_list_t mem_list;
-  // vmm_mem_layout_t mem_layout;
 
   vmm_io_map_t io_map;
   // device_map
@@ -34,7 +39,6 @@ typedef struct guest_info {
 
   struct guest_gprs vm_regs;
 
-  void * page_tables;
   void * vmm_data;
 } guest_info_t;
 
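
With this change the host sets up the guest's memory map and paging state directly on guest_info. A minimal sketch of the expected initialization, mirroring the main.c hunk later in this commit:

  guest_info_t vm_info;

  init_shadow_map(&(vm_info.mem_map));                    // guest-physical to host-physical map
  init_shadow_page_state(&(vm_info.shadow_page_state));
  vm_info.page_mode = SHADOW_PAGING;                      // or NESTED_PAGING, once supported
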
index 8bce011..801f44a 100644 (file)
@@ -169,8 +169,8 @@ union Instr_Intercepts {
     uint_t INVD        : 1        PACKED;
     uint_t PAUSE       : 1        PACKED;
     uint_t HLT         : 1        PACKED;
-    uint_t INVPLG      : 1        PACKED;
-    uint_t INVPLGA     : 1        PACKED;
+    uint_t INVLPG      : 1        PACKED;
+    uint_t INVLPGA     : 1        PACKED;
     uint_t IOIO_PROT   : 1        PACKED;
     uint_t MSR_PROT    : 1        PACKED;
     uint_t task_switch : 1        PACKED;
index 336c9e0..896598f 100644 (file)
@@ -87,8 +87,8 @@ struct vmm_os_hooks {
   void *(*malloc)(uint_t size);
   void (*free)(void * addr);
 
-  void *(*physical_to_virtual)(void *addr);
-  void *(*virtual_to_physical)(void *addr);
+  void *(*paddr_to_vaddr)(void *addr);
+  void *(*vaddr_to_paddr)(void *addr);
 
 
   void (*start_kernel_thread)(); // include pointer to function
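
The renamed conversion hooks keep identity semantics in the GeekOS port for now; a sketch of the host-side wiring, matching the main.c and vmm_stubs.c hunks below:

  void * Identity(void * addr) { return addr; }

  os_hooks.vaddr_to_paddr = &Identity;    // both directions are 1:1 in this port
  os_hooks.paddr_to_vaddr = &Identity;
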
index 00ea157..35c8724 100644 (file)
@@ -37,7 +37,7 @@ typedef enum host_region_type {
 
 
 
-typedef struct shadow_map_entry {
+typedef struct shadow_region {
   guest_region_type_t     guest_type;
   addr_t                  guest_start; 
   addr_t                  guest_end; 
@@ -46,56 +46,54 @@ typedef struct shadow_map_entry {
   union host_addr_t {
     struct physical_addr { 
        addr_t                  host_start; 
-       addr_t                  host_end; 
     }                     phys_addr;
     // Other addresses, like on disk, etc, would go here
   }                       host_addr;
-  struct shadow_map_entry *next, *prev;
-} shadow_map_entry_t;
+  struct shadow_region *next, *prev;
+} shadow_region_t;
 
 
 
 typedef struct shadow_map {
   uint_t num_regions;
 
-  shadow_map_entry_t * head;
+  shadow_region_t * head;
 } shadow_map_t;
 
 
-void init_shadow_map_entry(shadow_map_entry_t *entry,
-                          addr_t              guest_addr_start,
-                          addr_t              guest_addr_end,
-                          guest_region_type_t guest_region_type,
-                          host_region_type_t  host_region_type);
+void init_shadow_region(shadow_region_t * entry,
+                          addr_t               guest_addr_start,
+                          addr_t               guest_addr_end,
+                          guest_region_type_t  guest_region_type,
+                          host_region_type_t   host_region_type);
 
-void init_shadow_map_entry_physical(shadow_map_entry_t *entry,
-                                   addr_t              guest_addr_start,
-                                   addr_t              guest_addr_end,
-                                   guest_region_type_t guest_region_type,
-                                   addr_t              host_addr_start,
-                                   addr_t              host_addr_end,
-                                   host_region_type_t  host_region_type);
+void init_shadow_region_physical(shadow_region_t * entry,
+                                   addr_t               guest_addr_start,
+                                   addr_t               guest_addr_end,
+                                   guest_region_type_t  guest_region_type,
+                                   addr_t               host_addr_start,
+                                   host_region_type_t   host_region_type);
   
-void init_shadow_map(shadow_map_t *map);
-void free_shadow_map(shadow_map_t *map);
+void init_shadow_map(shadow_map_t * map);
+void free_shadow_map(shadow_map_t * map);
 
-shadow_map_entry_t * get_shadow_map_region_by_addr(shadow_map_t *map, addr_t guest_addr);
+shadow_region_t * get_shadow_region_by_addr(shadow_map_t * map, addr_t guest_addr);
 
-shadow_map_entry_t * get_shadow_map_region_by_index(shadow_map_t * map, uint_t index);
+shadow_region_t * get_shadow_region_by_index(shadow_map_t * map, uint_t index);
 
-int map_guest_physical_to_host_physical(shadow_map_entry_t *entry, 
-                                       addr_t guest_addr,
-                                       addr_t *host_addr);
+int guest_paddr_to_host_paddr(shadow_region_t * entry, 
+                             addr_t guest_addr,
+                             addr_t * host_addr);
 
 
 // Semantics:
 // Adding a region that overlaps with an existing region is undefined
 // and will probably fail
-int add_shadow_map_region(shadow_map_t * map, shadow_map_entry_t *entry);
+int add_shadow_region(shadow_map_t * map, shadow_region_t * entry);
 
 // Semantics:
 // Deletions result in splitting
-int delete_shadow_map_region(shadow_map_t *map,
+int delete_shadow_region(shadow_map_t * map,
                             addr_t guest_start, 
                             addr_t guest_end);
 
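
Taken together, the renamed API is used roughly as follows (a sketch based on the declarations above and the main.c hunk below; error handling elided):

  shadow_map_t map;
  shadow_region_t * ent = Malloc(sizeof(shadow_region_t));
  addr_t host_addr;

  init_shadow_map(&map);
  init_shadow_region_physical(ent, 0, 0x100000, GUEST_REGION_PHYSICAL_MEMORY,
                              0x100000, HOST_REGION_PHYSICAL_MEMORY);
  add_shadow_region(&map, ent);

  // guest physical 0x1000 falls in [0, 0x100000) and so maps to host 0x101000
  guest_paddr_to_host_paddr(get_shadow_region_by_addr(&map, 0x1000), 0x1000, &host_addr);
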
index 626049c..f18a492 100644 (file)
@@ -89,12 +89,17 @@ the host state in the vmcs before entering the guest.
 #define PAGE_OFFSET(x)           ((((uint_t)x) & 0xfff))
 
 #define PAGE_ALIGNED_ADDR(x)   (((uint_t) (x)) >> 12)
+
 #ifndef PAGE_ADDR
 #define PAGE_ADDR(x)   (PAGE_ALIGNED_ADDR(x) << 12)
 #endif
 
 #define PAGE_POWER 12
 
+#define CR3_TO_PDE(cr3) (((ulong_t)cr3) & 0xfffff000)
+#define CR3_TO_PDPTRE(cr3) (((ulong_t)cr3) & 0xffffffe0)
+#define CR3_TO_PML4E(cr3)  (((ullong_t)cr3) & 0x000ffffffffff000)
+
 #define VM_WRITE     1
 #define VM_USER      2
 #define VM_NOCACHE   8
@@ -102,9 +107,6 @@ the host state in the vmcs before entering the guest.
 #define VM_EXEC      0
 
 
-#define GUEST_PAGE   0x0
-#define SHARED_PAGE  0x1
-
 typedef struct pde {
   uint_t present         : 1;
   uint_t flags           : 4;
@@ -193,44 +195,40 @@ typedef struct pml4e {
 
 
 
-typedef enum { PDE32 } page_directory_type_t;
+typedef enum { PDE32 } paging_mode_t;
 
 
-typedef struct shadow_paging_state {
+typedef struct shadow_page_state {
+
   // these two reflect the top-level page directory
   // of the guest page table
-  page_directory_type_t  guest_page_directory_type;
-  void                  *guest_page_directory;         // points to guest's current page table
+  paging_mode_t           guest_mode;
+  reg_ex_t                guest_cr3;         // points to guest's current page table
 
-  // This reflects the guest physical to host physical mapping
-  shadow_map_t          shadow_map;
+  // Should this be here?
+  reg_ex_t                guest_cr0;
 
   // these two reflect the top-level page directory 
   // the shadow page table
-  page_directory_type_t  shadow_page_directory_type;
-  void                  *shadow_page_directory;
-} shadow_paging_state_t;
+  paging_mode_t           shadow_mode;
+  reg_ex_t                shadow_cr3;
 
 
+} shadow_page_state_t;
 
-int init_shadow_paging_state(shadow_paging_state_t *state);
 
-// This function will cause the shadow page table to be deleted
-// and rewritten to reflect the guest page table and the shadow map
-int wholesale_update_shadow_paging_state(shadow_paging_state_t *state);
 
-//void free_guest_page_tables(vmm_pde_t * pde);
+int init_shadow_page_state(shadow_page_state_t * state);
 
-//generate_shadow_
+// This function will cause the shadow page table to be deleted
+// and rewritten to reflect the guest page table and the shadow map
+int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t * mem_map);
 
-//vmm_pde_t * generate_guest_page_tables(shadow_map_t * map, vmm_mem_list_t * list);
-//pml4e64_t * generate_guest_page_tables_64(shadow_map_t * map, vmm_mem_list_t * list);
+vmm_pde_t * create_passthrough_pde32_pts(shadow_map_t * map);
 
+//void free_guest_page_tables(vmm_pde_t * pde);
 
 void PrintDebugPageTables(vmm_pde_t * pde);
 
 
-
-
 #endif
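
The new CR3_TO_* macros mask the flag bits out of a CR3 value to recover the page directory (or PDPT/PML4) base. The shadow paging code below uses them to locate both top-level directories:

  shadow_pde = (vmm_pde_t *)(CR3_TO_PDE(state->shadow_cr3.e_reg.low));
  guest_pde  = (vmm_pde_t *)(os_hooks->paddr_to_vaddr((void *)CR3_TO_PDE(state->guest_cr3.e_reg.low)));
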
index 1193b04..03fbfd2 100644 (file)
@@ -12,6 +12,6 @@ void Free_VMM_Page(void * page);
 void * VMM_Malloc(uint_t size);
 void VMM_Free(void * addr);
 
-void * Identity(void *addr) { return addr; };
+void * Identity(void *addr);
 
 #endif
index 65d8d18..c463f2d 100644 (file)
@@ -3,7 +3,7 @@
  * Copyright (c) 2001,2003,2004 David H. Hovemeyer <daveho@cs.umd.edu>
  * Copyright (c) 2003, Jeffrey K. Hollingsworth <hollings@cs.umd.edu>
  * Copyright (c) 2004, Iulian Neamtiu <neamtiu@cs.umd.edu>
- * $Revision: 1.21 $
+ * $Revision: 1.22 $
  * 
  * This is free software.  You are permitted to use,
  * redistribute, and modify it as specified in the file "COPYING".
@@ -300,17 +300,16 @@ void Main(struct Boot_Info* bootInfo)
     os_hooks.free_page = &Free_VMM_Page;
     os_hooks.malloc = &VMM_Malloc;
     os_hooks.free = &VMM_Free;
-    os_hooks.virtual_to_physical=&Identity;
-    os_hooks.physical_to_virtual=&Identity;
-
+    os_hooks.vaddr_to_paddr = &Identity;
+    os_hooks.paddr_to_vaddr = &Identity;
 
 
     //   DumpGDT();
     Init_VMM(&os_hooks, &vmm_ops);
   
-
-    init_shadow_paging_state(&(vm_info.shadow_paging_state));
-
+    init_shadow_map(&(vm_info.mem_map));
+    init_shadow_page_state(&(vm_info.shadow_page_state));
+    vm_info.page_mode = SHADOW_PAGING;
 
     init_vmm_io_map(&(vm_info.io_map));
 
@@ -318,7 +317,7 @@ void Main(struct Boot_Info* bootInfo)
     if (0) {
       
       //    add_shared_mem_range(&(vm_info.mem_layout), 0, 0x800000, 0x10000);    
-      //add_shared_mem_range(&(vm_info.mem_layout), 0, 0x1000000, 0);
+      //    add_shared_mem_range(&(vm_info.mem_layout), 0, 0x1000000, 0);
       
       rip = (ulong_t)(void*)&BuzzVM;
       //  rip -= 0x10000;
@@ -334,10 +333,10 @@ void Main(struct Boot_Info* bootInfo)
       //add_shared_mem_range(&(vm_info.mem_layout), 0x0, 0x1000, 0x100000);
       //      add_shared_mem_range(&(vm_info.mem_layout), 0x0, 0x100000, 0x0);
       
-      shadow_map_entry_t *ent = Malloc(sizeof(shadow_map_entry_t));;
-      init_shadow_map_entry_physical(ent,0,0x100000,GUEST_REGION_PHYSICAL_MEMORY,
-                                    0,0x100000,HOST_REGION_PHYSICAL_MEMORY);
-      add_shadow_map_region(&(vm_info.shadow_paging_state.shadow_map),ent);
+      shadow_region_t *ent = Malloc(sizeof(shadow_region_t));
+      init_shadow_region_physical(ent,0,0x100000,GUEST_REGION_PHYSICAL_MEMORY,
+                                 0x100000, HOST_REGION_PHYSICAL_MEMORY);
+      add_shadow_region(&(vm_info.mem_map),ent);
 
       hook_io_port(&(vm_info.io_map), 0x61, &IO_Read, &IO_Write);
       /*
index 2c865b3..f3865ca 100644 (file)
@@ -105,10 +105,8 @@ int init_svm_guest(struct guest_info *info) {
   info->vmm_data = (void*)Allocate_VMCB();
 
 
-  PrintDebug("Generating Guest nested page tables\n");
-  //  print_mem_list(&(info->mem_list));
-  //print_mem_layout(&(info->mem_layout));
-  info->page_tables = NULL;
+  //PrintDebug("Generating Guest nested page tables\n");
+  //  info->page_tables = NULL;
   //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
   //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
   //PrintDebugPageTables(info->page_tables);
@@ -258,7 +256,7 @@ void Init_VMCB_Real(vmcb_t * vmcb, guest_info_t vm_info) {
       *bitmap |= 1 << (port % 8);
     }
 
-    memset((uchar_t*)io_port_bitmap, 0xff, PAGE_SIZE * 2);
+    //    memset((uchar_t*)io_port_bitmap, 0xff, PAGE_SIZE * 2);
     //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
 
     ctrl_area->instrs.instrs.IOIO_PROT = 1;
@@ -267,8 +265,27 @@ void Init_VMCB_Real(vmcb_t * vmcb, guest_info_t vm_info) {
   ctrl_area->instrs.instrs.INTR = 1;
 
   // also determine if CPU supports nested paging
-  if (vm_info.page_tables) {
-    //   if (0) {
+
+  if (vm_info.page_mode == SHADOW_PAGING) {
+    PrintDebug("Creating initial shadow page table\n");
+    vm_info.shadow_page_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&(vm_info.mem_map)) & ~0xfff);
+    PrintDebug("Created\n");
+
+    guest_state->cr3 = vm_info.shadow_page_state.shadow_cr3.r_reg;
+
+    ctrl_area->cr_reads.crs.cr3 = 1;
+    ctrl_area->cr_writes.crs.cr3 = 1;
+    ctrl_area->cr_reads.crs.cr0 = 1;
+    ctrl_area->cr_writes.crs.cr0 = 1;
+
+    ctrl_area->instrs.instrs.INVLPG = 1;
+    ctrl_area->instrs.instrs.INVLPGA = 1;
+
+       
+    guest_state->g_pat = 0x7040600070406ULL;
+
+    guest_state->cr0 |= 0x80000000;
+  } else if (vm_info.page_mode == NESTED_PAGING) {
     // Flush the TLB on entries/exits
     //ctrl_area->TLB_CONTROL = 1;
 
@@ -279,17 +296,14 @@ void Init_VMCB_Real(vmcb_t * vmcb, guest_info_t vm_info) {
 
         // Set the Nested Page Table pointer
     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
-    ctrl_area->N_CR3 = 0;
-    guest_state->cr3 = (addr_t)(vm_info.page_tables);
+    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
 
     //   ctrl_area->N_CR3 = Get_CR3();
     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
 
-    guest_state->g_pat = 0x7040600070406ULL;
-
-    //PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
-  guest_state->cr0 |= 0x80000000;
+    //    guest_state->g_pat = 0x7040600070406ULL;
   }
+
 }
 
 
@@ -370,9 +384,30 @@ void Init_VMCB(vmcb_t * vmcb, guest_info_t vm_info) {
 
   ctrl_area->instrs.instrs.INTR = 1;
 
-  // also determine if CPU supports nested paging
-  if (vm_info.page_tables) {
-    //   if (0) {
+
+
+  if (vm_info.page_mode == SHADOW_PAGING) {
+    PrintDebug("Creating initial shadow page table\n");
+    vm_info.shadow_page_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&(vm_info.mem_map)) & ~0xfff);
+    PrintDebug("Created\n");
+
+    guest_state->cr3 = vm_info.shadow_page_state.shadow_cr3.r_reg;
+
+    ctrl_area->cr_reads.crs.cr3 = 1;
+    ctrl_area->cr_writes.crs.cr3 = 1;
+    ctrl_area->cr_reads.crs.cr0 = 1;
+    ctrl_area->cr_writes.crs.cr0 = 1;
+
+    ctrl_area->instrs.instrs.INVLPG = 1;
+    ctrl_area->instrs.instrs.INVLPGA = 1;
+    ctrl_area->instrs.instrs.CR0 = 1;
+       
+
+
+    guest_state->g_pat = 0x7040600070406ULL;
+
+    guest_state->cr0 |= 0x80000000;
+  } else if (vm_info.page_mode == NESTED_PAGING) {
     // Flush the TLB on entries/exits
     //ctrl_area->TLB_CONTROL = 1;
 
@@ -383,16 +418,12 @@ void Init_VMCB(vmcb_t * vmcb, guest_info_t vm_info) {
 
         // Set the Nested Page Table pointer
     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
-    ctrl_area->N_CR3 = 0;
-    guest_state->cr3 = (addr_t)(vm_info.page_tables);
+    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
 
     //   ctrl_area->N_CR3 = Get_CR3();
     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
 
-    guest_state->g_pat = 0x7040600070406ULL;
-
-    //PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
-  guest_state->cr0 |= 0x80000000;
+    //    guest_state->g_pat = 0x7040600070406ULL;
   }
 
 
@@ -527,8 +558,9 @@ void Init_VMCB_pe(vmcb_t *vmcb, guest_info_t vm_info) {
 
   }
   
-
+  
   // also determine if CPU supports nested paging
+  /*
   if (vm_info.page_tables) {
     //   if (0) {
     // Flush the TLB on entries/exits
@@ -553,7 +585,7 @@ void Init_VMCB_pe(vmcb_t *vmcb, guest_info_t vm_info) {
     // Enable Paging
     //    guest_state->cr0 |= 0x80000000;
   }
-
+  */
 
 }
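
The shadow paging branches added to both Init_VMCB variants share one idea: the freshly built passthrough page directory, masked to a page boundary, becomes the CR3 the guest actually runs on, and CR0 bit 31 (PG) is forced on so the guest always executes with paging enabled. Schematically:

  guest_state->cr3  = ((addr_t)create_passthrough_pde32_pts(&(vm_info.mem_map))) & ~0xfff;
  guest_state->cr0 |= 0x80000000;   // set CR0.PG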
 
index ea2a7b8..6840ade 100644 (file)
@@ -27,6 +27,13 @@ int handle_svm_exit(guest_info_t * info) {
   PrintDebug("io_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
   if (exit_code == VMEXIT_IOIO) {
     handle_svm_io(info);
+  } else if (( (exit_code == VMEXIT_CR3_READ)  ||
+              (exit_code == VMEXIT_CR3_WRITE) ||
+              (exit_code == VMEXIT_INVLPG)    ||
+              (exit_code == VMEXIT_INVLPGA)   || 
+              (exit_code == VMEXIT_EXCP14)) && 
+            (info->page_mode == SHADOW_PAGING)) {
+    handle_shadow_paging(info);
   }
 
 
@@ -57,3 +64,15 @@ int handle_svm_io(guest_info_t * info) {
 
   return 0;
 }
+
+
+int handle_shadow_paging(guest_info_t * info) {
+  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
+  //  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
+
+  if (guest_ctrl->exit_code == VMEXIT_CR3_READ) {
+
+  }
+
+  return 0;
+}
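
handle_shadow_paging() is only a stub here; presumably it will grow into a dispatch over the same exit codes tested above, along these lines (a hypothetical sketch, not part of this commit):

  int handle_shadow_paging(guest_info_t * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    switch (guest_ctrl->exit_code) {
    case VMEXIT_CR3_READ:    // hand back the guest's CR3, not the shadow CR3
    case VMEXIT_CR3_WRITE:   // update/rebuild the shadow page table
    case VMEXIT_INVLPG:
    case VMEXIT_INVLPGA:     // invalidate the matching shadow entries
    case VMEXIT_EXCP14:      // page fault: fix up the shadow or reflect to the guest
    default:
      break;
    }

    return 0;
  }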
index cfaa2b3..88538a3 100644 (file)
@@ -64,8 +64,8 @@ void PrintDebugVMCB(vmcb_t * vmcb) {
   PrintDebug("\tINVD: %d\n", ctrl_area->instrs.instrs.INVD);
   PrintDebug("\tPAUSE: %d\n", ctrl_area->instrs.instrs.PAUSE);
   PrintDebug("\tHLT: %d\n", ctrl_area->instrs.instrs.HLT);
-  PrintDebug("\tINVPLG: %d\n", ctrl_area->instrs.instrs.INVPLG);
-  PrintDebug("\tINVPLGA: %d\n", ctrl_area->instrs.instrs.INVPLGA);
+  PrintDebug("\tINVLPG: %d\n", ctrl_area->instrs.instrs.INVLPG);
+  PrintDebug("\tINVLPGA: %d\n", ctrl_area->instrs.instrs.INVLPGA);
   PrintDebug("\tIOIO_PROT: %d\n", ctrl_area->instrs.instrs.IOIO_PROT);
   PrintDebug("\tMSR_PROT: %d\n", ctrl_area->instrs.instrs.MSR_PROT);
   PrintDebug("\ttask_switch: %d\n", ctrl_area->instrs.instrs.task_switch);
index 20899b0..4b2d6d9 100644 (file)
@@ -5,35 +5,33 @@
 extern struct vmm_os_hooks * os_hooks;
 
 
-void init_shadow_map_entry(shadow_map_entry_t *entry,
-                          addr_t              guest_addr_start,
-                          addr_t              guest_addr_end,
-                          guest_region_type_t guest_region_type,
-                          host_region_type_t  host_region_type)
+void init_shadow_region(shadow_region_t * entry,
+                       addr_t               guest_addr_start,
+                       addr_t               guest_addr_end,
+                       guest_region_type_t  guest_region_type,
+                       host_region_type_t   host_region_type)
 {
-  entry->guest_type=guest_region_type;
-  entry->guest_start=guest_addr_start;
-  entry->guest_end=guest_addr_end;
-  entry->host_type=host_region_type;
-  entry->next=entry->prev=NULL;
+  entry->guest_type = guest_region_type;
+  entry->guest_start = guest_addr_start;
+  entry->guest_end = guest_addr_end;
+  entry->host_type = host_region_type;
+  entry->next=entry->prev = NULL;
 }
 
-void init_shadow_map_entry_physical(shadow_map_entry_t *entry,
-                                   addr_t              guest_addr_start,
-                                   addr_t              guest_addr_end,
-                                   guest_region_type_t guest_region_type,
-                                   addr_t              host_addr_start,
-                                   addr_t              host_addr_end,
-                                   host_region_type_t  host_region_type)
+void init_shadow_region_physical(shadow_region_t * entry,
+                                addr_t               guest_addr_start,
+                                addr_t               guest_addr_end,
+                                guest_region_type_t  guest_region_type,
+                                addr_t               host_addr_start,
+                                host_region_type_t   host_region_type)
 {
-  init_shadow_map_entry(entry,guest_addr_start,guest_addr_end,guest_region_type,host_region_type);
-  entry->host_addr.phys_addr.host_start=host_addr_start;
-  entry->host_addr.phys_addr.host_end=host_addr_end;
+  init_shadow_region(entry, guest_addr_start, guest_addr_end, guest_region_type, host_region_type);
+  entry->host_addr.phys_addr.host_start = host_addr_start;
+
 }
                    
 
-void init_shadow_map(shadow_map_t * map) 
-{
+void init_shadow_map(shadow_map_t * map) {
   map->num_regions = 0;
 
   map->head = NULL;
@@ -41,8 +39,8 @@ void init_shadow_map(shadow_map_t * map)
 
 
 void free_shadow_map(shadow_map_t * map) {
-  shadow_map_entry_t * cursor = map->head;
-  shadow_map_entry_t * tmp = NULL;
+  shadow_region_t * cursor = map->head;
+  shadow_region_t * tmp = NULL;
 
   while(cursor) {
     tmp = cursor;
@@ -51,7 +49,6 @@ void free_shadow_map(shadow_map_t * map) {
   }
 
   VMMFree(map);
-
 }
 
 
@@ -60,10 +57,10 @@ void free_shadow_map(shadow_map_t * map) {
  * we don't allow overlaps; we could probably allow overlapping regions
  * of the same type... but I'll let someone else deal with that
  */
-int add_shadow_map_region(shadow_map_t * map,
-                         shadow_map_entry_t * region) 
+int add_shadow_region(shadow_map_t * map,
+                     shadow_region_t * region) 
 {
-  shadow_map_entry_t * cursor = map->head;
+  shadow_region_t * cursor = map->head;
 
   if ((!cursor) || (cursor->guest_start >= region->guest_end)) {
     region->prev = NULL;
@@ -113,37 +110,33 @@ int add_shadow_map_region(shadow_map_t * map,
 }
 
 
-int delete_shadow_map_region(shadow_map_t *map,
-                            addr_t guest_start,
-                            addr_t guest_end)
-{
+int delete_shadow_region(shadow_map_t * map,
+                        addr_t guest_start,
+                        addr_t guest_end) {
   return -1;
 }
 
 
 
-shadow_map_entry_t *get_shadow_map_region_by_index(shadow_map_t * map,
-                                                  uint_t index) 
-{
-  shadow_map_entry_t * reg = map->head;
+shadow_region_t *get_shadow_region_by_index(shadow_map_t *  map,
+                                              uint_t index) {
+  shadow_region_t * reg = map->head;
   uint_t i = 0;
 
   while (reg) { 
-    if (i==index) { 
+    if (i == index) { 
       return reg;
     }
-    reg=reg->next;
+    reg = reg->next;
     i++;
   }
   return NULL;
 }
 
 
-shadow_map_entry_t * get_shadow_map_region_by_addr(shadow_map_t *map,
-                                                  addr_t addr) 
-{
-  shadow_map_entry_t * reg = map->head;
-
+shadow_region_t * get_shadow_region_by_addr(shadow_map_t * map,
+                                              addr_t addr) {
+  shadow_region_t * reg = map->head;
 
   while (reg) {
     if ((reg->guest_start <= addr) && (reg->guest_end > addr)) {
@@ -159,11 +152,12 @@ shadow_map_entry_t * get_shadow_map_region_by_addr(shadow_map_t *map,
 
 
 
-int map_guest_physical_to_host_physical(shadow_map_entry_t *entry, 
-                                       addr_t guest_addr,
-                                       addr_t *host_addr)
-{
-  if (!(guest_addr>=entry->guest_start && guest_addr<entry->guest_end)) { 
+int guest_paddr_to_host_paddr(shadow_region_t * entry, 
+                       addr_t guest_addr,
+                       addr_t * host_addr) {
+
+  if (!((guest_addr >= entry->guest_start) && 
+       (guest_addr < entry->guest_end))) { 
     return -1;
   }
 
@@ -171,7 +165,7 @@ int map_guest_physical_to_host_physical(shadow_map_entry_t *entry,
   case HOST_REGION_PHYSICAL_MEMORY:
   case HOST_REGION_MEMORY_MAPPED_DEVICE:
   case HOST_REGION_UNALLOCATED:
-    *host_addr=(guest_addr-entry->guest_start) + entry->host_addr.phys_addr.host_start;
+    *host_addr = (guest_addr-entry->guest_start) + entry->host_addr.phys_addr.host_start;
     return 0;
     break;
   default:
@@ -181,22 +175,22 @@ int map_guest_physical_to_host_physical(shadow_map_entry_t *entry,
 }
 
 
-void print_shadow_map(shadow_map_t *map) {
-  shadow_map_entry_t * cur = map->head;
+void print_shadow_map(shadow_map_t * map) {
+  shadow_region_t * cur = map->head;
   int i = 0;
 
   PrintDebug("Memory Layout (regions: %d) \n", map->num_regions);
 
   while (cur) {
-    PrintDebug("%d:  0x%x - 0x%x (%s) -> ", i, cur->guest_start, cur->guest_end -1,
+    PrintDebug("%d:  0x%x - 0x%x (%s) -> ", i, cur->guest_start, cur->guest_end - 1,
               cur->guest_type == GUEST_REGION_PHYSICAL_MEMORY ? "GUEST_REGION_PHYSICAL_MEMORY" :
               cur->guest_type == GUEST_REGION_NOTHING ? "GUEST_REGION_NOTHING" :
               cur->guest_type == GUEST_REGION_MEMORY_MAPPED_DEVICE ? "GUEST_REGION_MEMORY_MAPPED_DEVICE" :
-              "UNKNOWN");            
-    if (cur->host_type==HOST_REGION_PHYSICAL_MEMORY || 
-       cur->host_type==HOST_REGION_UNALLOCATED ||
-       cur->host_type==HOST_REGION_MEMORY_MAPPED_DEVICE) { 
-      PrintDebug("0x%x - 0x%x ", cur->host_addr.phys_addr.host_start, cur->host_addr.phys_addr.host_end);
+              "UNKNOWN");
+    if (cur->host_type == HOST_REGION_PHYSICAL_MEMORY || 
+       cur->host_type == HOST_REGION_UNALLOCATED ||
+       cur->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) { 
+      PrintDebug("0x%x", cur->host_addr.phys_addr.host_start);
     }
     PrintDebug("(%s)\n",
               cur->host_type == HOST_REGION_PHYSICAL_MEMORY ? "HOST_REGION_PHYSICAL_MEMORY" :
@@ -297,7 +291,7 @@ int mem_list_add_test_1(  vmm_mem_list_t * list) {
 }
 
 
-int mem_layout_add_test_1(vmm_mem_layout_t *layout) {
+int mem_layout_add_test_1(vmm_mem_layout_t * layout) {
 
   
   uint_t start = 0;
index 4c1469d..9cd97d6 100644 (file)
@@ -9,8 +9,8 @@ extern struct vmm_os_hooks * os_hooks;
 void delete_page_tables_pde32(vmm_pde_t * pde) {
   int i, j;
 
-  if (pde==NULL) { 
-    return ;
+  if (pde == NULL) { 
+    return;
   }
 
   for (i = 0; (i < MAX_PAGE_DIR_ENTRIES); i++) {
@@ -18,8 +18,8 @@ void delete_page_tables_pde32(vmm_pde_t * pde) {
       vmm_pte_t * pte = (vmm_pte_t *)(pde[i].pt_base_addr << PAGE_POWER);
       
       for (j = 0; (j < MAX_PAGE_TABLE_ENTRIES); j++) {
-       if ((pte[j].present) && (pte[j].vmm_info & GUEST_PAGE)){
-         os_hooks->free_page((void *)(pte[j].page_base_addr  << PAGE_POWER));
+       if ((pte[j].present)) {
+         os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
        }
       }
       
@@ -31,54 +31,59 @@ void delete_page_tables_pde32(vmm_pde_t * pde) {
 }
 
 
-int init_shadow_paging_state(shadow_paging_state_t *state)
-{
-  state->guest_page_directory_type=state->shadow_page_directory_type=PDE32;
+int init_shadow_page_state(shadow_page_state_t * state) {
+  state->guest_mode = PDE32;
+  state->shadow_mode = PDE32;
   
-  state->guest_page_directory=state->shadow_page_directory=NULL;
+  state->guest_cr3.r_reg = 0;
+  state->shadow_cr3.r_reg = 0;
 
-  init_shadow_map(&(state->shadow_map));
   return 0;
 }
   
 
-int wholesale_update_shadow_paging_state(shadow_paging_state_t *state)
-{
+int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t * mem_map) {
   unsigned i, j;
-  vmm_pde_t *cur_guest_pde, *cur_shadow_pde;
-  vmm_pte_t *cur_guest_pte, *cur_shadow_pte;
+  vmm_pde_t * guest_pde;
+  vmm_pde_t * shadow_pde;
+
 
   // For now, we'll only work with PDE32
-  if (state->guest_page_directory_type!=PDE32) { 
+  if (state->guest_mode != PDE32) { 
     return -1;
   }
+
+
   
-  cur_shadow_pde=(vmm_pde_t*)(state->shadow_page_directory);
-  
-  cur_guest_pde = (vmm_pde_t*)(os_hooks->physical_to_virtual(state->guest_page_directory));
+  shadow_pde = (vmm_pde_t *)(CR3_TO_PDE(state->shadow_cr3.e_reg.low));  
+  guest_pde = (vmm_pde_t *)(os_hooks->paddr_to_vaddr((void*)CR3_TO_PDE(state->guest_cr3.e_reg.low)));
 
   // Delete the current page table
-  delete_page_tables_pde32(cur_shadow_pde);
+  delete_page_tables_pde32(shadow_pde);
+
+  shadow_pde = os_hooks->allocate_pages(1);
+
 
-  cur_shadow_pde = os_hooks->allocate_pages(1);
+  state->shadow_cr3.e_reg.low = (addr_t)shadow_pde;
 
-  state->shadow_page_directory = cur_shadow_pde;
-  state->shadow_page_directory_type=PDE32;
+  state->shadow_mode = PDE32;
 
   
-  for (i=0;i<MAX_PAGE_DIR_ENTRIES;i++) { 
-    cur_shadow_pde[i] = cur_guest_pde[i];
+  for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) { 
+    shadow_pde[i] = guest_pde[i];
+
     // The shadow can be identical to the guest if it's not present
-    if (!cur_shadow_pde[i].present) { 
+    if (!shadow_pde[i].present) { 
       continue;
     }
-    if (cur_shadow_pde[i].large_pages) { 
+
+    if (shadow_pde[i].large_pages) { 
       // large page - just map it through shadow map to generate its physical location
-      addr_t guest_addr = PAGE_ADDR(cur_shadow_pde[i].pt_base_addr);
+      addr_t guest_addr = PAGE_ADDR(shadow_pde[i].pt_base_addr);
       addr_t host_addr;
-      shadow_map_entry_t *ent;
+      shadow_region_t * ent;
 
-      ent = get_shadow_map_region_by_addr(&(state->shadow_map),guest_addr);
+      ent = get_shadow_region_by_addr(mem_map, guest_addr);
       
       if (!ent) { 
        // FIXME Panic here - guest is trying to map to physical memory
@@ -92,18 +97,19 @@ int wholesale_update_shadow_paging_state(shadow_paging_state_t *state)
       case HOST_REGION_PHYSICAL_MEMORY:
        // points into currently allocated physical memory, so we just
        // set up the shadow to point to the mapped location
-       if (map_guest_physical_to_host_physical(ent,guest_addr,&host_addr)) { 
+       if (guest_paddr_to_host_paddr(ent, guest_addr, &host_addr)) { 
          // Panic here
          return -1;
        }
-       cur_shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(host_addr);
+
+       shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(host_addr);
        // FIXME set vmm_info bits here
        break;
       case HOST_REGION_UNALLOCATED:
        // points to physical memory that is *allowed* but that we
        // have not yet allocated.  We mark as not present and set a
        // bit to remind us to allocate it later
-       cur_shadow_pde[i].present=0;
+       shadow_pde[i].present = 0;
        // FIXME Set vminfo bits here so that we know that we will be
        // allocating it later
        break;
@@ -111,7 +117,7 @@ int wholesale_update_shadow_paging_state(shadow_paging_state_t *state)
        // points to physical memory that is NOT ALLOWED.   
        // We will mark it as not present and set a bit to remind
        // us that it's bad later and insert a GPF then
-       cur_shadow_pde[i].present=0;
+       shadow_pde[i].present = 0;
        break;
       case HOST_REGION_MEMORY_MAPPED_DEVICE:
       case HOST_REGION_REMOTE:
@@ -122,50 +128,57 @@ int wholesale_update_shadow_paging_state(shadow_paging_state_t *state)
        break;
       }
     } else {
-      addr_t host_addr;
+      vmm_pte_t * guest_pte;
+      vmm_pte_t * shadow_pte;
       addr_t guest_addr;
+      addr_t guest_pte_host_addr;
+      shadow_region_t * ent;
 
       // small page - set PDE and follow down to the child table
-      cur_shadow_pde[i] = cur_guest_pde[i];
-      
-      // Allocate a new second level page table for the shadow
-      cur_shadow_pte = os_hooks->allocate_pages(1);
+      shadow_pde[i] = guest_pde[i];
 
-      // make our first level page table in teh shadow point to it
-      cur_shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(cur_shadow_pte);
+      guest_addr = PAGE_ADDR(guest_pde[i].pt_base_addr);
 
-      shadow_map_entry_t *ent;
-      
-      guest_addr=PAGE_ADDR(cur_guest_pde[i].pt_base_addr);
+      // Allocate a new second level page table for the shadow
+      shadow_pte = os_hooks->allocate_pages(1);
 
-      ent = get_shadow_map_region_by_addr(&(state->shadow_map),guest_addr);
+      // make our first level page table in the shadow point to it
+      shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(shadow_pte);
       
-      if (!ent) { 
+      ent = get_shadow_region_by_addr(mem_map, guest_addr);
+      
+
+      /* JRL: This is bad.... */
+      // For now the guest Page Table must always be mapped to host physical memory
+      /* If we swap out a page table or if it isn't present for some reason, this turns real ugly */
+
+      if ((!ent) || (ent->host_type != HOST_REGION_PHYSICAL_MEMORY)) { 
        // FIXME Panic here - guest is trying to map to physical memory
        // it does not own in any way!
        return -1;
       }
 
       // Address of the relevant second level page table in the guest
-      if (map_guest_physical_to_host_physical(ent,guest_addr,&host_addr)) { 
+      if (guest_paddr_to_host_paddr(ent, guest_addr, &guest_pte_host_addr)) { 
        // Panic here
        return -1;
       }
-      // host_addr now contains the host physical address for the guest's 2nd level page table
 
+
+      // guest_pte_host_addr now contains the host physical address of the guest's 2nd level page table
       // Now we transform it to relevant virtual address
-      cur_guest_pte = os_hooks->physical_to_virtual((void*)host_addr);
+      guest_pte = os_hooks->paddr_to_vaddr((void *)guest_pte_host_addr);
 
       // Now we walk through the second level guest page table
       // and clone it into the shadow
-      for (j=0;j<MAX_PAGE_TABLE_ENTRIES;j++) { 
-       cur_shadow_pte[j] = cur_guest_pte[j];
+      for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) { 
+       shadow_pte[j] = guest_pte[j];
 
-       addr_t guest_addr = PAGE_ADDR(cur_shadow_pte[j].page_base_addr);
+       addr_t guest_addr = PAGE_ADDR(shadow_pte[j].page_base_addr);
        
-       shadow_map_entry_t *ent;
+       shadow_region_t * ent;
 
-       ent = get_shadow_map_region_by_addr(&(state->shadow_map),guest_addr);
+       ent = get_shadow_region_by_addr(mem_map, guest_addr);
       
        if (!ent) { 
          // FIXME Panic here - guest is trying to map to physical memory
@@ -175,20 +188,25 @@ int wholesale_update_shadow_paging_state(shadow_paging_state_t *state)
 
        switch (ent->host_type) { 
        case HOST_REGION_PHYSICAL_MEMORY:
-         // points into currently allocated physical memory, so we just
-         // set up the shadow to point to the mapped location
-         if (map_guest_physical_to_host_physical(ent,guest_addr,&host_addr)) { 
-           // Panic here
-           return -1;
+         {
+           addr_t host_addr;
+           
+           // points into currently allocated physical memory, so we just
+           // set up the shadow to point to the mapped location
+           if (guest_paddr_to_host_paddr(ent, guest_addr, &host_addr)) { 
+             // Panic here
+             return -1;
+           }
+           
+           shadow_pte[j].page_base_addr = PAGE_ALIGNED_ADDR(host_addr);
+           // FIXME set vmm_info bits here
+           break;
          }
-         cur_shadow_pte[j].page_base_addr = PAGE_ALIGNED_ADDR(host_addr);
-         // FIXME set vmm_info bits here
-         break;
        case HOST_REGION_UNALLOCATED:
          // points to physical memory that is *allowed* but that we
          // have not yet allocated.  We mark as not present and set a
          // bit to remind us to allocate it later
-         cur_shadow_pte[j].present=0;
+         shadow_pte[j].present = 0;
          // FIXME Set vminfo bits here so that we know that we will be
          // allocating it later
          break;
@@ -196,7 +214,7 @@ int wholesale_update_shadow_paging_state(shadow_paging_state_t *state)
          // points to physical memory that is NOT ALLOWED.   
          // We will mark it as not present and set a bit to remind
          // us that it's bad later and insert a GPF then
-         cur_shadow_pte[j].present=0;
+         shadow_pte[j].present = 0;
          break;
        case HOST_REGION_MEMORY_MAPPED_DEVICE:
        case HOST_REGION_REMOTE:
@@ -214,26 +232,68 @@ int wholesale_update_shadow_paging_state(shadow_paging_state_t *state)
       
 
 
-#if 0
+
 /* We generate a page table to correspond to a given memory layout
  * pulling pages from the mem_list when necessary
  * If there are any gaps in the layout, we add them as unmapped pages
  */
-vmm_pde_t * generate_guest_page_tables(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
+vmm_pde_t * create_passthrough_pde32_pts(shadow_map_t * map) {
   ullong_t current_page_addr = 0;
-  uint_t layout_index = 0;
-  uint_t list_index = 0;
-  ullong_t layout_addr = 0;
   int i, j;
-  uint_t num_entries = layout->num_pages;  // The number of pages left in the layout
-
 
-  
 
   vmm_pde_t * pde = os_hooks->allocate_pages(1);
 
   for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
-    if (num_entries == 0) { 
+    int pte_present = 0;
+    vmm_pte_t * pte = os_hooks->allocate_pages(1);
+    
+
+    for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
+      shadow_region_t * region = get_shadow_region_by_addr(map, current_page_addr);
+
+      if (!region || 
+         (region->host_type == HOST_REGION_NOTHING) || 
+         (region->host_type == HOST_REGION_UNALLOCATED) || 
+         (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) || 
+         (region->host_type == HOST_REGION_REMOTE) ||
+         (region->host_type == HOST_REGION_SWAPPED)) {
+       pte[j].present = 0;
+       pte[j].flags = 0;
+       pte[j].accessed = 0;
+       pte[j].dirty = 0;
+       pte[j].pte_attr = 0;
+       pte[j].global_page = 0;
+       pte[j].vmm_info = 0;
+       pte[j].page_base_addr = 0;
+      } else {
+       addr_t host_addr;
+       pte[j].present = 1;
+       pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;   
+       
+       pte[j].accessed = 0;
+       pte[j].dirty = 0;
+       pte[j].pte_attr = 0;
+       pte[j].global_page = 0;
+       pte[j].vmm_info = 0;
+
+       if (guest_paddr_to_host_paddr(region, current_page_addr, &host_addr) == -1) {
+         // BIG ERROR
+         // PANIC
+         return NULL;
+       }
+       
+       pte[j].page_base_addr = host_addr >> 12;
+       
+       pte_present = 1;
+      }
+
+      current_page_addr += PAGE_SIZE;
+    }
+
+    if (pte_present == 0) { 
+      VMMFree(pte);
+
       pde[i].present = 0;
       pde[i].flags = 0;
       pde[i].accessed = 0;
@@ -243,8 +303,6 @@ vmm_pde_t * generate_guest_page_tables(vmm_mem_layout_t * layout, vmm_mem_list_t
       pde[i].vmm_info = 0;
       pde[i].pt_base_addr = 0;
     } else {
-      vmm_pte_t * pte = os_hooks->allocate_pages(1);
-
       pde[i].present = 1;
       pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
       pde[i].accessed = 0;
@@ -252,85 +310,15 @@ vmm_pde_t * generate_guest_page_tables(vmm_mem_layout_t * layout, vmm_mem_list_t
       pde[i].large_pages = 0;
       pde[i].global_page = 0;
       pde[i].vmm_info = 0;
-      pde[i].pt_base_addr = PAGE_ALLIGNED_ADDR(pte);
-
-
-
-      for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
-       layout_addr = get_mem_layout_addr(layout, layout_index);
-       
-       if ((current_page_addr < layout_addr) || (num_entries == 0)) {
-         // We have a gap in the layout, fill with unmapped page
-         pte[j].present = 0;
-         pte[j].flags = 0;
-         pte[j].accessed = 0;
-         pte[j].dirty = 0;
-         pte[j].pte_attr = 0;
-         pte[j].global_page = 0;
-         pte[j].vmm_info = 0;
-         pte[j].page_base_addr = 0;
-
-         current_page_addr += PAGE_SIZE;
-       } else if (current_page_addr == layout_addr) {
-         // Set up the Table entry to map correctly to the layout region
-         layout_region_t * page_region = get_mem_layout_region(layout, layout_addr);
-
-         if (page_region->type == UNMAPPED) {
-           pte[j].present = 0;
-           pte[j].flags = 0;
-         } else {
-           pte[j].present = 1;
-           pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
-         }         
-
-         pte[j].accessed = 0;
-         pte[j].dirty = 0;
-         pte[j].pte_attr = 0;
-         pte[j].global_page = 0;
-         pte[j].vmm_info = 0;
-
-         if (page_region->type == UNMAPPED) {
-           pte[j].page_base_addr = 0;
-         } else if (page_region->type == SHARED) {
-           addr_t host_addr = page_region->host_addr + (layout_addr - page_region->start);
-
-           pte[j].page_base_addr = host_addr >> 12;
-           pte[j].vmm_info = SHARED_PAGE;
-         } else if (page_region->type == GUEST) {
-           addr_t list_addr =  get_mem_list_addr(list, list_index++);
-           
-           if (list_addr == -1) {
-             // error
-             // cleanup...
-             free_guest_page_tables(pde);
-             return NULL;
-           }
-           PrintDebug("Adding guest page (%x)\n", list_addr);
-           pte[j].page_base_addr = list_addr >> 12;
-           
-           // Reset this when we move over to dynamic page allocation
-           //      pte[j].vmm_info = GUEST_PAGE;           
-           pte[j].vmm_info = SHARED_PAGE;
-         }
-
-         num_entries--;
-         current_page_addr += PAGE_SIZE;
-         layout_index++;
-       } else {
-         // error
-         PrintDebug("Error creating page table...\n");
-         // cleanup
-         free_guest_page_tables(pde);
-         return NULL;
-       }
-      }
+      pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(pte);
     }
+
   }
 
   return pde;
 }
 
-#endif
+
 
 
 
index 96145db..a9db9c6 100644 (file)
@@ -2,6 +2,9 @@
 #include <geekos/serial.h>
 
 
+
+void * Identity(void *addr) { return addr; }
+
 void * Allocate_VMM_Pages(int num_pages) {
   void * start_page = Alloc_Page();
   SerialPrint("Allocating Page: %x (%d of %d)\n",start_page, 1, num_pages);