Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.
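
For example, to see which release branches exist and switch to one (the branch name below is illustrative; pick one from the actual listing):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2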


cleaned up the memory handling implementation
Jack Lange [Mon, 20 Apr 2009 00:55:58 +0000 (19:55 -0500)]
13 files changed:
palacios/include/palacios/vmm_direct_paging.h
palacios/include/palacios/vmm_paging.h
palacios/include/palacios/vmm_shadow_paging.h
palacios/src/palacios/svm.c
palacios/src/palacios/vmm.c
palacios/src/palacios/vmm_config.c
palacios/src/palacios/vmm_ctrl_regs.c
palacios/src/palacios/vmm_direct_paging.c
palacios/src/palacios/vmm_io.c
palacios/src/palacios/vmm_mem.c
palacios/src/palacios/vmm_paging.c
palacios/src/palacios/vmm_shadow_paging.c
palacios/src/palacios/vmm_shadow_paging_32.h

palacios/include/palacios/vmm_direct_paging.h
index 22843fd..8b3e0e4 100644
 #include <palacios/vmm_mem.h>
 #include <palacios/vmm_paging.h>
 
-addr_t v3_create_direct_passthrough_pts(struct guest_info * guest_info);
+int v3_init_passthrough_pts(struct guest_info * guest_info);
+int v3_reset_passthrough_pts(struct guest_info * guest_info);
 
 int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
 int v3_handle_nested_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
 
+int v3_activate_passthrough_pt(struct guest_info * info);
+
+int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr);
+
 #endif // ! __V3VEE__
 
 #endif
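
Taken together, this header now gives the passthrough page tables a small lifecycle: init creates them, activate loads them, reset tears them down and rebuilds them for the current CPU mode, and invalidate (a stub in this commit) will flush individual addresses. A minimal caller-side sketch, illustrative only, mirroring the svm.c hunks below:

    static int setup_passthrough(struct guest_info * info) {
       if (v3_init_passthrough_pts(info) == -1) {    // allocate the root page table
           PrintError("Could not initialize passthrough page tables\n");
           return -1;
       }

       return v3_activate_passthrough_pt(info);      // point the guest CR3 at it
    }

When the guest toggles its paging mode, v3_reset_passthrough_pts() frees the old tables and allocates fresh ones; the vmm_ctrl_regs.c hunk below does exactly this on a CR4.PAE transition.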

palacios/include/palacios/vmm_paging.h
index ff39685..d1f93f2 100644
@@ -576,7 +576,7 @@ pml4e64_t * create_passthrough_pts_64(struct guest_info * info);
 
 
 void delete_page_tables_32(pde32_t * pde);
-void delete_page_tables_32PAE(pdpe32pae_t * pdpe);
+void delete_page_tables_32pae(pdpe32pae_t * pdpe);
 void delete_page_tables_64(pml4e64_t *  pml4);
 
 
palacios/include/palacios/vmm_shadow_paging.h
index a3eabb9..1633ec8 100644
@@ -38,11 +38,6 @@ struct shadow_page_state {
     // list of allocated shadow pages
     struct list_head page_list;
 
-    /* SOON TO BE DEPRECATED */
-    // Hash table that contains a mapping of guest pte addresses to host pte addresses
-    struct hashtable *  cached_ptes;
-    addr_t cached_cr3;
-
 };
 
 
@@ -61,7 +56,7 @@ int v3_handle_shadow_invlpg(struct guest_info * info);
 
 
 int v3_activate_shadow_pt(struct guest_info * info);
-int v3_activate_passthrough_pt(struct guest_info * info);
+
 
 
 #endif // ! __V3VEE__

palacios/src/palacios/svm.c
index 7325bdf..3707d1f 100644
 
 #include <palacios/vmm_direct_paging.h>
 
+#include <palacios/vmm_ctrl_regs.h>
+
+
 extern void v3_stgi();
 extern void v3_clgi();
 //extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
 extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs);
 
 
-
-
 static vmcb_t * Allocate_VMCB() {
     vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
 
@@ -58,8 +59,6 @@ static vmcb_t * Allocate_VMCB() {
 
 
 
-#include <palacios/vmm_ctrl_regs.h>
-
 static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
     vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
     vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
@@ -72,20 +71,6 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
 
     guest_state->cpl = 0;
 
-
-
-
-    /* Set up the efer to enable 64 bit page tables */
-    /*
-      {
-      struct efer_64 * efer = (struct efer_64 *)&(guest_state->efer);
-      struct cr4_32 * cr4 = (struct cr4_32 *)&(guest_state->cr4);
-      efer->lma = 1;
-      efer->lme = 1;
-      
-      cr4->pae = 1;
-      }
-    */
     guest_state->efer |= EFER_MSR_svm_enable;
 
 
@@ -108,8 +93,6 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
 
     ctrl_area->instrs.HLT = 1;
     // guest_state->cr0 = 0x00000001;    // PE 
-
-
   
     /*
       ctrl_area->exceptions.de = 1;
@@ -128,29 +111,31 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
     */
     
 
-    // Debug of boot on physical machines - 7/14/08
-    ctrl_area->instrs.NMI=1;
-    ctrl_area->instrs.SMI=1;
-    ctrl_area->instrs.INIT=1;
-    ctrl_area->instrs.PAUSE=1;
-    ctrl_area->instrs.shutdown_evts=1;
-    
+    ctrl_area->instrs.NMI = 1;
+    ctrl_area->instrs.SMI = 1;
+    ctrl_area->instrs.INIT = 1;
+    ctrl_area->instrs.PAUSE = 1;
+    ctrl_area->instrs.shutdown_evts = 1;
+
     vm_info->vm_regs.rdx = 0x00000f00;
-    
+
     guest_state->cr0 = 0x60000010;
-    
-    
+
+
     guest_state->cs.selector = 0xf000;
-    guest_state->cs.limit=0xffff;
+    guest_state->cs.limit = 0xffff;
     guest_state->cs.base = 0x0000000f0000LL;
     guest_state->cs.attrib.raw = 0xf3;
 
-  
+
     /* DEBUG FOR RETURN CODE */
     ctrl_area->exit_code = 1;
 
 
-    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
+    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
+                                       &(guest_state->es), &(guest_state->fs), 
+                                       &(guest_state->gs), NULL};
+
     for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];
        
@@ -160,12 +145,12 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
     }
-  
+
     guest_state->gdtr.limit = 0x0000ffff;
     guest_state->gdtr.base = 0x0000000000000000LL;
     guest_state->idtr.limit = 0x0000ffff;
     guest_state->idtr.base = 0x0000000000000000LL;
-    
+
     guest_state->ldtr.selector = 0x0000;
     guest_state->ldtr.limit = 0x0000ffff;
     guest_state->ldtr.base = 0x0000000000000000LL;
@@ -177,10 +162,6 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
     guest_state->dr6 = 0x00000000ffff0ff0LL;
     guest_state->dr7 = 0x0000000000000400LL;
 
-  
-  
-
-
 
     if ( !RB_EMPTY_ROOT(&(vm_info->io_map)) ) {
        struct v3_io_hook * iter;
@@ -190,44 +171,48 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
        
        io_port_bitmap = (addr_t)V3_VAddr(V3_AllocPages(3));
        memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
-       
+
        ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr((void *)io_port_bitmap);
-       
+
        //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
-       
+
        do {
            iter = rb_entry(io_node, struct v3_io_hook, tree_node);
-           
+
            ushort_t port = iter->port;
            uchar_t * bitmap = (uchar_t *)io_port_bitmap;
            //PrintDebug("%d: Hooking Port %d\n", i, port);
-           
+
            bitmap += (port / 8);
            //      PrintDebug("Setting Bit for port 0x%x\n", port);
            *bitmap |= 1 << (port % 8);
-           
+
            i++;
        } while ((io_node = v3_rb_next(io_node)));
-       
-       
+
        //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
-       
+
        ctrl_area->instrs.IOIO_PROT = 1;
     }
-    
-    
+
+
     PrintDebug("Exiting on interrupts\n");
     ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
     ctrl_area->instrs.INTR = 1;
-    
-    
+
+
     if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        
        ctrl_area->guest_ASID = 1;
        
-       vm_info->direct_map_pt = (addr_t)V3_PAddr((void *)v3_create_direct_passthrough_pts(vm_info));
        
+       if (v3_init_passthrough_pts(vm_info) == -1) {
+           PrintError("Could not initialize passthrough page tables\n");
+           return ;
+       }
+
+
        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");
        
@@ -240,56 +225,51 @@ static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;
 
-
        vm_info->guest_efer.value = 0x0LL;
-    
+
        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);
 
-
        ctrl_area->instrs.INVLPG = 1;
 
-       
        ctrl_area->exceptions.pf = 1;
-       
+
        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
-       
+
        guest_state->g_pat = 0x7040600070406ULL;
-       
+
        guest_state->cr0 |= 0x80000000;
-       
+
     } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;
-       
+
        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;
-       
+
        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));
-       
+
        // Set the Nested Page Table pointer
-       vm_info->direct_map_pt = (addr_t)V3_PAddr((void *)v3_create_direct_passthrough_pts(vm_info));
+       if (v3_init_passthrough_pts(vm_info) == -1) {
+           PrintError("Could not initialize Nested page tables\n");
+           return ;
+       }
+
        ctrl_area->N_CR3 = vm_info->direct_map_pt;
-       
-       //   ctrl_area->N_CR3 = Get_CR3();
-       // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
-       
+
        guest_state->g_pat = 0x7040600070406ULL;
     }
-    
 
     if (vm_info->msr_map.num_hooks > 0) {
        PrintDebug("Hooking %d msrs\n", vm_info->msr_map.num_hooks);
        ctrl_area->MSRPM_BASE_PA = v3_init_svm_msr_map(vm_info);
        ctrl_area->instrs.MSR_PROT = 1;
-       
     }
-    
 
     /* Safety locations for fs/gs */
     //    vm_info->fs = 0;
@@ -301,14 +281,6 @@ static int init_svm_guest(struct guest_info *info) {
     PrintDebug("Allocating VMCB\n");
     info->vmm_data = (void*)Allocate_VMCB();
 
-
-    //PrintDebug("Generating Guest nested page tables\n");
-    //  info->page_tables = NULL;
-    //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
-    //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
-    //  PrintDebugPageTables(info->page_tables);
-
-
     PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
     Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
   
@@ -508,24 +480,24 @@ int v3_is_svm_capable() {
                PrintDebug("SVM is locked with a key\n");
            }
            return 0;
-           
+
        } else {
            PrintDebug("SVM is available and  enabled.\n");
-           
+
            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            PrintDebug("CPUID_FEATURE_IDS_eax=%p\n", (void *)eax);
            PrintDebug("CPUID_FEATURE_IDS_ebx=%p\n", (void *)ebx);
-           PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);      
+           PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
            PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);
 
-           
+
            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
                PrintDebug("SVM Nested Paging not supported\n");
            } else {
                PrintDebug("SVM Nested Paging supported\n");
            }
-           
-           return 1;    
+
+           return 1;
        }
     }
 }
@@ -534,9 +506,9 @@ static int has_svm_nested_paging() {
     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
 
     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
-      
+
     //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);
-  
+
     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
        return 0;
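
(Aside, not part of the commit: the IO permission map code in Init_VMCB_BIOS above packs one intercept bit per port into the 3 pages allocated for the bitmap. The same arithmetic in isolation:)

    static void iopm_intercept_port(uchar_t * bitmap, ushort_t port) {
       bitmap[port / 8] |= 1 << (port % 8);   // e.g. port 0x3f8 -> byte 127, bit 0
    }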

palacios/src/palacios/vmm.c
index 7343cd5..5525d65 100644
@@ -26,6 +26,8 @@
 #include <palacios/vmm_instrument.h>
 
 
+/* These should be the only global variables in Palacios */
+/* They are architecture specific variables */
 v3_cpu_arch_t v3_cpu_type;
 struct v3_os_hooks * os_hooks = NULL;
 
@@ -40,10 +42,12 @@ static struct guest_info * allocate_guest() {
 
 
 void Init_V3(struct v3_os_hooks * hooks, struct v3_ctrl_ops * vmm_ops) {
+    
+    // Set global variables. 
     os_hooks = hooks;
-
     v3_cpu_type = V3_INVALID_CPU;
 
+
 #ifdef INSTRUMENT_VMM
     v3_init_instrumentation();
 #endif
@@ -64,7 +68,3 @@ void Init_V3(struct v3_os_hooks * hooks, struct v3_ctrl_ops * vmm_ops) {
        PrintDebug("CPU has no virtualization Extensions\n");
     }
 }
-
-
-// Get CPU Type..
-

palacios/src/palacios/vmm_config.c
index 8531d58..89a3f6c 100644
@@ -392,11 +392,7 @@ static struct vm_device *  configure_generic(struct guest_info * info, struct v3
 #endif
 
 
-#if 1
-    // Make any Bus master ide controller invisible
-    
-    v3_generic_add_port_range(generic, 0xc000, 0xc00f, GENERIC_PRINT_AND_IGNORE);
-#endif
+
     //  v3_generic_add_port_range(generic, 0x378, 0x400, GENERIC_PRINT_AND_IGNORE);
     
 
palacios/src/palacios/vmm_ctrl_regs.c
index 9bbcf5b..98a9e53 100644
@@ -484,12 +484,12 @@ int v3_handle_cr4_write(struct guest_info * info) {
                if ((cr4->pae == 0) && (new_cr4->pae == 1)) {
                    PrintDebug("Creating PAE passthrough tables\n");
                    
-                   // Delete the old 32 bit direct map page tables
-                   delete_page_tables_32((pde32_t *)V3_VAddr((void *)(info->direct_map_pt)));
-                   
                    // create 32 bit PAE direct map page table
-                   info->direct_map_pt = (addr_t)V3_PAddr((void *)v3_create_direct_passthrough_pts(info));
-                   
+                   if (v3_reset_passthrough_pts(info) == -1) {
+                       PrintError("Could not create 32 bit PAE passthrough pages tables\n");
+                       return -1;
+                   }
+
                    // reset cr3 to new page tables
                    info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
                    
@@ -568,54 +568,6 @@ int v3_handle_efer_write(uint_t msr, struct v3_msr src, void * priv_data) {
     // Enable/Disable Syscall
     shadow_efer->sce = src.value & 0x1;
     
-    
-    // We have to handle long mode writes....
-    
-    /* 
-       if ((info->shdw_pg_mode == SHADOW_PAGING) && 
-       (v3_get_mem_mode(info) == PHYSICAL_MEM)) {
-       
-       if ((shadow_efer->lme == 0) && (new_efer->lme == 1)) {
-       PrintDebug("Transition to longmode\n");
-       PrintDebug("Creating Passthrough 64 bit page tables\n");
-       
-       // Delete the old 32 bit direct map page tables
-       
-       PrintDebug("Deleting old PAE Page tables\n");
-       PrintError("JRL BUG?: Will the old page tables always be in PAE format??\n");
-       delete_page_tables_32PAE((pdpe32pae_t *)V3_VAddr((void *)(info->direct_map_pt)));
-       
-       // create 64 bit direct map page table
-       info->direct_map_pt = (addr_t)V3_PAddr(create_passthrough_pts_64(info));
-       
-       // reset cr3 to new page tables
-       info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
-       
-       // We mark the Long Mode active  because we have paging enabled
-       // We do this in new_efer because we copy the msr in full below
-       // new_efer->lma = 1;
-       
-       } else if ((shadow_efer->lme == 1) && (new_efer->lme == 0)) {
-       // transition out of long mode
-       //((struct efer_64 *)&(info->guest_efer.value))->lme = 0;
-       //((struct efer_64 *)&(info->guest_efer.value))->lma = 0;
-       
-       return -1;
-       }
-       
-       // accept all changes to the efer, but make sure that the SVME bit is set... (SVM specific)
-       *shadow_efer = *new_efer;
-       shadow_efer->svme = 1;
-       
-       
-       
-       PrintDebug("New EFER=%p\n", (void *)*(addr_t *)(shadow_efer));
-       } else {
-       PrintError("Write to EFER in NESTED_PAGING or VIRTUAL_MEM mode not supported\n");
-       // Should probably just check for a long mode transition, and bomb out if it is
-       return -1;
-       }
-    */
     info->rip += 2; // WRMSR/RDMSR are two byte operands
     
     return 0;

palacios/src/palacios/vmm_direct_paging.c
index 9884620..2e86dbe 100644
@@ -44,11 +44,49 @@ static addr_t create_generic_pt_page() {
 #include "vmm_direct_paging_32pae.h"
 #include "vmm_direct_paging_64.h"
 
+int v3_init_passthrough_pts(struct guest_info * info) {
+    info->direct_map_pt = (addr_t)V3_PAddr((void *)create_generic_pt_page());
+    return 0;
+}
+
+int v3_reset_passthrough_pts(struct guest_info * info) {
+    v3_vm_cpu_mode_t mode = v3_get_cpu_mode(info);
+
+    // Delete the old direct map page tables
+    switch(mode) {
+       case REAL:
+       case PROTECTED:
+           delete_page_tables_32((pde32_t *)V3_VAddr((void *)(info->direct_map_pt)));
+           break;
+       case PROTECTED_PAE:
+       case LONG:
+       case LONG_32_COMPAT:
+           // Long mode will only use 32PAE page tables...
+           delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(info->direct_map_pt)));
+           break;
+       default:
+           PrintError("Unknown CPU Mode\n");
+           break;
+    }
+           
+    // create new direct map page table
+    v3_init_passthrough_pts(info);
+    
+    return 0;
+}
 
-addr_t v3_create_direct_passthrough_pts(struct guest_info * info) {
-    return create_generic_pt_page();
+
+int v3_activate_passthrough_pt(struct guest_info * info) {
+    // For now... But we need to change this....
+    // As soon as shadow paging becomes active the passthrough tables are hosed
+    // So this will cause chaos if it is called at that time
+
+    info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
+    //PrintError("Activate Passthrough Page tables not implemented\n");
+    return 0;
 }
 
+
 int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
     v3_vm_cpu_mode_t mode = v3_get_cpu_mode(info);
 
@@ -98,3 +136,7 @@ int v3_handle_nested_pagefault(struct guest_info * info, addr_t fault_addr, pf_e
     return -1;
 }
 
+int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr) {
+
+    return -1;
+}

palacios/src/palacios/vmm_io.c
index 65dc6cf..9756eb4 100644
@@ -41,7 +41,6 @@ void v3_init_io_map(struct guest_info * info) {
 
 
 
-
 static inline struct v3_io_hook * __insert_io_hook(struct guest_info * info, struct v3_io_hook * hook) {
   struct rb_node ** p = &(info->io_map.rb_node);
   struct rb_node * parent = NULL;

palacios/src/palacios/vmm_mem.c
index d37b3da..92b6c63 100644
@@ -26,7 +26,6 @@
 #define MEM_OFFSET_HCALL 0x1000
 
 
-
 static inline
 struct v3_shadow_region * insert_shadow_region(struct guest_info * info, 
                                               struct v3_shadow_region * region);
@@ -67,6 +66,9 @@ void v3_delete_shadow_map(struct guest_info * info) {
 
        v3_delete_shadow_region(info, reg);
     }
+
+    V3_FreePage((void *)(info->mem_map.base_region.host_addr));
+    V3_FreePage(V3_PAddr((void *)(info->mem_map.hook_hva)));
 }
 
 
@@ -184,6 +186,10 @@ struct v3_shadow_region * insert_shadow_region(struct guest_info * info,
   
     v3_rb_insert_color(&(region->tree_node), &(info->mem_map.shdw_regions));
 
+
+    // flush virtual page tables 
+    // 3 cases shadow, shadow passthrough, and nested
+
     return NULL;
 }
                                                 
@@ -216,7 +222,8 @@ int v3_handle_mem_wr_hook(struct guest_info * info, addr_t guest_va, addr_t gues
 
     addr_t dst_addr = (addr_t)V3_VAddr((void *)v3_get_shadow_addr(reg, guest_pa));
 
-    if (v3_emulate_write_op(info, guest_va, guest_pa, dst_addr, reg->write_hook, reg->priv_data) == -1) {
+    if (v3_emulate_write_op(info, guest_va, guest_pa, dst_addr, 
+                           reg->write_hook, reg->priv_data) == -1) {
        PrintError("Write hook emulation failed\n");
        return -1;
     }
@@ -230,12 +237,15 @@ int v3_handle_mem_full_hook(struct guest_info * info, addr_t guest_va, addr_t gu
     addr_t op_addr = info->mem_map.hook_hva;
 
     if (access_info.write == 1) {
-       if (v3_emulate_write_op(info, guest_va, guest_pa, op_addr, reg->write_hook, reg->priv_data) == -1) {
+       if (v3_emulate_write_op(info, guest_va, guest_pa, op_addr, 
+                               reg->write_hook, reg->priv_data) == -1) {
            PrintError("Write Full Hook emulation failed\n");
            return -1;
        }
     } else {
-       if (v3_emulate_read_op(info, guest_va, guest_pa, op_addr, reg->read_hook, reg->write_hook, reg->priv_data) == -1) {
+       if (v3_emulate_read_op(info, guest_va, guest_pa, op_addr, 
+                              reg->read_hook, reg->write_hook, 
+                              reg->priv_data) == -1) {
            PrintError("Read Full Hook emulation failed\n");
            return -1;
        }
@@ -281,6 +291,10 @@ void v3_delete_shadow_region(struct guest_info * info, struct v3_shadow_region *
 
        V3_Free(reg);
     }
+
+    // flush virtual page tables 
+    // 3 cases shadow, shadow passthrough, and nested
+
 }
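
(The two "flush virtual page tables" comments above mark where region insertion and removal will eventually need to invalidate stale guest mappings. A speculative sketch of the passthrough case, assuming v3_invalidate_passthrough_addr, still a stub in this commit, is the intended hook and that invalidation is done per 4KB page:)

    static int flush_passthrough_region(struct guest_info * info,
                                       addr_t start, addr_t end) {
       addr_t addr;

       // Illustrates the intended call pattern only; the invalidation
       // hook itself just returns -1 in this commit.
       for (addr = PAGE_ADDR(start); addr < end; addr += PAGE_SIZE) {
           if (v3_invalidate_passthrough_addr(info, addr) == -1) {
               return -1;
           }
       }

       return 0;
    }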
 
 
palacios/src/palacios/vmm_paging.c
index dc7aa97..27392cb 100644
@@ -59,16 +59,14 @@ void delete_page_tables_32(pde32_t * pde) {
     if (pde == NULL) { 
        return;
     }
-    PrintDebug("Deleting Page Tables -- PDE (%p)\n", pde);
 
-    for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
-       if (pde[i].present) {
-           // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
-      
-           pte32_t * pte = (pte32_t *)((addr_t)(uint_t)(pde[i].pt_base_addr << PAGE_POWER));
+    PrintDebug("Deleting Page Tables (32) -- PDE (%p)\n", pde);
 
+    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
+       if ((pde[i].present) && (pde[i].large_page == 0)) {
+           // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
       
-           V3_FreePage(pte);
+           V3_FreePage((void *)(addr_t)PAGE_ADDR_4KB(pde[i].pt_base_addr));
        }
     }
 
@@ -76,12 +74,75 @@ void delete_page_tables_32(pde32_t * pde) {
     V3_FreePage(V3_PAddr(pde));
 }
 
-void delete_page_tables_32PAE(pdpe32pae_t * pdpe) { 
-    PrintError("Unimplemented function\n");
+void delete_page_tables_32pae(pdpe32pae_t * pdpe) {
+    int i, j;
+
+    if (pdpe == NULL) {
+       return;
+    }
+
+    PrintDebug("Deleting Page Tables (32 PAE) -- PDPE (%p)\n", pdpe);
+    
+    for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
+       if (pdpe[i].present == 0) {
+           continue;
+       }
+
+       pde32pae_t * pde = (pde32pae_t *)V3_VAddr((void *)(addr_t)PAGE_ADDR_4KB(pdpe[i].pd_base_addr));
+
+       for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
+
+           if ((pde[j].present == 0) || (pde[j].large_page == 1)) {
+               continue;
+           }
+
+           V3_FreePage((void *)(addr_t)PAGE_ADDR_4KB(pde[j].pt_base_addr));
+       }
+
+       V3_FreePage(V3_PAddr(pde));
+    }
+
+    V3_FreePage(V3_PAddr(pdpe));
 }
 
 void delete_page_tables_64(pml4e64_t * pml4) {
-    PrintError("Unimplemented function\n");
+    int i, j, k;
+
+    if (pml4 == NULL) {
+       return;
+    }
+
+    PrintDebug("Deleting Page Tables (64) -- PML4 (%p)\n", pml4);
+
+    for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
+       if (pml4[i].present == 0) {
+           continue;
+       }
+
+       pdpe64_t * pdpe = (pdpe64_t *)V3_VAddr((void *)(addr_t)PAGE_ADDR_4KB(pml4[i].pdp_base_addr));
+
+       for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
+           if ((pdpe[j].present == 0) || (pdpe[j].large_page == 1)) {
+               continue;
+           }
+
+           pde64_t * pde = (pde64_t *)V3_VAddr((void *)(addr_t)PAGE_ADDR_4KB(pdpe[j].pd_base_addr));
+
+           for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
+               if ((pde[k].present == 0) || (pde[k].large_page == 1)) {
+                   continue;
+               }
+
+               V3_FreePage((void *)(addr_t)PAGE_ADDR_4KB(pde[k].pt_base_addr));
+           }
+           
+           V3_FreePage(V3_PAddr(pde));
+       }
+
+       V3_FreePage(V3_PAddr(pdpe));
+    }
+
+    V3_FreePage(V3_PAddr(pml4));
 }
 
 
palacios/src/palacios/vmm_shadow_paging.c
index 8074c16..559854a 100644
@@ -49,19 +49,6 @@ struct shadow_page_data {
 };
 
 
-DEFINE_HASHTABLE_INSERT(add_pte_map, addr_t, addr_t);
-DEFINE_HASHTABLE_SEARCH(find_pte_map, addr_t, addr_t);
-//DEFINE_HASHTABLE_REMOVE(del_pte_map, addr_t, addr_t, 0);
-
-
-
-static uint_t pte_hash_fn(addr_t key) {
-    return hash_long(key, 32);
-}
-
-static int pte_equals(addr_t key1, addr_t key2) {
-    return (key1 == key2);
-}
 
 static struct shadow_page_data * create_new_shadow_pt(struct guest_info * info);
 static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
@@ -81,9 +68,6 @@ int v3_init_shadow_page_state(struct guest_info * info) {
     state->guest_cr0 = 0;
 
     INIT_LIST_HEAD(&(state->page_list));
-
-    state->cached_ptes = NULL;
-    state->cached_cr3 = 0;
   
     return 0;
 }
@@ -113,15 +97,6 @@ int v3_activate_shadow_pt(struct guest_info * info) {
 }
 
 
-int v3_activate_passthrough_pt(struct guest_info * info) {
-    // For now... But we need to change this....
-    // As soon as shadow paging becomes active the passthrough tables are hosed
-    // So this will cause chaos if it is called at that time
-
-    info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
-    //PrintError("Activate Passthrough Page tables not implemented\n");
-    return 0;
-}
 
 
 
palacios/src/palacios/vmm_shadow_paging_32.h
index 755c9a6..d60eb9f 100644
  */
 
 
-static int cache_page_tables_32(struct guest_info * info, addr_t pde) {
-    struct shadow_page_state * state = &(info->shdw_pg_state);
-    addr_t pde_host_addr;
-    pde32_t * tmp_pde;
-    struct hashtable * pte_cache = NULL;
-    int i = 0;
-
-    if (pde == state->cached_cr3) {
-       return 1;
-    }
-
-    if (state->cached_ptes != NULL) {
-       hashtable_destroy(state->cached_ptes, 0, 0);
-       state->cached_ptes = NULL;
-    }
-
-    state->cached_cr3 = pde;
-
-    pte_cache = create_hashtable(0, &pte_hash_fn, &pte_equals);
-    state->cached_ptes = pte_cache;
-
-    if (guest_pa_to_host_va(info, pde, &pde_host_addr) == -1) {
-       PrintError("Could not lookup host address of guest PDE\n");
-       return -1;
-    }
-
-    tmp_pde = (pde32_t *)pde_host_addr;
-
-    add_pte_map(pte_cache, pde, pde_host_addr);
-
-
-    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
-       if ((tmp_pde[i].present) && (tmp_pde[i].large_page == 0)) {
-           addr_t pte_host_addr;
-
-           if (guest_pa_to_host_va(info, (addr_t)(BASE_TO_PAGE_ADDR(tmp_pde[i].pt_base_addr)), &pte_host_addr) == -1) {
-               PrintError("Could not lookup host address of guest PDE\n");
-               return -1;
-           }
-
-           add_pte_map(pte_cache, (addr_t)(BASE_TO_PAGE_ADDR(tmp_pde[i].pt_base_addr)), pte_host_addr); 
-       }
-    }
-
-    return 0;
-
-}
-
 
 
-// We assume that shdw_pg_state.guest_cr3 is pointing to the page tables we want to activate
-// We also assume that the CPU mode has not changed during this page table transition
 static inline int activate_shadow_pt_32(struct guest_info * info) {
     struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3);
     struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
-    int cached = 0;
-  
-    // Check if shadow page tables are in the cache
-    cached = cache_page_tables_32(info, CR3_TO_PDE32_PA(*(addr_t *)guest_cr3));
-  
-    if (cached == -1) {
-       PrintError("CR3 Cache failed\n");
-       return -1;
-    } else if (cached == 0) {
-       struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
+    struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
 
-       shdw_page->cr3 = shdw_page->page_pa;
+    shdw_page->cr3 = shdw_page->page_pa;
     
-       shadow_cr3->pdt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
-       PrintDebug( "Created new shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pdt_base_addr));
-    } else {
-       PrintDebug("Reusing cached shadow Page table\n");
-    }
+    shadow_cr3->pdt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
+    PrintDebug( "Created new shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pdt_base_addr));
   
     shadow_cr3->pwt = guest_cr3->pwt;
     shadow_cr3->pcd = guest_cr3->pcd;
@@ -282,7 +220,6 @@ static int handle_large_pagefault_32(struct guest_info * info,
     pt_access_status_t shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);
     pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
     addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);  
-    struct shadow_page_state * state = &(info->shdw_pg_state);
 
     struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_fault_pa);
 
@@ -320,11 +257,7 @@ static int handle_large_pagefault_32(struct guest_info * info,
             */
            shadow_pte->user_page = 1;
 
-           if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) {
-               // Check if the entry is a page table...
-               PrintDebug("Marking page as Guest Page Table (large page)\n");
-               shadow_pte->writable = 0;
-           } else if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
+           if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            } else {
                shadow_pte->writable = 1;
@@ -356,12 +289,7 @@ static int handle_large_pagefault_32(struct guest_info * info,
        }
 
 
-       if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) {
-           struct shadow_page_state * state = &(info->shdw_pg_state);
-           PrintDebug("Write operation on Guest PAge Table Page (large page)\n");
-           state->cached_cr3 = 0;
-           shadow_pte->writable = 1;
-       }
+
 
     } else {
        PrintError("Error in large page fault handler...\n");
@@ -390,7 +318,6 @@ static int handle_shadow_pte32_fault(struct guest_info * info,
     pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);;
     pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
     addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) +  PAGE_OFFSET(fault_addr);
-    struct shadow_page_state * state = &(info->shdw_pg_state);
 
     struct v3_shadow_region * shdw_reg =  v3_get_shadow_region(info, guest_pa);
 
@@ -462,15 +389,7 @@ static int handle_shadow_pte32_fault(struct guest_info * info,
                shadow_pte->writable = 0;
            }
 
-           // dirty flag has been set, check if its in the cache
-           if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) {
-               if (error_code.write == 1) {
-                   state->cached_cr3 = 0;
-                   shadow_pte->writable = guest_pte->writable;
-               } else {
-                   shadow_pte->writable = 0;
-               }
-           }
+
 
            // Write hooks trump all, and are set Read Only
            if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
@@ -498,11 +417,6 @@ static int handle_shadow_pte32_fault(struct guest_info * info,
            shadow_pte->writable = guest_pte->writable;
        }
 
-       if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) {
-           struct shadow_page_state * state = &(info->shdw_pg_state);
-           PrintDebug("Write operation on Guest PAge Table Page\n");
-           state->cached_cr3 = 0;
-       }
 
        return 0;