Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way; an example follows.
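For instance, to track a release branch (the branch name below is illustrative; run "git branch -r" to list the branches that actually exist):

  git checkout --track -b Release-1.2 origin/Release-1.2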


Convert shadow paging to use 32 PAE (Core)
Chunxiao Diao, Daniel Zuo, Yuanbo Fan [Mon, 26 May 2014 20:54:18 +0000 (15:54 -0500)]
This changes shadow paging to use, at minimum, 32PAE
shadow page tables.  This makes it possible to place
a guest using shadow paging anywhere in host physical
memory instead of just the first 4 GB.

This first patch adds core functionality to the VTLB
pager, as well as changes to CR4 handling.  The
other patches with this name need to be applied for
this to work.
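
To make the new layout concrete, here is a minimal standalone sketch (an editorial illustration, not part of the patch) of the index arithmetic the pager relies on. The macro names mirror those used in the diff below, but the definitions are re-derived here from the x86 paging layout.

  #include <stdio.h>

  /* 32-bit non-PAE walk: 10-bit PDE index, 10-bit PTE index, 12-bit offset */
  #define PDE32_INDEX(va)      (((va) >> 22) & 0x3ff)
  #define PTE32_INDEX(va)      (((va) >> 12) & 0x3ff)

  /* 32PAE walk: 2-bit PDPT index, 9-bit PDE index, 9-bit PTE index */
  #define PDPE32PAE_INDEX(va)  (((va) >> 30) & 0x3)
  #define PDE32PAE_INDEX(va)   (((va) >> 21) & 0x1ff)
  #define PTE32PAE_INDEX(va)   (((va) >> 12) & 0x1ff)

  int main(void) {
      unsigned int va = 0xbfdff123u;   /* arbitrary guest virtual address */

      printf("guest 32-bit walk: PDE %u, PTE %u\n",
             PDE32_INDEX(va), PTE32_INDEX(va));
      printf("shadow 32PAE walk: PDPT %u, PDE %u, PTE %u\n",
             PDPE32PAE_INDEX(va), PDE32PAE_INDEX(va), PTE32PAE_INDEX(va));

      /* One guest 4MB page covers two 2MB PAE entries: 4MB frame n
       * becomes 2MB frames 2n and 2n+1, the "buddy" PDE pair that the
       * patch fills in together. */
      unsigned int frame_4mb = va >> 22;
      printf("4MB frame %u -> 2MB buddies %u and %u\n",
             frame_4mb, frame_4mb << 1, (frame_4mb << 1) | 1);
      return 0;
  }

Because each 32PAE entry is 64 bits wide, the shadow tables can hold host physical addresses above 4 GB, which is what lifts the placement restriction described above.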

Kconfig
palacios/include/palacios/vmm_shadow_paging.h
palacios/src/palacios/mmu/vmm_shdw_pg_tlb.c
palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h
palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32pae.h
palacios/src/palacios/vm_guest.c
palacios/src/palacios/vmm_ctrl_regs.c
palacios/src/palacios/vmm_paging_debug.h

diff --git a/Kconfig b/Kconfig
index a099526..58a998a 100644
--- a/Kconfig
+++ b/Kconfig
@@ -285,6 +285,7 @@ config SHADOW_PAGING_VTLB
        depends on SHADOW_PAGING
        help 
           Enables Virtual TLB implementation for shadow paging
+           Virtual TLB now uses PAE so there are no 4 GB restrictions
 
 
 config DEBUG_SHDW_PG_VTLB
diff --git a/palacios/include/palacios/vmm_shadow_paging.h b/palacios/include/palacios/vmm_shadow_paging.h
index 6af224c..f7ececf 100644
--- a/palacios/include/palacios/vmm_shadow_paging.h
+++ b/palacios/include/palacios/vmm_shadow_paging.h
@@ -62,6 +62,7 @@ struct v3_shdw_pg_state {
     v3_reg_t guest_cr3;
     v3_reg_t guest_cr0;
     v3_msr_t guest_efer;
+    v3_reg_t guest_cr4;
 
     void * local_impl_data;
 
diff --git a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb.c b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb.c
index 51e88e8..d0c02de 100644
--- a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb.c
+++ b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb.c
@@ -12,6 +12,8 @@
  * All rights reserved.
  *
  * Author: Jack Lange <jarusl@cs.northwestern.edu>
+ * Author: Chunxiao Diao <chunxiaodiao2012@u.northwestern.edu> 
+ *
  *
  * This is free software.  You are permitted to use,
  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
@@ -52,24 +54,12 @@ static struct shadow_page_data * create_new_shadow_pt(struct guest_info * core);
 #include "vmm_shdw_pg_tlb_32pae.h"
 #include "vmm_shdw_pg_tlb_64.h"
 
-
 static inline int get_constraints(struct guest_info *core) 
 {
-    switch (v3_get_vm_cpu_mode(core)) {
-       case PROTECTED:
-       case PROTECTED_PAE:
-           return V3_ALLOC_PAGES_CONSTRAINT_4GB;
-           break;
-       case LONG:
-       case LONG_32_COMPAT:
-       case LONG_16_COMPAT:
-           return 0;
-           break;
-       default:
-           return V3_ALLOC_PAGES_CONSTRAINT_4GB;
-           break;
-    }
-    return V3_ALLOC_PAGES_CONSTRAINT_4GB;
+  // The current version of VTLB does not require any constraints
+  // on where page tables are allocated, since it uses 32PAE page
+  // tables on a 64-bit machine even in 32-bit mode and below.
+  return 0;
 }
 
 
@@ -211,7 +201,7 @@ static int vtlb_invalidate_shdw_pt(struct guest_info * core) {
 
 
 static int vtlb_handle_pf(struct guest_info * core, addr_t fault_addr, pf_error_t error_code) {
-
+  
        switch (v3_get_vm_cpu_mode(core)) {
            case PROTECTED:
                return handle_shadow_pagefault_32(core, fault_addr, error_code);
diff --git a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h
index 502e5e5..4940993 100644
--- a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h
+++ b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h
  * and the University of New Mexico.  You can find out more at 
  * http://www.v3vee.org
  *
+ * Copyright (c) 2014, Chunxiao Diao <chunxiaodiao2012@u.northwestern.edu> 
  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
  * All rights reserved.
  *
  * Author: Jack Lange <jarusl@cs.northwestern.edu>
+ * Author: Chunxiao Diao <chunxiaodiao2012@u.northwestern.edu> 
  *
  * This is free software.  You are permitted to use,
  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
  */
 
+#define GET_BUDDY(x) (((ullong_t)x) ^ 0x1)
+#define MARK_LAST_ZERO(x) (((ullong_t)x) & ~0x1)
+#define CR3_PAGE_BASE_ADDR(x) ((x) >> 5)
+#define V3_SHADOW_LARGE_PAGE 0x3
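+// A guest 4MB page spans two consecutive 2MB PAE entries, so shadow PDEs are
+// managed in buddy pairs: GET_BUDDY flips the low bit of an index to reach
+// the other half of a pair, and MARK_LAST_ZERO clears it to select the
+// even-indexed buddy.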
 
 
+static inline int activate_shadow_pt_32( struct guest_info *info) 
+{
+       struct cr3_32_PAE * shadow_cr3 = (struct cr3_32_PAE *) &(info->ctrl_regs.cr3);
+       struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
+       struct cr4_32 * shadow_cr4 = (struct cr4_32 *) &(info->ctrl_regs.cr4);
 
-static inline int activate_shadow_pt_32(struct guest_info * core) {
-    struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(core->ctrl_regs.cr3);
-    struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(core->shdw_pg_state.guest_cr3);
-    struct shadow_page_data * shdw_page = create_new_shadow_pt(core);
-
-    shdw_page->cr3 = shdw_page->page_pa;
-    
-    shadow_cr3->pdt_base_addr = PAGE_BASE_ADDR_4KB(shdw_page->page_pa);
-    PrintDebug(core->vm_info, core, "Created new shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pdt_base_addr));
-  
-    shadow_cr3->pwt = guest_cr3->pwt;
-    shadow_cr3->pcd = guest_cr3->pcd;
-
-    return 0;
+#ifdef V3_CONFIG_DEBUG_SHADOW_PAGING
+       struct cr4_32 * guest_cr4 = (struct cr4_32 *)&(info->shdw_pg_state.guest_cr4);
+#endif
+       
+       struct shadow_page_data * shadow_pt = create_new_shadow_pt(info);
+       addr_t shadow_pt_addr = shadow_pt->page_pa;             
+       shadow_pt->cr3 = shadow_pt->page_pa;      
+       PrintDebug(info->vm_info, info, "Top level ShadowPAE pdp page pa=%p\n", (void *)shadow_pt_addr);
+       PrintDebug(info->vm_info,info,"Guest CR4 =%x and Shadow CR4 =%x\n", *(uint_t *)guest_cr4, *(uint_t*)shadow_cr4);
+       //shadow cr3 points to the new page, which is the 32PAE PDPT
+       shadow_cr3->pdpt_base_addr = CR3_PAGE_BASE_ADDR(shadow_pt_addr); // PAE CR3 is 32-byte aligned, hence x >> 5
+    PrintDebug(info->vm_info, info, "Creating new shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pdpt_base_addr));
+
+       shadow_cr3->pwt = guest_cr3->pwt;  
+       shadow_cr3->pcd = guest_cr3->pcd;       
+       shadow_cr4->pae = 1;
+       //shadow_cr4->pse = 1;
+       /*      shadow_efer->lme = 1;
+       shadow_efer->lma = 1; */
+       
+        return 0;
 }
 
+/*
+*
+* shadowPAE page fault handlers
+*
+*/
 
+static int handle_pdpe_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,pdpe32pae_t * shadow_pdp) ;
+static int handle_pde_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pde32pae_t * shadow_pd);
+static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
+                                           pte32pae_t * shadow_pt, pte32_t * guest_pt) ;
+static int handle_4MB_shadow_pagefault_pte_32(struct guest_info * info, 
+                                             addr_t fault_addr, pf_error_t error_code, 
+                                             pte32pae_t * shadow_pt, pde32_4MB_t * large_guest_pde) ;
+static int handle_4MB_shadow_pagefault_pde_32(struct guest_info * info, 
+                                    addr_t fault_addr, pf_error_t error_code, 
+                                    pt_access_status_t shadow_pde_access,
+                                    pde32pae_2MB_t * large_shadow_pde, pde32pae_2MB_t *large_shadow_pde_bd,
+                                               pde32_4MB_t * large_guest_pde)  ;
 
+static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code)
+{
+       // pointer to the shadow PDPT
+       pdpe32pae_t * shadow_pdp = CR3_TO_PDPE32PAE_VA(info->ctrl_regs.cr3);
+       PrintDebug(info->vm_info, info, "32 bit ShadowPAE page fault handler : %p----------------------------------------\n", (void*)fault_addr);
+       if (handle_pdpe_shadow_pagefault_32(info, fault_addr, error_code, shadow_pdp) == -1) {
+               PrintError(info->vm_info, info, "Error handling Page fault caused by PDPE\n");
+               return -1;
+        }      
+       return 0;
+}
 
-/* 
- * *
- * * 
- * * 32 bit Page table fault handlers
- * *
- * *
- */
-static int handle_4MB_shadow_pagefault_pde_32(struct guest_info * info,  addr_t fault_addr, pf_error_t error_code, 
-                                             pt_access_status_t shadow_pde_access, pde32_4MB_t * large_shadow_pde, 
-                                             pde32_4MB_t * large_guest_pde);
-static int handle_4MB_shadow_pagefault_pte_32(struct guest_info * info,  addr_t fault_addr, pf_error_t error_code, 
-                                             pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde);
+// the shadow PDPT has only 4 entries, all of which should be present and accessible
+static int handle_pdpe_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pdpe32pae_t * shadow_pdp) 
+{
+       pt_access_status_t shadow_pdpe_access;  
+       
+       //fault address error
+       if ( (PDPE32PAE_INDEX(fault_addr) != 0) && (PDPE32PAE_INDEX(fault_addr) != 1)
+           && (PDPE32PAE_INDEX(fault_addr) != 2) && (PDPE32PAE_INDEX(fault_addr) != 3))
+       {
+               PrintDebug(info->vm_info, info, "Fault pdpe index is 0x%x, out of range\n", PDPE32PAE_INDEX(fault_addr));
+               if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
+                       PrintError(info->vm_info, info, "Could not inject guest page fault\n");
+                       return -1;
+               }       
+               return 0;
+       }
+       
+       pdpe32pae_t * shadow_pdpe = (pdpe32pae_t *)&(shadow_pdp[PDPE32PAE_INDEX(fault_addr)]);
+    
+       PrintDebug(info->vm_info, info, "Handling PDP fault\n");
+       
+       
+    if (fault_addr==0) { 
+               PrintDebug(info->vm_info, info, "Guest Page Tree for guest virtual address zero fault\n");
+               PrintGuestPageTree(info,fault_addr,(addr_t)(info->shdw_pg_state.guest_cr3));
+               PrintDebug(info->vm_info, info, "Host Page Tree for guest virtual address zero fault\n");
+               PrintHostPageTree(info,fault_addr,(addr_t)(info->ctrl_regs.cr3));
+    }  
+       
+    PrintDebug(info->vm_info, info, "Checking shadow_pdp_access %p\n", (void *)shadow_pdp);    
+    // Check the shadow page permissions
+    shadow_pdpe_access = v3_can_access_pdpe32pae(shadow_pdp, fault_addr, error_code);  
+       
+   if (shadow_pdpe_access == PT_ACCESS_USER_ERROR || shadow_pdpe_access == PT_ACCESS_WRITE_ERROR) 
+   {
+               //
+               // PDPT entry marked non-user
+               //
+               PrintDebug(info->vm_info, info, "Shadow Paging User or Write access error (shadow_pdpe_access=0x%x). Ignore it.\n", shadow_pdpe_access);
+               //shadow_pdpe->user_page = 1;
+               //return 0;
+    } 
+   else if ((shadow_pdpe_access != PT_ACCESS_NOT_PRESENT ) && 
+              (shadow_pdpe_access != PT_ACCESS_OK)) 
+       {
+               // inject page fault in guest
+               //
+               // unknown error
+               //
+               if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
+                       PrintError(info->vm_info, info, "Could not inject guest page fault\n");
+                       return -1;
+               }
+               PrintDebug(info->vm_info, info, "Unknown Error occurred (shadow_pdpe_access=%d)\n", shadow_pdpe_access);
+               PrintDebug(info->vm_info, info, "Manual Says to inject page fault into guest\n");
+               return 0;
+    }  
+
+       pde32pae_t * shadow_pd = NULL;
+       //get to page directory table level, allocate if not present
+    if (shadow_pdpe_access == PT_ACCESS_NOT_PRESENT) {
+               struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
+               shadow_pd = (pde32pae_t *)V3_VAddr((void *)shdw_page->page_pa);
+               PrintDebug(info->vm_info, info, "Creating new shadow PD table: %p\n", shadow_pd);
+               // mark the new PDPT entry present
+               shadow_pdpe->present = 1;
+               //shadow_pdpe->user_page = 1;
+               //shadow_pdpe->writable = 1;
+               // when these are set to 0, the lower levels are free to set their own policy
+               shadow_pdpe->write_through = 0;
+               shadow_pdpe->cache_disable = 0;
+
+               shadow_pdpe->pd_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
+    } 
+       else 
+    {
+               shadow_pd = (pde32pae_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pdpe->pd_base_addr));
+    }  
+       
 
-static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
-                                         pte32_t * shadow_pt,  pte32_t * guest_pt);
 
+    if (handle_pde_shadow_pagefault_32(info, fault_addr, error_code, shadow_pd) == -1) {
+               PrintError(info->vm_info, info, "Error handling Page fault caused by PDE\n");
+               return -1;
+    }
+    return 0;
+}      
 
-static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
-    pde32_t * guest_pd = NULL;
-    pde32_t * shadow_pd = CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
-    addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
+// handle a fault at the PDE (page directory) level
+static int handle_pde_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pde32pae_t * shadow_pd)
+{
     pt_access_status_t guest_pde_access;
     pt_access_status_t shadow_pde_access;
-    pde32_t * guest_pde = NULL;
-    pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(fault_addr)]);
-
-    PrintDebug(info->vm_info, info, "Shadow page fault handler: %p\n", (void*) fault_addr );
-    PrintDebug(info->vm_info, info, "Handling PDE32 Fault\n");
-
+       
+       pde32_t * guest_pd = NULL;
+       pde32_t * guest_pde = NULL;
+       addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
     if (v3_gpa_to_hva(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
-       PrintError(info->vm_info, info, "Invalid Guest PDE Address: 0x%p\n",  (void *)guest_cr3);
-       return -1;
-    } 
-
+               PrintError(info->vm_info, info, "Invalid Guest PDE Address: 0x%p\n",  (void *)guest_cr3);
+               return -1;
+    }
     guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(fault_addr)]);
+       
+    pde32pae_t * shadow_pde = (pde32pae_t *)&(shadow_pd[PDE32PAE_INDEX(fault_addr)]);
+    pde32pae_t * shadow_pde_bd = (pde32pae_t *)&(shadow_pd[GET_BUDDY(PDE32PAE_INDEX(fault_addr))]);
+    pde32pae_t * shadow_pde_sd = (pde32pae_t *)&(shadow_pd[MARK_LAST_ZERO(PDE32PAE_INDEX(fault_addr))]);  
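+    // shadow_pde_bd is the buddy of the faulting entry; shadow_pde_sd is the
+    // even-indexed buddy (low bit cleared), used below to normalize which
+    // entry the large-page handler sees first.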
+    PrintDebug(info->vm_info, info, "Handling PDE fault\n");   
 
-
+    PrintDebug(info->vm_info, info, "Checking guest_pde_access %p\n", (void *)guest_pd);       
     // Check the guest page permissions
-    guest_pde_access = v3_can_access_pde32(guest_pd, fault_addr, error_code);
-
+    guest_pde_access = v3_can_access_pde32(guest_pd, fault_addr, error_code);  
     // Check the shadow page permissions
-    shadow_pde_access = v3_can_access_pde32(shadow_pd, fault_addr, error_code);
-  
-    /* Was the page fault caused by the Guest's page tables? */
-    if (v3_is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
-       PrintDebug(info->vm_info, info, "Injecting PDE pf to guest: (guest access error=%d) (shdw access error=%d)  (pf error code=%d)\n", 
+    PrintDebug(info->vm_info, info, "Checking shadow_pde_access %p\n", (void *)shadow_pd);
+    shadow_pde_access = v3_can_access_pde32pae(shadow_pd, fault_addr, error_code);
+       
+    /* Was the page fault caused by the Guest PDE */
+    if (v3_is_guest_pf(guest_pde_access, shadow_pde_access) == 1) 
+       {
+               PrintDebug(info->vm_info, info, "Injecting PDE pf to guest: (guest access error=%d) (shdw access error=%d)  (pf error code=%d)\n", 
                   *(uint_t *)&guest_pde_access, *(uint_t *)&shadow_pde_access, *(uint_t *)&error_code);
-       if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
-           PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
-           return -1;
-       }
-       return 0;
+               if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) 
+               {
+                       PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
+                       return -1;
+               }
+               return 0;
     }
-
-
-
-    if (shadow_pde_access == PT_ACCESS_USER_ERROR) {
-       // 
-       // PDE Entry marked non user
-       //
-       PrintDebug(info->vm_info, info, "Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n", 
-                  shadow_pde_access, guest_pde_access);
        
-       if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
-           PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
-           return -1;
-       }
-       return 0;
+       //Guest PDE ok
+    if (shadow_pde_access == PT_ACCESS_USER_ERROR) 
+       {
+               //
+               // PDE Entry marked non-user
+               //      
+               PrintDebug(info->vm_info, info, "Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n", 
+                       shadow_pde_access, guest_pde_access);
+               if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) 
+               {
+                       PrintError(info->vm_info, info, "Could not inject guest page fault\n");
+                       return -1;
+               }
+               return 0;
     } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) && 
-              (guest_pde->large_page == 1)) {
-       
-       ((pde32_4MB_t *)guest_pde)->dirty = 1;
-       shadow_pde->writable = guest_pde->writable;
-       return 0;
+              (guest_pde->large_page == 1)) 
+       {
+
+               ((pde32_4MB_t *)guest_pde)->dirty = 1;
+               shadow_pde->writable = guest_pde->writable;
+               shadow_pde_bd->writable = guest_pde->writable;
+               return 0;
     } else if ((shadow_pde_access != PT_ACCESS_NOT_PRESENT) &&
-              (shadow_pde_access != PT_ACCESS_OK)) {
-       // inject page fault in guest
-       if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
-           PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
-           return -1;
-       }
-       PrintDebug(info->vm_info, info, "Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
-       PrintDebug(info->vm_info, info, "Manual Says to inject page fault into guest\n");
-       return 0;
+              (shadow_pde_access != PT_ACCESS_OK)) 
+       {
+               // inject page fault in guest
+               //
+               //unknown error
+               //
+               if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
+                       PrintError(info->vm_info, info, "Could not inject guest page fault\n");
+                       return -1;
+               }
+               PrintDebug(info->vm_info, info, "Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
+               PrintDebug(info->vm_info, info, "Manual Says to inject page fault into guest\n");
+               return 0;
     }
 
-  
-    pte32_t * shadow_pt = NULL;
-    pte32_t * guest_pt = NULL;
-
-    // Get the next shadow page level, allocate if not present
-
-    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
-
-        if ((info->use_large_pages == 1) && (guest_pde->large_page == 1)) {
-            // Check underlying physical memory map to see if a large page is viable
-           addr_t guest_pa = BASE_TO_PAGE_ADDR_4MB(((pde32_4MB_t *)guest_pde)->page_base_addr);
-           uint32_t page_size = v3_get_max_page_size(info, guest_pa, PROTECTED);
+       pte32pae_t * shadow_pt = NULL;
+       pte32pae_t * shadow_pt_bd = NULL;
+       pte32_t * guest_pt = NULL;
+       
+    // get the next shadow page level (page table); allocate 2 PDEs (buddies) if not present
+    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) 
+    {
+        // Check if  we can use large pages and the guest memory is properly aligned
+        // to potentially use a large page
+
+        if ((info->use_large_pages == 1) && (guest_pde->large_page == 1)) 
+               {
+                       addr_t guest_pa = BASE_TO_PAGE_ADDR_4MB(((pde32_4MB_t *)guest_pde)->page_base_addr);
+                       uint32_t page_size = v3_get_max_page_size(info, guest_pa, PROTECTED);
            
-           if (page_size == PAGE_SIZE_4MB) {
-               PrintDebug(info->vm_info, info, "using large page for fault_addr %p (gpa=%p)\n", (void *)fault_addr, (void *)guest_pa); 
-               if (handle_4MB_shadow_pagefault_pde_32(info, fault_addr, error_code, shadow_pde_access,
-                                                      (pde32_4MB_t *)shadow_pde, (pde32_4MB_t *)guest_pde) == -1) {
-                   PrintError(info->vm_info, info, "Error handling large pagefault with large page\n");
-                   return -1;
-               }
+                       if (page_size == PAGE_SIZE_4MB) 
+                       {
+
+                               if (shadow_pde != shadow_pde_sd) // ensure shadow_pde is the even-indexed buddy (low bit 0)
+                               {
+                                       pde32pae_t * tmp_addr = shadow_pde;
+                                       shadow_pde = shadow_pde_bd;  
+                                       shadow_pde_bd = tmp_addr;
+                               }
+                               if (handle_4MB_shadow_pagefault_pde_32(info, fault_addr, error_code, shadow_pde_access,
+                                                      (pde32pae_2MB_t *)shadow_pde,(pde32pae_2MB_t *)shadow_pde_bd, (pde32_4MB_t *)guest_pde) == -1) 
+                               {
+                                       PrintError(info->vm_info, info, "Error handling large pagefault with large page\n");
+                                       return -1;
+                               }
+                               return 0;
+                       } 
+           // Fallthrough to handle the region with small pages
+               }       
                
-                return 0;
-           }
-        }
-
-       struct shadow_page_data * shdw_page =  create_new_shadow_pt(info);
-       shadow_pt = (pte32_t *)V3_VAddr((void *)shdw_page->page_pa);
-
-       shadow_pde->present = 1;
-       shadow_pde->user_page = guest_pde->user_page;
-
-
-       if (guest_pde->large_page == 0) {
-           shadow_pde->writable = guest_pde->writable;
-       } else {
-           // This large page flag is temporary until we can get a working cache....
-           ((pde32_4MB_t *)guest_pde)->vmm_info = V3_LARGE_PG;
-
-           if (error_code.write) {
-               shadow_pde->writable = guest_pde->writable;
-               ((pde32_4MB_t *)guest_pde)->dirty = 1;
-           } else {
-               shadow_pde->writable = 0;
-               ((pde32_4MB_t *)guest_pde)->dirty = 0;
-           }
-       }
-      
-       // VMM Specific options
-       shadow_pde->write_through = guest_pde->write_through;
-       shadow_pde->cache_disable = guest_pde->cache_disable;
-       shadow_pde->global_page = guest_pde->global_page;
-       //
-      
-       guest_pde->accessed = 1;
-      
-       shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
-    } else {
-       shadow_pt = (pte32_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
+               struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
+               struct shadow_page_data * shdw_page_bd = create_new_shadow_pt(info);
+               shadow_pt = (pte32pae_t *)V3_VAddr((void *)shdw_page->page_pa);
+               shadow_pt_bd = (pte32pae_t *)V3_VAddr((void *)shdw_page_bd->page_pa);
+               PrintDebug(info->vm_info, info, "Creating new shadow PTs: %p and %p\n", shadow_pt, shadow_pt_bd);
+
+               shadow_pde->present = 1;
+               shadow_pde_bd->present = 1;
+               shadow_pde->user_page = guest_pde->user_page;   
+               shadow_pde_bd->user_page = guest_pde->user_page;
+
+               if (guest_pde->large_page == 0) {
+                       shadow_pde->writable = guest_pde->writable;
+                       shadow_pde_bd->writable = guest_pde->writable;
+               } 
+               else {
+                       ((pde32pae_2MB_t *)guest_pde)->vmm_info = V3_LARGE_PG;
+
+                       if (error_code.write) {
+                               shadow_pde->writable = guest_pde->writable;
+                               shadow_pde_bd->writable = guest_pde->writable;
+                               ((pde32pae_2MB_t *)guest_pde)->dirty = 1;       
+                       } 
+                       else {
+                               shadow_pde->writable = 0;
+                               shadow_pde_bd->writable = 0;
+                               ((pde32pae_2MB_t *)guest_pde)->dirty = 0;
+                       } 
+               }               
+       
+               // VMM Specific options
+               shadow_pde->write_through = guest_pde->write_through;
+               shadow_pde->cache_disable = guest_pde->cache_disable;
+               shadow_pde->global_page = guest_pde->global_page;
+               
+               shadow_pde_bd->write_through = guest_pde->write_through;
+               shadow_pde_bd->cache_disable = guest_pde->cache_disable;
+               shadow_pde_bd->global_page = guest_pde->global_page;
+               //
+               guest_pde->accessed = 1;
+               
+               shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
+               shadow_pde_bd->pt_base_addr = PAGE_BASE_ADDR(shdw_page_bd->page_pa);
+    } 
+       else {
+         if ((info->use_large_pages == 1) && (guest_pde->large_page == 1) && (guest_pde->vmm_info == V3_SHADOW_LARGE_PAGE)) 
+               {
+                       addr_t guest_pa = BASE_TO_PAGE_ADDR_4MB(((pde32_4MB_t *)guest_pde)->page_base_addr);
+                       uint32_t page_size = v3_get_max_page_size(info, guest_pa, PROTECTED);   
+                       if (page_size == PAGE_SIZE_4MB) 
+                       {
+                               if (shadow_pde_access == PT_ACCESS_OK) {
+                                       // Inconsistent state...
+                                       // Guest Re-Entry will flush tables and everything should now work
+                                       PrintDebug(info->vm_info, info, "Inconsistent state PDE... Guest re-entry should flush tlb\n");
+                    //PrintDebug(info->vm_info, info, "Bug here: shadow_pde_access is %d page_size is %d\n",
+                                       //         (uint_t)shadow_pde_access,(uint_t)page_size);
+                                       return 0;
+                               }
+                       } 
+               }
+               shadow_pt = (pte32pae_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
     }
-
-    
-    if (guest_pde->large_page == 0) {
-       if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) {
-           // Machine check the guest
-           PrintDebug(info->vm_info, info, "Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
-           v3_raise_exception(info, MC_EXCEPTION);
-           return 0;
-       }
-
-       if (handle_pte_shadow_pagefault_32(info, fault_addr, error_code, shadow_pt, guest_pt)  == -1) {
-           PrintError(info->vm_info, info, "Error handling Page fault caused by PTE\n");
-           return -1;
+       
+    if (guest_pde->large_page == 0) 
+       {
+               if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) 
+               {
+                       // Machine check the guest
+                       PrintDebug(info->vm_info, info, "Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
+                       v3_raise_exception(info, MC_EXCEPTION);
+                       return 0;
+               }       
+               if (handle_pte_shadow_pagefault_32(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) 
+               {
+                       PrintError(info->vm_info, info, "Error handling Page fault caused by PTE\n");
+                       return -1;
+               }
        }
-    } else {
-       if (handle_4MB_shadow_pagefault_pte_32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) {
-           PrintError(info->vm_info, info, "Error handling large pagefault\n");
-           return -1;
-       }       
-    }
-
-    return 0;
+       else {
+               //
+               // back the guest 4MB page with 4KB shadow pages
+               //
+               if (handle_4MB_shadow_pagefault_pte_32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) 
+               {
+                       PrintError(info->vm_info, info, "Error handling large pagefault\n");
+                       return -1;
+               }        
+    }  
+       
+       return 0;
 }
-
-
+       
+       
 static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
-                                         pte32_t * shadow_pt, pte32_t * guest_pt) {
-
+                                         pte32pae_t * shadow_pt, pte32_t * guest_pt) 
+{
     pt_access_status_t guest_pte_access;
     pt_access_status_t shadow_pte_access;
    pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);
-    pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
+    pte32pae_t * shadow_pte = (pte32pae_t *)&(shadow_pt[PTE32PAE_INDEX(fault_addr)]);
     addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) +  PAGE_OFFSET(fault_addr);
 
+     PrintDebug(info->vm_info, info, "Handling PTE fault\n");
+
     struct v3_mem_region * shdw_reg =  v3_get_mem_region(info->vm_info, info->vcpu_id, guest_pa);
 
     if (shdw_reg == NULL) {
-       // Inject a machine check in the guest
-       PrintDebug(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
-       v3_raise_exception(info, MC_EXCEPTION);
-       return 0;
+               // Inject a machine check in the guest
+               PrintDebug(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
+               v3_raise_exception(info, MC_EXCEPTION);
+               return 0;
     }
 
     // Check the guest page permissions
     guest_pte_access = v3_can_access_pte32(guest_pt, fault_addr, error_code);
 
     // Check the shadow page permissions
-    shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);
+    shadow_pte_access = v3_can_access_pte32pae(shadow_pt, fault_addr, error_code);
   
   
     /* Was the page fault caused by the Guest's page tables? */
-    if (v3_is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
+    if (v3_is_guest_pf(guest_pte_access, shadow_pte_access) == 1) 
+       {
 
-       PrintDebug(info->vm_info, info, "Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n", 
+               PrintDebug(info->vm_info, info, "Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n", 
                   guest_pte_access, *(uint_t*)&error_code);
        
 
-       //   inject:
-       if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
-           PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
-           return -1;
-       }       
+               //   inject:
+               if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
+                       PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
+                       return -1;
+               }       
 
-       return 0; 
+               return 0; 
     }
 
   
   
-    if (shadow_pte_access == PT_ACCESS_OK) {
-       // Inconsistent state...
-       // Guest Re-Entry will flush page tables and everything should now work
-       PrintDebug(info->vm_info, info, "Inconsistent state... Guest re-entry should flush tlb\n");
-       return 0;
+    if (shadow_pte_access == PT_ACCESS_OK) 
+       {
+               // Inconsistent state...
+               // Guest Re-Entry will flush page tables and everything should now work
+               PrintDebug(info->vm_info, info, "Inconsistent state PTE... Guest re-entry should flush tlb\n");
+               PrintDebug(info->vm_info, info, "guest_pte_access is %d and shadow_pte_access is %d\n", (uint_t)guest_pte_access, 
+                          (uint_t)shadow_pte_access);
+               PrintDebug(info->vm_info, info, "Error_code: write 0x%x, present 0x%x, user 0x%x, rsvd_access 0x%x, ifetch 0x%x \n",  error_code.write,error_code.present,error_code.user,error_code.rsvd_access,error_code.ifetch);
+               PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
+               PrintGuestPageTree(info,fault_addr,(addr_t)(info->shdw_pg_state.guest_cr3));
+               return 0;
     }
 
 
-    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
-       // Page Table Entry Not Present
-       PrintDebug(info->vm_info, info, "guest_pa =%p\n", (void *)guest_pa);
+    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) 
+       {
+               // Page Table Entry Not Present
+               PrintDebug(info->vm_info, info, "guest_pa =%p\n", (void *)guest_pa);
 
-       if ((shdw_reg->flags.alloced == 1) && (shdw_reg->flags.read == 1)) {
-           addr_t shadow_pa = 0;
+               if ((shdw_reg->flags.alloced == 1) && (shdw_reg->flags.read == 1)) 
+               {
+                       addr_t shadow_pa = 0;
 
-           if (v3_gpa_to_hpa(info, guest_pa, &shadow_pa) == -1) {
-               PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_pa);
-               return -1;
-           }
+                       if (v3_gpa_to_hpa(info, guest_pa, &shadow_pa) == -1) 
+                       {
+                               PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_pa);
+                               return -1;
+                       }
 
-           shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
+                       shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
 
-           PrintDebug(info->vm_info, info, "\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));
+                       PrintDebug(info->vm_info, info, "\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));
       
-           shadow_pte->present = guest_pte->present;
-           shadow_pte->user_page = guest_pte->user_page;
+                       shadow_pte->present = guest_pte->present;
+                       shadow_pte->user_page = guest_pte->user_page;
       
-           //set according to VMM policy
-           shadow_pte->write_through = guest_pte->write_through;
-           shadow_pte->cache_disable = guest_pte->cache_disable;
-           shadow_pte->global_page = guest_pte->global_page;
-           //
+                       //set according to VMM policy
+                       shadow_pte->write_through = guest_pte->write_through;
+                       shadow_pte->cache_disable = guest_pte->cache_disable;
+                       shadow_pte->global_page = guest_pte->global_page;
+                       //
       
-           guest_pte->accessed = 1;
+                       guest_pte->accessed = 1;
       
-           if (guest_pte->dirty == 1) {
-               shadow_pte->writable = guest_pte->writable;
-           } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
-               shadow_pte->writable = guest_pte->writable;
+                       if (guest_pte->dirty == 1) {
+                               shadow_pte->writable = guest_pte->writable;
+                       } 
+                       else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
+                               shadow_pte->writable = guest_pte->writable;
+                               guest_pte->dirty = 1;
+                       } 
+                       else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
+                               shadow_pte->writable = 0;
+                       }
+
+                       // Write hooks trump all, and are set Read Only
+                       if (shdw_reg->flags.write == 0) {
+                               shadow_pte->writable = 0;
+                       }       
+
+               } 
+               else {
+                       // Page fault on unhandled memory region
+           
+                       if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
+                               PrintError(info->vm_info, info, "Special Page fault handler returned error for address: %p\n",  (void *)fault_addr);
+                               return -1;
+                       }
+               }
+    } 
+       else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) 
+       {
                guest_pte->dirty = 1;
-           } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
-               shadow_pte->writable = 0;
-           }
-
 
-           if (shdw_reg->flags.write == 0) {
-               shadow_pte->writable = 0;
-           }
+               if (shdw_reg->flags.write == 1) {
+                       PrintDebug(info->vm_info, info, "Shadow PTE Write Error\n");
+                       shadow_pte->writable = guest_pte->writable;
+               } 
+               else {
+                       if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
+                               PrintError(info->vm_info, info, "Special Page fault handler returned error for address: %p\n",  (void *)fault_addr);
+                               return -1;
+                       }
+               }
+               return 0;
+    } 
+       else {
+               // Inject page fault into the guest     
+               if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
+                       PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
+                       return -1;
+               }
 
-       } else {
-           // Page fault on unhandled memory region
-           
-           if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
-               PrintError(info->vm_info, info, "Special Page fault handler returned error for address: %p\n",  (void *)fault_addr);
-               return -1;
-           }
-       }
-    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
-       guest_pte->dirty = 1;
-
-       if (shdw_reg->flags.write == 1) {
-           PrintDebug(info->vm_info, info, "Shadow PTE Write Error\n");
-           shadow_pte->writable = guest_pte->writable;
-       } else {
-           if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
-               PrintError(info->vm_info, info, "Special Page fault handler returned error for address: %p\n",  (void *)fault_addr);
+               PrintError(info->vm_info, info, "PTE Page fault fell through... Not sure if this should ever happen\n");
+               PrintError(info->vm_info, info, "Manual Says to inject page fault into guest\n");
                return -1;
-           }
-       }
-
-
-       return 0;
-
-    } else {
-       // Inject page fault into the guest     
-       if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
-           PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
-           return -1;
-       }
-
-       PrintError(info->vm_info, info, "PTE Page fault fell through... Not sure if this should ever happen\n");
-       PrintError(info->vm_info, info, "Manual Says to inject page fault into guest\n");
-       return -1;
     }
 
     return 0;
-}
-
-// Handle a 4MB page fault with small pages in the PTE
-static int handle_4MB_shadow_pagefault_pte_32(struct guest_info * info, 
+}      
+       
+       
+// Handle a 4MB guest page fault with two 2MB pages at the PDE level
+static int handle_4MB_shadow_pagefault_pde_32(struct guest_info * info, 
                                     addr_t fault_addr, pf_error_t error_code, 
-                                    pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde) 
+                                    pt_access_status_t shadow_pde_access,
+                                    pde32pae_2MB_t * large_shadow_pde, pde32pae_2MB_t * large_shadow_pde_bd,
+                                        pde32_4MB_t * large_guest_pde) 
 {
-    pt_access_status_t shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);
-    pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
-    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);  
+       addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);  
 
-
-    PrintDebug(info->vm_info, info, "Handling 4MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
-    PrintDebug(info->vm_info, info, "ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
+    PrintDebug(info->vm_info, info, "Handling 4MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
+    PrintDebug(info->vm_info, info, "LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);
 
     struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
 
  
     if (shdw_reg == NULL) {
-       // Inject a machine check in the guest
-       PrintDebug(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
-       v3_raise_exception(info, MC_EXCEPTION);
-       return -1;
+               // Inject a machine check in the guest
+               PrintDebug(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
+               v3_raise_exception(info, MC_EXCEPTION);
+               return -1;
     }
-
-    if (shadow_pte_access == PT_ACCESS_OK) {
-       // Inconsistent state...
-       // Guest Re-Entry will flush tables and everything should now workd
-       PrintDebug(info->vm_info, info, "Inconsistent state... Guest re-entry should flush tlb\n");
-       return 0;
+       
+       //dead bug
+    if (shadow_pde_access == PT_ACCESS_OK) {
+               // Inconsistent state...
+               // Guest Re-Entry will flush tables and everything should now work
+               PrintDebug(info->vm_info, info, "Inconsistent state 4MB pde... Guest re-entry should flush tlb\n");
+               return 0;
     }
 
   
-    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
-       // Get the guest physical address of the fault
+    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) 
+       {
+               // Get the guest physical address of the fault
 
-       if ((shdw_reg->flags.alloced == 1) && 
-           (shdw_reg->flags.read  == 1)) {
-           addr_t shadow_pa = 0;
+               if ((shdw_reg->flags.alloced == 1) && 
+                       (shdw_reg->flags.read  == 1)) 
+               {
+                       addr_t shadow_pa = 0;
 
 
-           if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
-               PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_fault_pa);
-               return -1;
-           }
-
-           shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
+                       if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) 
+                       {
+                               PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_fault_pa);
+                               return -1;
+                       }
 
-           PrintDebug(info->vm_info, info, "\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));
+                       PrintDebug(info->vm_info, info, "shadow PA = %p\n", (void *)shadow_pa);
 
-           shadow_pte->present = 1;
 
-           /* We are assuming that the PDE entry has precedence
-            * so the Shadow PDE will mirror the guest PDE settings, 
-            * and we don't have to worry about them here
-            * Allow everything
-            */
-           shadow_pte->user_page = 1;
+                       large_guest_pde->vmm_info = V3_SHADOW_LARGE_PAGE; /* For invalidations */
+                       // the even-indexed shadow pde (low bit 0) gets the lower-addressed
+                       // 2MB half and its buddy gets the upper half
+                       large_shadow_pde->page_base_addr = PAGE_BASE_ADDR_4MB(shadow_pa) << 1;
+                       large_shadow_pde_bd->page_base_addr = (PAGE_BASE_ADDR_4MB(shadow_pa) << 1) | 1;
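+                       // PAGE_BASE_ADDR_4MB yields a 4MB frame number n; the shift
+                       // rescales it to 2MB frames, so the pair maps frames 2n and 2n+1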
+                       
+                       // large_shadow_pde->large_page = 1;
+            large_shadow_pde->present = 1;
+            large_shadow_pde->user_page = 1;
+                       
+           //          large_shadow_pde_bd->large_page = 1;
+            large_shadow_pde_bd->present = 1;
+            large_shadow_pde_bd->user_page = 1;
 
-           //set according to VMM policy
-           shadow_pte->write_through = large_guest_pde->write_through;
-           shadow_pte->cache_disable = large_guest_pde->cache_disable;
-           shadow_pte->global_page = large_guest_pde->global_page;
-           //
-      
+           PrintDebug(info->vm_info, info, "\tMapping shadow pages (%p) and (%p)\n", 
+                                               (void *)BASE_TO_PAGE_ADDR_2MB(large_shadow_pde->page_base_addr),
+                                               (void *)BASE_TO_PAGE_ADDR_2MB(large_shadow_pde_bd->page_base_addr));
 
-           if (shdw_reg->flags.write == 0) {
-               shadow_pte->writable = 0;
-           } else {
-               shadow_pte->writable = 1;
-           }
+            if (shdw_reg->flags.write == 0) {
+                large_shadow_pde->writable = 0;
+                               large_shadow_pde_bd->writable = 0;
+            } else {
+                large_shadow_pde_bd->writable = 1;
+                               large_shadow_pde->writable = 1;
+            }
 
-       } else {
-           if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
-               PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
-               return -1;
-           }
-       }
-    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
+                       //set according to VMM policy
+                       large_shadow_pde->write_through = large_guest_pde->write_through;
+                       large_shadow_pde->cache_disable = large_guest_pde->cache_disable;
+                       large_shadow_pde->global_page = large_guest_pde->global_page;
+
+                       large_shadow_pde_bd->write_through = large_guest_pde->write_through;
+                       large_shadow_pde_bd->cache_disable = large_guest_pde->cache_disable;
+                       large_shadow_pde_bd->global_page = large_guest_pde->global_page;                        
+               } 
+               else {
+                       if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+                               PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+                               return -1;
+                       }
+               }
+       } 
+       else if (shadow_pde_access == PT_ACCESS_WRITE_ERROR) 
+       {
+
+               if (shdw_reg->flags.write == 0) {
+                       if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+                               PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+                               return -1;
+                       }       
+               }
 
-       if (shdw_reg->flags.write == 0) {
-           if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
-               PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+    } 
+       else {
+               PrintError(info->vm_info, info, "Error in large page fault handler...\n");
+               PrintError(info->vm_info, info, "This case should have been handled at the top level handler\n");
                return -1;
-           }
-       }
-
-    } else {
-       PrintError(info->vm_info, info, "Error in large page fault handler...\n");
-       PrintError(info->vm_info, info, "This case should have been handled at the top level handler\n");
-       return -1;
     }
 
-    PrintDebug(info->vm_info, info, "Returning from large page->small page fault handler\n");
-    return 0;
-}
-
-
-// Handle a 4MB page fault with a 4MB page in the PDE
-static int handle_4MB_shadow_pagefault_pde_32(struct guest_info * info, 
-                                    addr_t fault_addr, pf_error_t error_code, 
-                                    pt_access_status_t shadow_pde_access,
-                                    pde32_4MB_t * large_shadow_pde, pde32_4MB_t * large_guest_pde) 
+       PrintDebug(info->vm_info, info, "Returning from large page->large page fault handler\n");
+       return 0;
+}      
+       
+static int handle_4MB_shadow_pagefault_pte_32(struct guest_info * info, 
+                                             addr_t fault_addr, pf_error_t error_code, 
+                                             pte32pae_t * shadow_pt, pde32_4MB_t * large_guest_pde) 
 {
-    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);  
-
+    pt_access_status_t shadow_pte_access = v3_can_access_pte32pae(shadow_pt, fault_addr, error_code);
+    pte32pae_t * shadow_pte = (pte32pae_t *)&(shadow_pt[PTE32PAE_INDEX(fault_addr)]);
+    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);
+    //  struct shadow_page_state * state = &(info->shdw_pg_state);
 
-    PrintDebug(info->vm_info, info, "Handling 4MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
-    PrintDebug(info->vm_info, info, "LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);
+    PrintDebug(info->vm_info, info, "Handling 4MB PTE fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
+    PrintDebug(info->vm_info, info, "ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
 
     struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
 
  
     if (shdw_reg == NULL) {
-       // Inject a machine check in the guest
-       PrintDebug(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
-       v3_raise_exception(info, MC_EXCEPTION);
-       return -1;
+               // Inject a machine check in the guest
+               PrintError(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
+               v3_raise_exception(info, MC_EXCEPTION);
+               return 0;
     }
 
-    if (shadow_pde_access == PT_ACCESS_OK) {
-       // Inconsistent state...
-       // Guest Re-Entry will flush tables and everything should now workd
-       PrintDebug(info->vm_info, info, "Inconsistent state... Guest re-entry should flush tlb\n");
-       return 0;
+    if (shadow_pte_access == PT_ACCESS_OK) {
+               // Inconsistent state...
+               // Guest Re-Entry will flush tables and everything should now work
+               PrintDebug(info->vm_info, info, "Inconsistent state 4MB PTE... Guest re-entry should flush tlb\n");
+               //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
+               return 0;
     }
 
   
-    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
+    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault
 
-       if ((shdw_reg->flags.alloced == 1) && 
-           (shdw_reg->flags.read  == 1)) {
-           addr_t shadow_pa = 0;
-
-
-           if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
-               PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_fault_pa);
+               if ((shdw_reg->flags.alloced == 1) && 
+                       (shdw_reg->flags.read == 1)) {
+                       addr_t shadow_pa = 0;
+
+                       if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
+                               PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_fault_pa);
+                               return -1;
+                       }
+
+                       shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
+
+                       shadow_pte->present = 1;
+
+                       /* We are assuming that the PDE entry has precedence
+                       * so the Shadow PDE will mirror the guest PDE settings, 
+                       * and we don't have to worry about them here
+                       * Allow everything
+                       */
+                       shadow_pte->user_page = 1;
+
+                       if (shdw_reg->flags.write == 0) {
+                               shadow_pte->writable = 0;
+                       } else {
+                               shadow_pte->writable = 1;
+                       }
+
+                       //set according to VMM policy
+                       shadow_pte->write_through = large_guest_pde->write_through;
+                       shadow_pte->cache_disable = large_guest_pde->cache_disable;
+                       shadow_pte->global_page = large_guest_pde->global_page;
+                       //
+      
+               } else {
+                       if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+                               PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+                               return -1;
+                       }
+               }
+    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
+           if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+                       PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+                       return -1;
+               }
+    } else {
+               PrintError(info->vm_info, info, "Error in large page fault handler...\n");
+               PrintError(info->vm_info, info, "This case should have been handled at the top level handler\n");
                return -1;
-           }
-
-           PrintDebug(info->vm_info, info, "shadow PA = %p\n", (void *)shadow_pa);
-
-
-            large_guest_pde->vmm_info = V3_LARGE_PG; /* For invalidations */
-            large_shadow_pde->page_base_addr = PAGE_BASE_ADDR_4MB(shadow_pa);
-            large_shadow_pde->large_page = 1;
-            large_shadow_pde->present = 1;
-            large_shadow_pde->user_page = 1;
-
-           PrintDebug(info->vm_info, info, "\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR_4MB(large_shadow_pde->page_base_addr));
-
-            if (shdw_reg->flags.write == 0) {
-                large_shadow_pde->writable = 0;
-            } else {
-                large_shadow_pde->writable = 1;
-            }
+    }
 
-           //set according to VMM policy
-           large_shadow_pde->write_through = large_guest_pde->write_through;
-           large_shadow_pde->cache_disable = large_guest_pde->cache_disable;
-           large_shadow_pde->global_page = large_guest_pde->global_page;
-           //
+    //  PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
+    PrintDebug(info->vm_info, info, "Returning from large page->small page fault handler\n");
+    return 0;
+}      
+       
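+/* Callback for v3_drill_host_pt_32pae(), invoked once per page table level.
+ * By the convention these drill callbacks follow, it returns 0 to continue
+ * to the next level, 1 once the entry covering vaddr has been invalidated,
+ * and -1 on error. */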
+static int invalidation_cb32_64(struct guest_info * info, page_type_t type, 
+                             addr_t vaddr, addr_t page_ptr, addr_t page_pa, 
+                             void * private_data) {
+
+    switch (type) {
+       case PAGE_PDP32PAE:
+           {
+                       pdpe32pae_t * pdp = (pdpe32pae_t *)page_ptr;
+                       pdpe32pae_t * pdpe = &(pdp[PDPE32PAE_INDEX(vaddr)]);
+
+                       if (pdpe->present == 0) {
+                               return 1;
+                       }
+     
+                       if (pdpe->vmm_info == V3_LARGE_PG) {
+                               PrintError(info->vm_info, info, "1 Gigabyte pages not supported\n");
+                               return -1;
+                       }
+
+                       return 0;
+           }
+       case PAGE_PD32PAE:
+           {
+                       pde32pae_t * pd = (pde32pae_t *)page_ptr;
+                       pde32pae_t * pde = &(pd[PDE32PAE_INDEX(vaddr)]);
+                       pde32pae_t * pde_bd = &(pd[GET_BUDDY(PDE32PAE_INDEX(vaddr))]);
+                       if (pde->present == 0) {
+                               return 1;
+                       }
+      
+                       if (pde->vmm_info == V3_LARGE_PG || pde->vmm_info == V3_SHADOW_LARGE_PAGE) {
+                               pde->present = 0;
+                               pde_bd->present = 0;
+                               return 1;
+                       }
 
-       } else {
-           if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
-               PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
-               return -1;
+                       return 0;
            }
-       }
-    } else if (shadow_pde_access == PT_ACCESS_WRITE_ERROR) {
+       case PAGE_PT32PAE:
+           {
+                       pte32pae_t * pt = (pte32pae_t *)page_ptr;
 
-       if (shdw_reg->flags.write == 0) {
-           if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
-               PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
-               return -1;
+                       pt[PTE32PAE_INDEX(vaddr)].present = 0;
+
+                       return 1;
            }
-       }
+       default:
+           PrintError(info->vm_info, info, "Invalid Page Type\n");
+           return -1;
 
-    } else {
-       PrintError(info->vm_info, info, "Error in large page fault handler...\n");
-       PrintError(info->vm_info, info, "This case should have been handled at the top level handler\n");
-       return -1;
     }
 
-    PrintDebug(info->vm_info, info, "Returning from large page->large page fault handler\n");
-    return 0;
-}
+    // should not get here
+    PrintError(info->vm_info, info, "Should not get here....\n");
+    return -1;
+}      
 
-/* If we start to optimize we should look up the guest pages in the cache... */
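+/* The 32 bit guest now runs on 32PAE shadow page tables, so invlpg is
+ * handled by drilling the host's 32PAE tables and clearing whichever
+ * entry maps vaddr. */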
 static inline int handle_shadow_invlpg_32(struct guest_info * info, addr_t vaddr) {
-    pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
-    pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(vaddr)];
+    PrintDebug(info->vm_info, info, "INVLPG32 (32PAE shadow) - %p\n",(void*)vaddr);
 
-    addr_t guest_cr3 =  CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
-    pde32_t * guest_pd = NULL;
-    pde32_t * guest_pde;
-
-    if (v3_gpa_to_hva(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
-       PrintError(info->vm_info, info, "Invalid Guest PDE Address: 0x%p\n",  (void *)guest_cr3);
-       return -1;
+    int ret =  v3_drill_host_pt_32pae(info, info->ctrl_regs.cr3, vaddr, invalidation_cb32_64, NULL);
+    if (ret == -1) {
+               PrintError(info->vm_info, info, "Page table drill returned error.... \n");
+               PrintHostPageTree(info, vaddr, info->ctrl_regs.cr3);
     }
-  
-    guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(vaddr)]);
-  
-    if (guest_pde->large_page == 1) {
-       shadow_pde->present = 0;
-       PrintDebug(info->vm_info, info, "Invalidating Large Page\n");
-    } else if (shadow_pde->present == 1) {
-       pte32_t * shadow_pt = (pte32_t *)(addr_t)BASE_TO_PAGE_ADDR_4KB(shadow_pde->pt_base_addr);
-       pte32_t * shadow_pte = (pte32_t *) V3_VAddr( (void*) &shadow_pt[PTE32_INDEX(vaddr)] );
-    
-       PrintDebug(info->vm_info, info, "Setting not present\n");
-    
-       shadow_pte->present = 0;
-    }
-    return 0;
+
+    return (ret == -1) ? -1 : 0; 
 }
+       
index 2c79599..d5e75a1 100644 (file)
  * and the University of New Mexico.  You can find out more at 
  * http://www.v3vee.org
  *
+ * Copyright (c) 2014, Daniel Zuo <pengzuo2014@u.northwestern.edu>
+ * Copyright (c) 2014, Chunxiao Diao <chunxiaodiao2012@u.northwestern.edu> 
  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
  * All rights reserved.
  *
- * Author: Jack Lange <jarusl@cs.northwestern.edu>
+ * Author: Daniel Zuo <pengzuo2014@u.northwestern.edu>
+ *        Chunxiao Diao <chunxiaodiao2012@u.northwestern.edu> 
+ *         Jack Lange <jarusl@cs.northwestern.edu> 
  *
  * This is free software.  You are permitted to use,
  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
  */
-
+#define GET_BUDDY(x) (((ullong_t)x) ^ 0x1)
+#define MARK_LAST_ZERO(x) (((ullong_t)x) & ~0x1)
+#define CR3_PAGE_BASE_ADDR(x) ((x) >> 5)
+#define V3_SHADOW_LARGE_PAGE 0x3
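+/* A non-PAE guest's 4MB large page is backed by two adjacent 2MB shadow
+ * PDEs: GET_BUDDY flips the low index bit to reach the second ("buddy")
+ * entry of such a pair, and MARK_LAST_ZERO selects the even entry (the
+ * 32 bit VTLB's invalidation path clears both entries of a pair).
+ * CR3_PAGE_BASE_ADDR extracts the PAE CR3 PDPT base (bits 31:5; the PDPT
+ * must be 32 byte aligned).  V3_SHADOW_LARGE_PAGE tags guest PDEs whose
+ * large mapping has been shadowed, so invalidations can find them. */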
 
 static inline int activate_shadow_pt_32pae(struct guest_info * info) {
-    PrintError(info->vm_info, info, "Activating 32 bit PAE page tables not implemented\n");
-    return -1;
+    
+    struct cr3_32_PAE * shadow_cr3 = (struct cr3_32_PAE *)&(info->ctrl_regs.cr3);
+    struct cr3_32_PAE * guest_cr3 = (struct cr3_32_PAE *)&(info->shdw_pg_state.guest_cr3);   
+    struct cr4_32 * shadow_cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
+    struct shadow_page_data * shadow_pt = create_new_shadow_pt(info);
+    addr_t shadow_pt_addr = shadow_pt->page_pa;
+    shadow_pt->cr3 = shadow_pt->page_pa;
+    PrintDebug(info->vm_info, info, "Top level shadow 32pae pdp page pa=%p\n", (void *) shadow_pt_addr);
+
+    //Shadow cr3 points to the new page, which is PDPT
+    shadow_cr3->pdpt_base_addr = CR3_PAGE_BASE_ADDR(shadow_pt_addr);
+    PrintDebug(info->vm_info, info, "Creating new shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pdpt_base_addr));
+
+    shadow_cr3->pwt = guest_cr3->pwt;
+    shadow_cr3->pcd = guest_cr3->pcd;
+    shadow_cr4->pae = 1;   // the shadow pager always runs with PAE enabled
+    //shadow_efer->lma = 1;
+
+    return 0;
 }
 
+/*
+*
+* Shadow PAE 32 pagefault handlers 
+*
+*/
 
+static int handle_pde_shadow32pae_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pde32pae_t * shadow_pd, pde32pae_t * guest_pd);
+static int handle_pte_shadow32pae_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pte32pae_t * shadow_pt, pte32pae_t * guest_pt);
+static int handle_2MB_shadow32pae_pagefault_pte_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pte32pae_t * shadow_pt, pde32pae_2MB_t * large_guest_pde);
+static int handle_2MB_shadow32pae_pagefault_pde_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pde32pae_t * shadow_pd, pde32pae_2MB_t * large_guest_pde);
 
+static inline int handle_shadow_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+    
+    pdpe32pae_t * shadow_pdp = CR3_TO_PDPE32PAE_VA(info->ctrl_regs.cr3);
+    pdpe32pae_t * guest_pdp = NULL;
+    addr_t guest_cr3 = CR3_TO_PDPE32PAE_PA(info->shdw_pg_state.guest_cr3);
+    
+    pt_access_status_t shadow_pdpe_access;
+    pt_access_status_t guest_pdpe_access;
+    
+    pdpe32pae_t * guest_pdpe = NULL;
+    pdpe32pae_t * shadow_pdpe = (pdpe32pae_t *)&(shadow_pdp[PDPE32PAE_INDEX(fault_addr)]);
+    
+    PrintDebug(info->vm_info, info, "32 bit PAE shadow paging page fault handler: %p\n", (void*)fault_addr);
+    PrintDebug(info->vm_info, info, "Handling PDP fault\n");
+    
+    if (v3_gpa_to_hva(info, guest_cr3, (addr_t *)&guest_pdp) == -1) {
+        PrintError(info->vm_info, info, "Invalid Guest PDPE Address: 0x%p\n", (void *)guest_cr3);
+        return -1;
+    }
+    
+    guest_pdpe = (pdpe32pae_t *)&(guest_pdp[PDPE32PAE_INDEX(fault_addr)]);
+    
+    PrintDebug(info->vm_info, info, "Checking Guest %p\n", (void *)guest_pdp);
+    //Check the guest page permissions
+    guest_pdpe_access = v3_can_access_pdpe32pae(guest_pdp, fault_addr, error_code);
+    
+    
+    PrintDebug(info->vm_info, info, "Checking Host %p\n", (void *)shadow_pdp);
+    //Check the host page permissions
+    shadow_pdpe_access = v3_can_access_pdpe32pae(shadow_pdp, fault_addr, error_code);
+    
+    /* Was the page fault caused by an out-of-range address */
+    if ((PDPE32PAE_INDEX(fault_addr) != 0) && (PDPE32PAE_INDEX(fault_addr) != 1) && (PDPE32PAE_INDEX(fault_addr) != 2) && (PDPE32PAE_INDEX(fault_addr) != 3)) {
+        PrintDebug(info->vm_info, info, "Fault PDPE index is 0x%x out of range\n", PDPE32PAE_INDEX(fault_addr));
+        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
+            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
+            return -1;
+        }
+        return 0;      
+    }
+    
+    /* Zero address fault */
+    if (fault_addr == 0) {
+        PrintDebug(info->vm_info, info, "Guest page tree for guest virtual address zero fault\n");
+        PrintGuestPageTree(info, fault_addr, (addr_t)(info->shdw_pg_state.guest_cr3));
+        PrintDebug(info->vm_info, info, "Host page tree for guest virtual address zero fault\n");
+        PrintHostPageTree(info, fault_addr, (addr_t)(info->ctrl_regs.cr3));
+    }
+    
+    /* Was the page fault caused by the Guest's page tables */
+    if (v3_is_guest_pf(guest_pdpe_access, shadow_pdpe_access) == 1) {
+        PrintDebug(info->vm_info, info, "Injecting PDPE pf to guest: (guest access error=%d) (pf error code=%d)\n", *(uint_t *)&guest_pdpe_access, *(uint_t *)&error_code);
+        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
+            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
+            return -1;
+        }
+        return 0;
+    }
+    
+    if (shadow_pdpe_access == PT_ACCESS_USER_ERROR || shadow_pdpe_access == PT_ACCESS_WRITE_ERROR) {
+        PrintDebug(info->vm_info, info, "Shadow Paging User or Write Access error (shadow_pdpe_access = 0x%x, guest_pdpe_access = 0x%x). Ignoring it.\n", shadow_pdpe_access, guest_pdpe_access);
+    } else if ((shadow_pdpe_access != PT_ACCESS_NOT_PRESENT) && (shadow_pdpe_access != PT_ACCESS_OK)) {
+        // inject page fault in guest
+        //
+        // unknown error
+        //
+        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
+            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
+            return -1;
+        }
+        PrintDebug(info->vm_info, info, "Unknown Error occurred (shadow_pdpe_access=%x)\n", shadow_pdpe_access);
+        PrintDebug(info->vm_info, info, "Manual says to inject page fault into guest\n");
+        return 0;
+    }
+    
+    pde32pae_t * shadow_pd = NULL;
+    pde32pae_t * guest_pd = NULL;
+    
+    // Get the next shadow page level, allocate if not present
+    if (shadow_pdpe_access == PT_ACCESS_NOT_PRESENT) {
+        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
+        shadow_pd = (pde32pae_t *)V3_VAddr((void *)shdw_page->page_pa);
+        
+        shadow_pdpe->present = 1;
+        //shadow_pdpe->user_page = guest_pdpe->user_page;
+        //shadow_pdpe->writable = guest_pdpe->writable;
+        shadow_pdpe->write_through = guest_pdpe->write_through;
+        shadow_pdpe->cache_disable = guest_pdpe->cache_disable;
+        
+        guest_pdpe->accessed = 1;
+        
+        shadow_pdpe->pd_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
+    } else {
+        shadow_pd = (pde32pae_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pdpe->pd_base_addr));
+    }
+    
+    // Continue processing at the next level
+    if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
+        //Machine check the guest
+        PrintError(info->vm_info, info, "Invalid Guest PD Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr));
+        v3_raise_exception(info, MC_EXCEPTION);
+        return 0;
+    }
+    
+    if (handle_pde_shadow32pae_pagefault_32pae(info, fault_addr, error_code, shadow_pd, guest_pd) == -1) {
+        PrintError(info->vm_info, info, "Error handling Page Fault caused by PDE\n");
+        return -1;
+    }
+    return 0;
+}
 
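+/* Second level: resolve the PDE for fault_addr.  A guest 2MB large page
+ * is either mapped by a single 2MB shadow PDE (when the host region
+ * permits it) or split across a shadow PT of 4KB entries. */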
+static int handle_pde_shadow32pae_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pde32pae_t * shadow_pd, pde32pae_t * guest_pd) {
+   pt_access_status_t guest_pde_access;
+   pt_access_status_t shadow_pde_access;
+   
+   pde32pae_t * guest_pde = NULL;
+   guest_pde = (pde32pae_t *)&(guest_pd[PDE32PAE_INDEX(fault_addr)]);
 
+   pde32pae_t * shadow_pde = (pde32pae_t *)&(shadow_pd[PDE32PAE_INDEX(fault_addr)]);
+   // The buddy PDE (GET_BUDDY/MARK_LAST_ZERO) pairs two adjacent 2MB shadow
+   // PDEs to back one non-PAE 4MB guest page; a 32PAE guest's large pages
+   // are already 2MB, so the buddy entries stay disabled here:
+   // pde32pae_t * shadow_pde_bd = (pde32pae_t *)&(shadow_pd[GET_BUDDY(PDE32PAE_INDEX(fault_addr))]);
+   // pde32pae_t * shadow_pde_sd = (pde32pae_t *)&(shadow_pd[MARK_LAST_ZERO(PDE32PAE_INDEX(fault_addr))]);
+   PrintDebug(info->vm_info, info, "Handling PDE fault\n");    
 
-/* 
- * *
- * * 
- * * 32 bit PAE  Page table fault handlers
- * *
- * *
- */
+   PrintDebug(info->vm_info, info, "Checking guest_pde_access %p\n", (void *)guest_pd);        
+    // Check the guest page permissions
+    guest_pde_access = v3_can_access_pde32pae(guest_pd, fault_addr, error_code);       
+    // Check the shadow page permissions
+    PrintDebug(info->vm_info, info, "Checking shadow_pde_access %p\n", (void *)shadow_pd);
+    shadow_pde_access = v3_can_access_pde32pae(shadow_pd, fault_addr, error_code);
+       
+    /* Was the page fault caused by the Guest PDE */
+    if (v3_is_guest_pf(guest_pde_access, shadow_pde_access) == 1) 
+       {
+               PrintDebug(info->vm_info, info, "Injecting PDE pf to guest: (guest access error=%d) (shdw access error=%d)  (pf error code=%d)\n", 
+                  *(uint_t *)&guest_pde_access, *(uint_t *)&shadow_pde_access, *(uint_t *)&error_code);
+               if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) 
+               {
+                       PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
+                       return -1;
+               }
+               return 0;
+    }
+       
+       //Guest PDE ok
+    if (shadow_pde_access == PT_ACCESS_USER_ERROR) 
+       {
+               //
+               // PDE Entry marked non-user
+               //      
+               PrintDebug(info->vm_info, info, "Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n", 
+                       shadow_pde_access, guest_pde_access);
+               if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) 
+               {
+                       PrintError(info->vm_info, info, "Could not inject guest page fault\n");
+                       return -1;
+               }
+               return 0;
+    } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) && 
+              (guest_pde->large_page == 1)) {
 
-static inline int handle_shadow_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
-    PrintError(info->vm_info, info, "32 bit PAE shadow paging not implemented\n");
-    return -1;
+               ((pde32pae_2MB_t *)guest_pde)->dirty = 1;
+               shadow_pde->writable = guest_pde->writable;
+               // shadow_pde_bd->writable = guest_pde->writable;
+               return 0;
+    } else if ((shadow_pde_access != PT_ACCESS_NOT_PRESENT) &&
+              (shadow_pde_access != PT_ACCESS_OK)) 
+       {
+               // inject page fault in guest
+               //
+               //unknown error
+               //
+               if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
+                       PrintError(info->vm_info, info, "Could not inject guest page fault\n");
+                       return -1;
+               }
+               PrintDebug(info->vm_info, info, "Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
+               PrintDebug(info->vm_info, info, "Manual Says to inject page fault into guest\n");
+               return 0;
+    }
+
+       pte32pae_t * shadow_pt = NULL;
+       //pte32pae_t * shadow_pt_bd = NULL;
+       pte32pae_t * guest_pt = NULL;
+       
+    // Get to the next shadow page level
+    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) 
+    {
+        // Check if we can use large pages and the guest memory is properly aligned
+        // to potentially use a large page
+
+        if ((info->use_large_pages == 1) && (guest_pde->large_page == 1)) 
+               {
+                       addr_t guest_pa = BASE_TO_PAGE_ADDR_2MB(((pde32pae_2MB_t *)guest_pde)->page_base_addr);
+                       uint32_t page_size = v3_get_max_page_size(info, guest_pa, PROTECTED_PAE);
+           
+                       if (page_size == PAGE_SIZE_2MB) 
+                       {
+                               if (handle_2MB_shadow32pae_pagefault_pde_32pae(info, fault_addr, error_code,
+                                                      shadow_pd, (pde32pae_2MB_t *)guest_pde) == -1) 
+                               {
+                                       PrintError(info->vm_info, info, "Error handling large pagefault with large page\n");
+                                       return -1;
+                               }
+                               return 0;
+                       } 
+           // Fallthrough to handle the region with small pages
+               }       
+               
+               struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
+               //struct shadow_page_data * shdw_page_bd = create_new_shadow_pt(info);
+               shadow_pt = (pte32pae_t *)V3_VAddr((void *)shdw_page->page_pa);
+               //shadow_pt_bd = (pte32pae_t *)V3_VAddr((void *)shdw_page_bd->page_pa);
+               PrintDebug(info->vm_info, info, "Creating new shadow PTs: %p\n", shadow_pt);
+
+               shadow_pde->present = 1;
+               //shadow_pde_bd->present = 1;
+               shadow_pde->user_page = guest_pde->user_page;   
+               //shadow_pde_bd->user_page = guest_pde->user_page;
+
+               if (guest_pde->large_page == 0) {
+                       shadow_pde->writable = guest_pde->writable;
+                       //shadow_pde_bd->writable = guest_pde->writable;
+               } 
+               else {
+                       ((pde32pae_2MB_t *)guest_pde)->vmm_info = V3_LARGE_PG;
+
+                       if (error_code.write) {
+                               shadow_pde->writable = guest_pde->writable;
+                               //shadow_pde_bd->writable = guest_pde->writable;
+                               ((pde32pae_2MB_t *)guest_pde)->dirty = 1;       
+                       } 
+                       else {
+                               shadow_pde->writable = 0;
+                               //shadow_pde_bd->writable = 0;
+                               ((pde32pae_2MB_t *)guest_pde)->dirty = 0;
+                       } 
+               }               
+       
+               // VMM Specific options
+               shadow_pde->write_through = guest_pde->write_through;
+               shadow_pde->cache_disable = guest_pde->cache_disable;
+               shadow_pde->global_page = guest_pde->global_page;
+               
+               //shadow_pde_bd->write_through = guest_pde->write_through;
+               //shadow_pde_bd->cache_disable = guest_pde->cache_disable;
+               //shadow_pde_bd->global_page = guest_pde->global_page;
+               //
+               guest_pde->accessed = 1;
+               
+               shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
+               //shadow_pde_bd->pt_base_addr = PAGE_BASE_ADDR(shdw_page_bd->page_pa);
+    } 
+       else {
+         if ((info->use_large_pages == 1) && (guest_pde->large_page == 1) && (guest_pde->vmm_info == V3_SHADOW_LARGE_PAGE)) 
+               {
+                       addr_t guest_pa = BASE_TO_PAGE_ADDR_2MB(((pde32pae_2MB_t *)guest_pde)->page_base_addr);
+                       uint32_t page_size = v3_get_max_page_size(info, guest_pa, PROTECTED_PAE);
+                       if (page_size == PAGE_SIZE_2MB) 
+                       {
+                               if (shadow_pde_access == PT_ACCESS_OK) {
+                                       // Inconsistent state...
+                                       // Guest Re-Entry will flush tables and everything should now work
+                                       PrintDebug(info->vm_info, info, "Inconsistent state PDE... Guest re-entry should flush tlb\n");
+                    //PrintDebug(info->vm_info, info, "Bug here: shadow_pde_access is %d page_size is %d\n",
+                                       //         (uint_t)shadow_pde_access,(uint_t)page_size);
+                                       return 0;
+                               }
+                       } 
+               }
+               shadow_pt = (pte32pae_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
+    }
+       
+    if (guest_pde->large_page == 0) 
+       {
+               if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) 
+               {
+                       // Machine check the guest
+                       PrintDebug(info->vm_info, info, "Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
+                       v3_raise_exception(info, MC_EXCEPTION);
+                       return 0;
+               }       
+               if (handle_pte_shadow32pae_pagefault_32pae(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) 
+               {
+                       PrintError(info->vm_info, info, "Error handling Page fault caused by PTE\n");
+                       return -1;
+               }
+       } else {
+               //
+               // back the guest large page with 4KB shadow pages
+               //
+               if (handle_2MB_shadow32pae_pagefault_pte_32pae(info, fault_addr, error_code, shadow_pt, (pde32pae_2MB_t *)guest_pde) == -1) 
+               {
+                       PrintError(info->vm_info, info, "Error handling large pagefault\n");
+                       return -1;
+               }        
+    }  
+       
+       return 0;
 }
+       
+       
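+/* Third level: copy the guest PTE into the shadow PTE, folding in the
+ * host memory region flags and emulating the guest's accessed and dirty
+ * bits (a clean guest page is mapped read-only so that the first write
+ * faults and sets the dirty bit). */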
+static int handle_pte_shadow32pae_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
+                                         pte32pae_t * shadow_pt, pte32pae_t * guest_pt) 
+{
+    pt_access_status_t guest_pte_access;
+    pt_access_status_t shadow_pte_access;
+    pte32pae_t * guest_pte = (pte32pae_t *)&(guest_pt[PTE32PAE_INDEX(fault_addr)]);
+    pte32pae_t * shadow_pte = (pte32pae_t *)&(shadow_pt[PTE32PAE_INDEX(fault_addr)]);
+    addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) +  PAGE_OFFSET(fault_addr);
 
+    PrintDebug(info->vm_info, info, "Handling PTE fault\n");
 
-static inline int handle_shadow_invlpg_32pae(struct guest_info * info, addr_t vaddr) {
-    PrintError(info->vm_info, info, "32 bit PAE shadow paging not implemented\n");
+    struct v3_mem_region * shdw_reg =  v3_get_mem_region(info->vm_info, info->vcpu_id, guest_pa);
+
+    if (shdw_reg == NULL) {
+               // Inject a machine check in the guest
+               PrintDebug(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
+               v3_raise_exception(info, MC_EXCEPTION);
+               return 0;
+    }
+
+    // Check the guest page permissions
+    guest_pte_access = v3_can_access_pte32pae(guest_pt, fault_addr, error_code);
+
+    // Check the shadow page permissions
+    shadow_pte_access = v3_can_access_pte32pae(shadow_pt, fault_addr, error_code);
+  
+    /* Was the page fault caused by the Guest's page tables? */
+    if (v3_is_guest_pf(guest_pte_access, shadow_pte_access) == 1) 
+       {
+
+               PrintDebug(info->vm_info, info, "Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n", 
+                  guest_pte_access, *(uint_t*)&error_code);
+       
+
+               //   inject:
+               if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
+                       PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
+                       return -1;
+               }       
+
+               return 0; 
+    }
+
+  
+  
+    if (shadow_pte_access == PT_ACCESS_OK) 
+       {
+               // Inconsistent state...
+               // Guest Re-Entry will flush page tables and everything should now work
+               PrintDebug(info->vm_info, info, "Inconsistent state PTE... Guest re-entry should flush tlb\n");
+               PrintDebug(info->vm_info, info, "guest_pte_access is %d and shadow_pte_access is %d\n", (uint_t)guest_pte_access, 
+                          (uint_t)shadow_pte_access);
+               PrintDebug(info->vm_info, info, "Error_code: write 0x%x, present 0x%x, user 0x%x, rsvd_access 0x%x, ifetch 0x%x \n",  error_code.write,error_code.present,error_code.user,error_code.rsvd_access,error_code.ifetch);
+               PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
+               PrintGuestPageTree(info,fault_addr,(addr_t)(info->shdw_pg_state.guest_cr3));
+               return 0;
+    }
+
+
+    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) 
+       {
+               // Page Table Entry Not Present
+               PrintDebug(info->vm_info, info, "guest_pa =%p\n", (void *)guest_pa);
+
+               if ((shdw_reg->flags.alloced == 1) && (shdw_reg->flags.read == 1)) 
+               {
+                       addr_t shadow_pa = 0;
+
+                       if (v3_gpa_to_hpa(info, guest_pa, &shadow_pa) == -1) 
+                       {
+                               PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_pa);
+                               return -1;
+                       }
+
+                       shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
+
+                       PrintDebug(info->vm_info, info, "\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));
+      
+                       shadow_pte->present = guest_pte->present;
+                       shadow_pte->user_page = guest_pte->user_page;
+      
+                       //set according to VMM policy
+                       shadow_pte->write_through = guest_pte->write_through;
+                       shadow_pte->cache_disable = guest_pte->cache_disable;
+                       shadow_pte->global_page = guest_pte->global_page;
+                       //
+      
+                       guest_pte->accessed = 1;
+      
+                       if (guest_pte->dirty == 1) {
+                               shadow_pte->writable = guest_pte->writable;
+                       } 
+                       else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
+                               shadow_pte->writable = guest_pte->writable;
+                               guest_pte->dirty = 1;
+                       } 
+                       else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
+                               shadow_pte->writable = 0;
+                       }
+
+                       // Write hooks trump all, and are set Read Only
+                       if (shdw_reg->flags.write == 0) {
+                               shadow_pte->writable = 0;
+                       }       
+
+               } 
+               else {
+                       // Page fault on unhandled memory region
+           
+                       if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
+                               PrintError(info->vm_info, info, "Special Page fault handler returned error for address: %p\n",  (void *)fault_addr);
+                               return -1;
+                       }
+               }
+    } 
+       else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) 
+       {
+               guest_pte->dirty = 1;
+
+               if (shdw_reg->flags.write == 1) {
+                       PrintDebug(info->vm_info, info, "Shadow PTE Write Error\n");
+                       shadow_pte->writable = guest_pte->writable;
+               } 
+               else {
+                       if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
+                               PrintError(info->vm_info, info, "Special Page fault handler returned error for address: %p\n",  (void *)fault_addr);
+                               return -1;
+                       }
+               }
+               return 0;
+    } 
+       else {
+               // Inject page fault into the guest     
+               if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
+                       PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
+                       return -1;
+               }
+
+               PrintError(info->vm_info, info, "PTE Page fault fell through... Not sure if this should ever happen\n");
+               PrintError(info->vm_info, info, "Manual Says to inject page fault into guest\n");
+               return -1;
+    }
+
+    return 0;
+}      
+       
+       
+// Handle a guest 2MB page fault by mapping a 2MB large page in the shadow PDE
+static int handle_2MB_shadow32pae_pagefault_pde_32pae(struct guest_info * info,
+                                    addr_t fault_addr, pf_error_t error_code, 
+                                    pde32pae_t * shadow_pd,
+                                        pde32pae_2MB_t * large_guest_pde)      
+{   
+    pt_access_status_t shadow_pde_access;
+    pde32pae_2MB_t * large_shadow_pde = (pde32pae_2MB_t*)&(shadow_pd[PDE32PAE_INDEX(fault_addr)]);
+    shadow_pde_access = v3_can_access_pde32pae(shadow_pd, fault_addr, error_code);        
+
+    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);  
+
+    PrintDebug(info->vm_info, info, "Handling 2MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
+    PrintDebug(info->vm_info, info, "LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);
+       
+    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
+
+    if (shdw_reg == NULL) {
+               // Inject a machine check in the guest
+               PrintDebug(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
+               v3_raise_exception(info, MC_EXCEPTION);
+               return -1;
+    }
+       
+    if (shadow_pde_access == PT_ACCESS_OK) {
+               // Inconsistent state...
+               // Guest Re-Entry will flush tables and everything should now work
+               PrintDebug(info->vm_info, info, "Inconsistent state 2MB pde... Guest re-entry should flush tlb\n");
+               return 0;
+    }
+
+  
+    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) 
+       {
+               // Get the guest physical address of the fault
+
+               if ((shdw_reg->flags.alloced == 1) && 
+                       (shdw_reg->flags.read  == 1)) 
+               {
+                       addr_t shadow_pa = 0;
+
+
+                       if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) 
+                       {
+                               PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_fault_pa);
+                               return -1;
+                       }
+
+                       PrintDebug(info->vm_info, info, "shadow PA = %p\n", (void *)shadow_pa);
+
+
+            large_guest_pde->vmm_info = V3_SHADOW_LARGE_PAGE; /* For invalidations */
+                       //shadow pde (last bit 0) gets the half with smaller address and its buddy gets the rest
+            large_shadow_pde->page_base_addr = PAGE_BASE_ADDR_2MB(shadow_pa);
+                       //large_shadow_pde_bd->page_base_addr =(PAGE_BASE_ADDR_4MB(shadow_pa)<<1)|1; 
+                       
+                       // large_shadow_pde->large_page = 1;
+            large_shadow_pde->present = 1;
+            large_shadow_pde->user_page = 1;
+                       
+                       // large_shadow_pde_bd->large_page = 1;
+            // large_shadow_pde_bd->present = 1;
+            // large_shadow_pde_bd->user_page = 1;
+
+           PrintDebug(info->vm_info, info, "\tMapping shadow pages (%p)\n", 
+                                               (void *)BASE_TO_PAGE_ADDR_2MB(large_shadow_pde->page_base_addr));
+
+            if (shdw_reg->flags.write == 0) {
+                large_shadow_pde->writable = 0;
+               // large_shadow_pde_bd->writable = 0;
+            } else {
+                // large_shadow_pde_bd->writable = 1;
+               large_shadow_pde->writable = 1;
+            }
+
+                       //set according to VMM policy
+                       large_shadow_pde->write_through = large_guest_pde->write_through;
+                       large_shadow_pde->cache_disable = large_guest_pde->cache_disable;
+                       large_shadow_pde->global_page = large_guest_pde->global_page;
+
+                       //large_shadow_pde_bd->write_through = large_guest_pde->write_through;
+                       //large_shadow_pde_bd->cache_disable = large_guest_pde->cache_disable;
+                       //large_shadow_pde_bd->global_page = large_guest_pde->global_page;                      
+               } 
+               else {
+                       if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+                               PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+                               return -1;
+                       }
+               }
+       } 
+       else if (shadow_pde_access == PT_ACCESS_WRITE_ERROR) 
+       {
+
+               if (shdw_reg->flags.write == 0) {
+                       if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+                               PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+                               return -1;
+                       }       
+               }
+
+    } 
+       else {
+               PrintError(info->vm_info, info, "Error in large page fault handler...\n");
+               PrintError(info->vm_info, info, "This case should have been handled at the top level handler\n");
+               return -1;
+    }
+
+       PrintDebug(info->vm_info, info, "Returning from large page->large page fault handler\n");
+       return 0;
+}      
+       
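+/* Handle a fault on a guest 2MB page whose shadow backing is a PT of
+ * 4KB entries (used when the host memory map cannot supply an aligned
+ * 2MB region).  Permissions come from the guest PDE; only the 4KB entry
+ * covering fault_addr is filled in. */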
+static int handle_2MB_shadow32pae_pagefault_pte_32pae(struct guest_info * info, 
+                                             addr_t fault_addr, pf_error_t error_code, 
+                                             pte32pae_t * shadow_pt, pde32pae_2MB_t * large_guest_pde) 
+{
+    pt_access_status_t shadow_pte_access = v3_can_access_pte32pae(shadow_pt, fault_addr, error_code);
+    pte32pae_t * shadow_pte = (pte32pae_t *)&(shadow_pt[PTE32PAE_INDEX(fault_addr)]);
+    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
+    //  struct shadow_page_state * state = &(info->shdw_pg_state);
+
+    PrintDebug(info->vm_info, info, "Handling 2MB PTE fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
+    PrintDebug(info->vm_info, info, "ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
+
+    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
+
+    if (shdw_reg == NULL) {
+               // Inject a machine check in the guest
+               PrintError(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
+               v3_raise_exception(info, MC_EXCEPTION);
+               return 0;
+    }
+
+    if (shadow_pte_access == PT_ACCESS_OK) {
+               // Inconsistent state...
+               // Guest Re-Entry will flush tables and everything should now work
+               PrintDebug(info->vm_info, info, "Inconsistent state 2MB PTE... Guest re-entry should flush tlb\n");
+               //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
+               return 0;
+    }
+
+  
+    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
+       // Get the guest physical address of the fault
+
+               if ((shdw_reg->flags.alloced == 1) && 
+                       (shdw_reg->flags.read == 1)) {
+                       addr_t shadow_pa = 0;
+
+                       if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
+                               PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_fault_pa);
+                               return -1;
+                       }
+
+                       shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
+
+                       shadow_pte->present = 1;
+
+                       /* We are assuming that the PDE entry has precedence,
+                        * so the shadow PDE will mirror the guest PDE settings
+                        * and we don't have to worry about them here.
+                        * Allow everything.
+                        */
+                       shadow_pte->user_page = 1;
+
+                       if (shdw_reg->flags.write == 0) {
+                               shadow_pte->writable = 0;
+                       } else {
+                               shadow_pte->writable = 1;
+                       }
+
+                       //set according to VMM policy
+                       shadow_pte->write_through = large_guest_pde->write_through;
+                       shadow_pte->cache_disable = large_guest_pde->cache_disable;
+                       shadow_pte->global_page = large_guest_pde->global_page;
+                       //
+      
+               } else {
+                       if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+                               PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+                               return -1;
+                       }
+               }
+    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
+           if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
+                       PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
+                       return -1;
+               }
+    } else {
+               PrintError(info->vm_info, info, "Error in large page fault handler...\n");
+               PrintError(info->vm_info, info, "This case should have been handled at the top level handler\n");
+               return -1;
+    }
+
+    //  PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
+    PrintDebug(info->vm_info, info, "Returning from large page->small page fault handler\n");
+    return 0;
+}      
+
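+/* Drill callback for the 32PAE guest's invlpg path; same return
+ * convention as invalidation_cb32_64 in vmm_shdw_pg_tlb_32.h. */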
+static int invalidation_cb32pae_64(struct guest_info * info, page_type_t type, 
+                             addr_t vaddr, addr_t page_ptr, addr_t page_pa, 
+                             void * private_data) {
+
+    switch (type) {
+       case PAGE_PDP32PAE:
+           {
+                       pdpe32pae_t * pdp = (pdpe32pae_t *)page_ptr;
+                       pdpe32pae_t * pdpe = &(pdp[PDPE32PAE_INDEX(vaddr)]);
+
+                       if (pdpe->present == 0) {
+                               return 1;
+                       }
+     
+                       if (pdpe->vmm_info == V3_LARGE_PG) {
+                               PrintError(info->vm_info, info, "1 Gigabyte pages not supported\n");
+                               return -1;
+                       }
+
+                       return 0;
+           }
+       case PAGE_PD32PAE:
+           {
+                       pde32pae_t * pd = (pde32pae_t *)page_ptr;
+                       pde32pae_t * pde = &(pd[PDE32PAE_INDEX(vaddr)]);
+                       //pde32pae_t * pde_bd = &(pd[GET_BUDDY(PDE32PAE_INDEX(vaddr))]);
+                       if (pde->present == 0) {
+                               return 1;
+                       }
+      
+                       if (pde->vmm_info == V3_LARGE_PG || pde->vmm_info == V3_SHADOW_LARGE_PAGE) {
+                               pde->present = 0;
+                               //pde_bd->present = 0;
+                               return 1;
+                       }
+
+                       return 0;
+           }
+       case PAGE_PT32PAE:
+           {
+                       pte32pae_t * pt = (pte32pae_t *)page_ptr;
+
+                       pt[PTE32PAE_INDEX(vaddr)].present = 0;
+
+                       return 1;
+           }
+       default:
+           PrintError(info->vm_info, info, "Invalid Page Type\n");
+           return -1;
+
+    }
+
+    // should not get here
+    PrintError(info->vm_info, info, "Should not get here....\n");
     return -1;
+}      
+
+static inline int handle_shadow_invlpg_32pae(struct guest_info * info, addr_t vaddr) {
+    PrintDebug(info->vm_info, info, "INVLPG32PAE - %p\n",(void*)vaddr);
+
+    int ret =  v3_drill_host_pt_32pae(info, info->ctrl_regs.cr3, vaddr, invalidation_cb32pae_64, NULL);
+    if (ret == -1) {
+               PrintError(info->vm_info, info, "Page table drill returned error.... \n");
+               PrintHostPageTree(info, vaddr, info->ctrl_regs.cr3);
+    }
+
+    return (ret == -1) ? -1 : 0; 
 }
 
 
index e49e4d4..5c98848 100644 (file)
 v3_cpu_mode_t v3_get_vm_cpu_mode(struct guest_info * info) {
     struct cr0_32 * cr0;
     struct efer_64 * efer;
-    struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
     struct v3_segment * cs = &(info->segments.cs);
-
+    struct cr4_32 * cr4;
 
     if (info->shdw_pg_mode == SHADOW_PAGING) {
        cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
        efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
+        cr4 = (struct cr4_32 *)&(info->shdw_pg_state.guest_cr4);  // hardware CR4 has PAE forced on; the guest's mode comes from its own copy
     } else if (info->shdw_pg_mode == NESTED_PAGING) {
        cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
        efer = (struct efer_64 *)&(info->ctrl_regs.efer);
+        cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
     } else {
         PrintError(info->vm_info, info, "Invalid Paging Mode...\n");
        V3_ASSERT(info->vm_info, info, 0);
index f09bfb9..2a3ebbd 100644 (file)
@@ -428,13 +428,61 @@ int v3_handle_cr3_read(struct guest_info * info) {
 }
 
 
-// We don't need to virtualize CR4, all we need is to detect the activation of PAE
+// Return the guest's view of CR4.  The shadow pager keeps PAE enabled in the
+// hardware CR4, so reads must see the virtualized copy, not the real register.
 int v3_handle_cr4_read(struct guest_info * info) {
-    PrintError(info->vm_info, info, "CR4 Read not handled\n");
-    // Do nothing...
+    uchar_t instr[15];
+    int ret;
+    struct x86_instr dec_instr;
+    
+    if (info->mem_mode == PHYSICAL_MEM) { 
+       ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+    } else { 
+       ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+    }
+    
+    if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
+       PrintError(info->vm_info, info, "Could not decode instruction\n");
+       return -1;
+    }
+    if (dec_instr.op_type != V3_OP_MOVCR2) {
+       PrintError(info->vm_info, info, "Invalid opcode in read CR4\n");
+       return -1;
+    }
+       
+       if (info->shdw_pg_mode == SHADOW_PAGING) {
+           
+           if ((v3_get_vm_cpu_mode(info) == LONG) || 
+               (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
+               struct cr4_64 * dst_reg = (struct cr4_64 *)(dec_instr.dst_operand.operand);
+               struct cr4_64 * guest_cr4 = (struct cr4_64 *)&(info->ctrl_regs.cr4);
+               *dst_reg = *guest_cr4;
+           } 
+           else {
+               struct cr4_32 * dst_reg = (struct cr4_32 *)(dec_instr.dst_operand.operand);
+               struct cr4_32 * guest_cr4 = (struct cr4_32 *)&(info->shdw_pg_state.guest_cr4);
+               *dst_reg = *guest_cr4;
+           }
+           
+       } else if (info->shdw_pg_mode == NESTED_PAGING) {
+           
+           
+           if ((v3_get_vm_cpu_mode(info) == LONG) || 
+               (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
+               struct cr4_64 * dst_reg = (struct cr4_64 *)(dec_instr.dst_operand.operand);
+               struct cr4_64 * guest_cr4 = (struct cr4_64 *)&(info->ctrl_regs.cr4);
+               *dst_reg = *guest_cr4;
+           } else {
+               struct cr4_32 * dst_reg = (struct cr4_32 *)(dec_instr.dst_operand.operand);
+               struct cr4_32 * guest_cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
+               *dst_reg = *guest_cr4;
+           }
+       }
+       
+       info->rip += dec_instr.instr_length;
     return 0;
 }
 
+
 int v3_handle_cr4_write(struct guest_info * info) {
     uchar_t instr[15];
     int ret;
@@ -463,7 +511,7 @@ int v3_handle_cr4_write(struct guest_info * info) {
 
     if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) { 
        struct cr4_32 * new_cr4 = (struct cr4_32 *)(dec_instr.src_operand.operand);
-       struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
+       struct cr4_32 * cr4 = (struct cr4_32 *)&(info->shdw_pg_state.guest_cr4);
        
        // if pse, pge, or pae have changed while PG (in any mode) is on
        // the side effect is a TLB flush, which means we need to
@@ -483,15 +531,15 @@ int v3_handle_cr4_write(struct guest_info * info) {
 
     if ((cpu_mode == PROTECTED) || (cpu_mode == PROTECTED_PAE)) {
        struct cr4_32 * new_cr4 = (struct cr4_32 *)(dec_instr.src_operand.operand);
-       struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
-       
+       struct cr4_32 * shadow_cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
+       struct cr4_32 * guest_cr4 = (struct cr4_32 *)&(info->shdw_pg_state.guest_cr4);
        PrintDebug(info->vm_info, info, "OperandVal = %x, length = %d\n", *(uint_t *)new_cr4, dec_instr.src_operand.size);
-       PrintDebug(info->vm_info, info, "Old CR4=%x\n", *(uint_t *)cr4);
+       PrintDebug(info->vm_info, info, "Old guest CR4=%x\n", *(uint_t *)guest_cr4);
        
        if ((info->shdw_pg_mode == SHADOW_PAGING)) { 
            if (v3_get_vm_mem_mode(info) == PHYSICAL_MEM) {
                
-               if ((cr4->pae == 0) && (new_cr4->pae == 1)) {
+               if ((guest_cr4->pae == 0) && (new_cr4->pae == 1)) {
                    PrintDebug(info->vm_info, info, "Creating PAE passthrough tables\n");
                    
                    // create 32 bit PAE direct map page table
@@ -503,7 +551,7 @@ int v3_handle_cr4_write(struct guest_info * info) {
                    // reset cr3 to new page tables
                    info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
                    
-               } else if ((cr4->pae == 1) && (new_cr4->pae == 0)) {
+               } else if ((guest_cr4->pae == 1) && (new_cr4->pae == 0)) {
                    // Create passthrough standard 32bit pagetables
                    PrintError(info->vm_info, info, "Switching From PAE to Protected mode not supported\n");
                    return -1;
@@ -511,8 +559,10 @@ int v3_handle_cr4_write(struct guest_info * info) {
            }
        }
        
-       *cr4 = *new_cr4;
-       PrintDebug(info->vm_info, info, "New CR4=%x\n", *(uint_t *)cr4);
+       *guest_cr4 = *new_cr4;
+       *shadow_cr4 = *guest_cr4;
+       shadow_cr4->pae = 1;   // always on for the shadow pager
+       PrintDebug(info->vm_info, info, "New guest CR4=%x and shadow CR4=%x\n", *(uint_t *)guest_cr4,*(uint_t*)shadow_cr4);
        
     } else if ((cpu_mode == LONG) || (cpu_mode == LONG_32_COMPAT)) {
        struct cr4_64 * new_cr4 = (struct cr4_64 *)(dec_instr.src_operand.operand);
index adae3c4..d4b4536 100644 (file)
@@ -453,7 +453,7 @@ void PrintGuestPageTables(struct guest_info * info, addr_t cr3) {
 }
 
 void PrintHostPageTree(struct guest_info * info,  addr_t virtual_addr, addr_t cr3) {
-    PrintDebug(info->vm_info, info, "CR3: %p\n", (void *)cr3);
+    PrintDebug(info->vm_info, info, "CR3: %p, cpu mode is %x\n", (void *)cr3, info->cpu_mode);
     switch (info->cpu_mode) {
        case PROTECTED:
            v3_drill_host_pt_32(info, cr3, virtual_addr, print_page_tree_cb, NULL);