Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way, as shown below.
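For example, to track any other remote branch, first list the available branches and then check one out by name (substitute the actual branch name for <branch>; the placeholder is ours, not a branch in the repository):

  git branch -r
  git checkout --track -b <branch> origin/<branch>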


Merge branch 'devel' of ssh://newskysaw.cs.northwestern.edu/home/palacios/palacios...
diff --git a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h
index 83bfb10..e638056 100644
@@ -133,25 +133,21 @@ static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fa
 
     if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
 
-        if (info->use_large_pages && guest_pde->large_page) {
+        if ((info->use_large_pages == 1) && (guest_pde->large_page == 1)) {
             // Check underlying physical memory map to see if a large page is viable
-            addr_t guest_pa = BASE_TO_PAGE_ADDR_4MB(((pde32_4MB_t *)guest_pde)->page_base_addr);
-            addr_t host_pa;
-            if (v3_get_max_page_size(info, guest_pa, PAGE_SIZE_4MB) < PAGE_SIZE_4MB) {
-                PrintDebug("Underlying physical memory map doesn't allow use of a large page.\n");
-                // Fallthrough to small pages
-            } else if ((v3_gpa_to_hpa(info, guest_pa, &host_pa) != 0)
-                       || (v3_compute_page_alignment(host_pa) < PAGE_SIZE_4MB)) {
-                PrintDebug("Host memory alignment doesn't allow use of a large page.\n");
-                // Fallthrough to small pages
-            } else if (handle_4MB_shadow_pagefault_pde_32(info, fault_addr, error_code, shadow_pde_access,
-                                                          (pde32_4MB_t *)shadow_pde, (pde32_4MB_t *)guest_pde) == 0) {
+           addr_t guest_pa = BASE_TO_PAGE_ADDR_4MB(((pde32_4MB_t *)guest_pde)->page_base_addr);
+           uint32_t page_size = v3_get_max_page_size(info, guest_pa, PROTECTED);
+           
+           if (page_size == PAGE_SIZE_4MB) {
+               PrintDebug("using large page for fault_addr %p (gpa=%p)\n", (void *)fault_addr, (void *)guest_pa); 
+               if (handle_4MB_shadow_pagefault_pde_32(info, fault_addr, error_code, shadow_pde_access,
+                                                      (pde32_4MB_t *)shadow_pde, (pde32_4MB_t *)guest_pde) == -1) {
+                   PrintError("Error handling large pagefault with large page\n");
+                   return -1;
+               }
+               
                 return 0;
-            } else {
-                PrintError("Error handling large pagefault with large page\n");
-                return -1;
-            }
-            // Fallthrough to handle the region with small pages
+           }
         }
 
        struct shadow_page_data * shdw_page =  create_new_shadow_pt(info);
@@ -176,7 +172,6 @@ static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fa
            }
        }
       
-
        // VMM Specific options
        shadow_pde->write_through = guest_pde->write_through;
        shadow_pde->cache_disable = guest_pde->cache_disable;
@@ -185,14 +180,12 @@ static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fa
       
        guest_pde->accessed = 1;
       
-
        shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
     } else {
        shadow_pt = (pte32_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
     }
 
-
-      
+    
     if (guest_pde->large_page == 0) {
        if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) {
            // Machine check the guest
@@ -225,7 +218,7 @@ static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault
     pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
     addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) +  PAGE_OFFSET(fault_addr);
 
-    struct v3_mem_region * shdw_reg =  v3_get_mem_region(info->vm_info, info->cpu_id, guest_pa);
+    struct v3_mem_region * shdw_reg =  v3_get_mem_region(info->vm_info, info->vcpu_id, guest_pa);
 
     if (shdw_reg == NULL) {
        // Inject a machine check in the guest
@@ -360,7 +353,7 @@ static int handle_4MB_shadow_pagefault_pte_32(struct guest_info * info,
     PrintDebug("Handling 4MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
     PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
 
-    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
+    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
 
  
     if (shdw_reg == NULL) {
@@ -455,7 +448,7 @@ static int handle_4MB_shadow_pagefault_pde_32(struct guest_info * info,
     PrintDebug("Handling 4MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
     PrintDebug("LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);
 
-    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
+    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
 
  
     if (shdw_reg == NULL) {
@@ -486,7 +479,8 @@ static int handle_4MB_shadow_pagefault_pde_32(struct guest_info * info,
                return -1;
            }
 
-           PrintDebug("\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));
+           PrintDebug("shadow PA = %p\n", (void *)shadow_pa);
+
 
             large_guest_pde->vmm_info = V3_LARGE_PG; /* For invalidations */
             large_shadow_pde->page_base_addr = PAGE_BASE_ADDR_4MB(shadow_pa);
@@ -494,6 +488,8 @@ static int handle_4MB_shadow_pagefault_pde_32(struct guest_info * info,
             large_shadow_pde->present = 1;
             large_shadow_pde->user_page = 1;
 
+           PrintDebug("\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR_4MB(large_shadow_pde->page_base_addr));
+
             if (shdw_reg->flags.write == 0) {
                 large_shadow_pde->writable = 0;
             } else {
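
For context on the first hunk: the old code checked large-page viability, gpa-to-hpa translation, and host alignment separately, falling through to small pages on each failure; the new code instead relies on a single v3_get_max_page_size(info, guest_pa, PROTECTED) query and takes the 4MB path only when it reports PAGE_SIZE_4MB. The sketch below is a minimal standalone illustration of that control flow, not Palacios code; the types and the max_page_size() helper are stand-ins for the real guest_info / v3_get_max_page_size() machinery.

/*
 * Standalone sketch (NOT Palacios code): illustrates the simplified
 * large-page decision introduced by this commit.  The real handler calls
 * v3_get_max_page_size(info, guest_pa, PROTECTED) and takes the 4MB path
 * only when it returns PAGE_SIZE_4MB; everything else falls through to
 * 4KB shadow pages.  max_page_size() below is a stand-in for illustration.
 */
#include <stdio.h>

#define PAGE_SIZE_4KB 0x1000UL
#define PAGE_SIZE_4MB 0x400000UL

/* Stand-in for v3_get_max_page_size(): report the largest page size the
 * underlying physical memory map allows for this guest physical address. */
static unsigned long max_page_size(unsigned long guest_pa) {
    /* Pretend only the first 4MB region is backed by suitably aligned,
     * contiguous host memory. */
    return (guest_pa < PAGE_SIZE_4MB) ? PAGE_SIZE_4MB : PAGE_SIZE_4KB;
}

static int handle_pde_fault(unsigned long guest_pa, int use_large_pages,
                            int guest_large_page) {
    if (use_large_pages && guest_large_page) {
        if (max_page_size(guest_pa) == PAGE_SIZE_4MB) {
            printf("mapping gpa %#lx with a 4MB shadow page\n", guest_pa);
            return 0;               /* handled with one large mapping */
        }
        /* otherwise fall through and back the region with 4KB pages */
    }
    printf("mapping gpa %#lx with 4KB shadow pages\n", guest_pa);
    return 0;
}

int main(void) {
    handle_pde_fault(0x200000, 1, 1);   /* large page viable  */
    handle_pde_fault(0x800000, 1, 1);   /* falls back to 4KB  */
    return 0;
}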