Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

Checking out the other branches works the same way; see the example below.
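For example, to track a release branch (the branch name below is illustrative; run "git branch -r" to list the branches that actually exist):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2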


Actually fixed page alignment computation to be correct, moved into vmm_mem.h for...
Patrick Bridges [Thu, 12 Aug 2010 17:05:05 +0000 (11:05 -0600)]
palacios/include/palacios/vmm_mem.h
palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h
palacios/src/palacios/vmm_mem.c

diff --git a/palacios/include/palacios/vmm_mem.h b/palacios/include/palacios/vmm_mem.h
index 651e82e..32d6ae6 100644
--- a/palacios/include/palacios/vmm_mem.h
+++ b/palacios/include/palacios/vmm_mem.h
@@ -110,7 +110,7 @@ struct v3_mem_region * v3_get_next_mem_region(struct v3_vm_info * vm, uint16_t c
 void v3_print_mem_map(struct v3_vm_info * vm);
 
 uint32_t v3_get_max_page_size(struct guest_info * core, addr_t fault_addr, uint32_t req_size);
-
+uint32_t v3_compute_page_alignment(addr_t addr);
 
 
 #endif // ! __V3VEE__
diff --git a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h
index 8b30540..011a882 100644
--- a/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h
+++ b/palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h
@@ -271,44 +271,6 @@ static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t faul
     return 0;
 }
 
-// For an address on a page of size page_size, compute the actual alignment
-// of the physical page it maps to
-int compute_physical_alignment(addr_t va, addr_t pa, uint32_t page_size)
-{
-    addr_t va_offset, pa_base;
-    switch (page_size) {
-       case PAGE_SIZE_1GB:
-           va_offset = PAGE_OFFSET_1GB(va);
-           break;
-       case PAGE_SIZE_4MB:
-           va_offset = PAGE_OFFSET_4MB(va);
-           break;
-       case PAGE_SIZE_2MB:
-           va_offset = PAGE_OFFSET_2MB(va);
-           break;
-       case PAGE_SIZE_4KB:
-           return 1;
-       default:
-           PrintError("Invalid page size in %s.\n", __FUNCTION__);
-           return 0;
-    }
-    pa_base = pa - va_offset;
-
-    if (PAGE_OFFSET_1GB(pa_base) == 0) {
-        return PAGE_SIZE_1GB;
-    } else if (PAGE_OFFSET_4MB(pa_base) == 0) {
-        return PAGE_SIZE_4MB;
-    } else if (PAGE_OFFSET_2MB(pa_base) == 0) {
-       return PAGE_SIZE_2MB;
-    } else if (PAGE_OFFSET_4KB(pa_base) == 0) {
-       return PAGE_SIZE_4KB;
-    } else {
-        PrintError("Incorrection alignment setup or calculation in %s.\n", __FUNCTION__);
-       return 0;
-    }
-}
-
 static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd) {
     pt_access_status_t guest_pde_access;
@@ -379,18 +341,20 @@ static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault
         if (info->use_large_pages && guest_pde->large_page) {
             // Check underlying physical memory map to see if a large page is viable
            addr_t guest_pa = BASE_TO_PAGE_ADDR_2MB(((pde64_2MB_t *)guest_pde)->page_base_addr);
-           if ((compute_physical_alignment(fault_addr, guest_pa, PAGE_SIZE_2MB) >= PAGE_SIZE_2MB)
-               && (v3_get_max_page_size(info, guest_pa, PAGE_SIZE_2MB) >= PAGE_SIZE_2MB)) {
-               // should be able to use a large page.
-               if (handle_2MB_shadow_pagefault_pde_64(info, fault_addr, error_code, shadow_pde_access,
-                                                      (pde64_2MB_t *)shadow_pde, (pde64_2MB_t *)guest_pde) ==  0) {
-                   return 0;
-               } else {
-                   PrintError("Error handling large pagefault with large page\n");
-                   return -1;
-               }
+           addr_t host_pa;
+           if (v3_get_max_page_size(info, guest_pa, PAGE_SIZE_2MB) < PAGE_SIZE_2MB) {
+               PrintDebug("Underlying physical memory map doesn't allow use of a large page.\n");
+               // Fallthrough to small pages
+           } else if ((v3_gpa_to_hpa(info, guest_pa, &host_pa) != 0)
+                      || (v3_compute_page_alignment(host_pa) < PAGE_SIZE_2MB)) {
+               PrintDebug("Host memory alignment doesn't allow use of a large page.\n");
+               // Fallthrough to small pages
+           } else if (handle_2MB_shadow_pagefault_pde_64(info, fault_addr, error_code, shadow_pde_access,
+                                                      (pde64_2MB_t *)shadow_pde, (pde64_2MB_t *)guest_pde) == 0) {
+               return 0;
            } else {
-               PrintDebug("Alignment or underlying physical memory map doesn't allow use of a large page.\n");
+               PrintError("Error handling large pagefault with large page\n");
+               return -1;
            }
            // Fallthrough to handle the region with small pages
        }
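
A note on the hunk above: the patched handler separates three distinct failure modes before committing to a 2MB shadow mapping: the guest memory map may forbid a large page, the guest-to-host translation may fail, and the host backing page may not be 2MB-aligned; any of the three falls through to 4KB pages. Below is a minimal standalone C sketch of that decision chain. v3_get_max_page_size, v3_gpa_to_hpa, and v3_compute_page_alignment come from the patch; the stub definitions, _stub names, and simplified signatures here are illustrative assumptions, not the Palacios APIs.

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t addr_t;

#define PAGE_SIZE_4KB 0x1000UL
#define PAGE_SIZE_2MB 0x200000UL

/* Stub: assume the guest memory map always permits a 2MB mapping. */
static uint32_t v3_get_max_page_size_stub(addr_t gpa, uint32_t req_size) {
    (void)gpa;
    return req_size;
}

/* Stub: identity-map guest physical to host physical, always succeeding. */
static int v3_gpa_to_hpa_stub(addr_t gpa, addr_t * hpa) {
    *hpa = gpa;
    return 0;
}

/* 2MB alignment test standing in for v3_compute_page_alignment(). */
static int is_2mb_aligned(addr_t addr) {
    return (addr & (PAGE_SIZE_2MB - 1)) == 0;
}

/* Mirrors the patched decision chain: the guest map must allow a 2MB page,
 * the gpa->hpa translation must succeed, and the host page must be
 * 2MB-aligned; any failure falls through to 4KB pages. */
static int can_use_large_page(addr_t guest_pa) {
    addr_t host_pa;

    if (v3_get_max_page_size_stub(guest_pa, PAGE_SIZE_2MB) < PAGE_SIZE_2MB) {
        return 0; /* underlying memory map forbids a large page */
    }
    if ((v3_gpa_to_hpa_stub(guest_pa, &host_pa) != 0) || !is_2mb_aligned(host_pa)) {
        return 0; /* host backing is absent or only 4KB-aligned */
    }
    return 1;
}

int main(void) {
    printf("0x200000 -> %d (2MB-aligned host page)\n", can_use_large_page(0x200000));
    printf("0x201000 -> %d (host page only 4KB-aligned)\n", can_use_large_page(0x201000));
    return 0;
}

Note that in the patch itself guest_pa comes from BASE_TO_PAGE_ADDR_2MB() and is therefore 2MB-aligned by construction; the alignment that can actually fail is that of the host page it maps to, which is why the patch computes alignment on host_pa rather than on the guest address.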
diff --git a/palacios/src/palacios/vmm_mem.c b/palacios/src/palacios/vmm_mem.c
index 78ee376..4a33825 100644
--- a/palacios/src/palacios/vmm_mem.c
+++ b/palacios/src/palacios/vmm_mem.c
@@ -473,6 +473,23 @@ uint32_t v3_get_max_page_size(struct guest_info * core, addr_t fault_addr, uint3
     return page_size;
 }
 
+// For an address on a page of size page_size, compute the actual alignment
+// of the physical page it maps to
+uint32_t v3_compute_page_alignment(addr_t page_addr)
+{
+    if (PAGE_OFFSET_1GB(page_addr) == 0) {
+        return PAGE_SIZE_1GB;
+    } else if (PAGE_OFFSET_4MB(page_addr) == 0) {
+        return PAGE_SIZE_4MB;
+    } else if (PAGE_OFFSET_2MB(page_addr) == 0) {
+       return PAGE_SIZE_2MB;
+    } else if (PAGE_OFFSET_4KB(page_addr) == 0) {
+       return PAGE_SIZE_4KB;
+    } else {
+        PrintError("Non-page aligned address passed to %s.\n", __FUNCTION__);
+       return 0;
+    }
+}
 
 void v3_print_mem_map(struct v3_vm_info * vm) {
     struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
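
The relocated helper takes a single physical address and classifies it by the largest page size it is naturally aligned to, checking from largest to smallest. Here is a self-contained sketch of the same computation, assuming the PAGE_OFFSET_xx() macros mask off the low-order bits below the given page size; the macro and size definitions below are illustrative assumptions, and only the function body follows the patch.

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t addr_t;

#define PAGE_SIZE_4KB 0x1000UL
#define PAGE_SIZE_2MB 0x200000UL
#define PAGE_SIZE_4MB 0x400000UL
#define PAGE_SIZE_1GB 0x40000000UL

/* Assumed semantics: offset of 'a' within a page of the given size. */
#define PAGE_OFFSET_4KB(a) ((a) & (PAGE_SIZE_4KB - 1))
#define PAGE_OFFSET_2MB(a) ((a) & (PAGE_SIZE_2MB - 1))
#define PAGE_OFFSET_4MB(a) ((a) & (PAGE_SIZE_4MB - 1))
#define PAGE_OFFSET_1GB(a) ((a) & (PAGE_SIZE_1GB - 1))

/* Largest natural page alignment of page_addr, largest size first;
 * returns 0 for an address that is not page-aligned at all. */
static uint32_t v3_compute_page_alignment(addr_t page_addr) {
    if (PAGE_OFFSET_1GB(page_addr) == 0) return PAGE_SIZE_1GB;
    if (PAGE_OFFSET_4MB(page_addr) == 0) return PAGE_SIZE_4MB;
    if (PAGE_OFFSET_2MB(page_addr) == 0) return PAGE_SIZE_2MB;
    if (PAGE_OFFSET_4KB(page_addr) == 0) return PAGE_SIZE_4KB;
    return 0;
}

int main(void) {
    printf("%#lx\n", (unsigned long) v3_compute_page_alignment(0x40000000)); /* 1GB  */
    printf("%#lx\n", (unsigned long) v3_compute_page_alignment(0x200000));   /* 2MB  */
    printf("%#lx\n", (unsigned long) v3_compute_page_alignment(0x1000));     /* 4KB  */
    return 0;
}

Checking from largest to smallest matters: every 1GB-aligned address is also 4MB-, 2MB-, and 4KB-aligned, so testing in the opposite order would always answer 4KB.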