Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way, as in the example below.
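For example, to track one of the release branches (the branch name below is only illustrative; list the actual remote branches first with git branch -r):

  git branch -r
  git checkout --track -b Release-1.3 origin/Release-1.3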


Refactoring and additions to direct paging (nested and passthrough)
diff --git a/palacios/src/palacios/vmm_mem.c b/palacios/src/palacios/vmm_mem.c
index d934398..7bf0019 100644
@@ -27,6 +27,7 @@
 #include <palacios/vmm_shadow_paging.h>
 #include <palacios/vmm_direct_paging.h>
 
+#include <interfaces/vmm_numa.h>
 
 uint64_t v3_mem_block_size = V3_CONFIG_MEM_BLOCK_SIZE;
 
@@ -35,7 +36,7 @@ struct v3_mem_region * v3_get_base_region(struct v3_vm_info * vm, addr_t gpa) {
     struct v3_mem_map * map = &(vm->mem_map);
     uint32_t block_index = gpa / v3_mem_block_size;
 
-    if (gpa > (map->num_base_regions * v3_mem_block_size) ||
+    if ((gpa >= (map->num_base_regions * v3_mem_block_size)) ||
         (block_index >= map->num_base_regions)) {
         PrintError(vm, VCORE_NONE, "Guest Address Exceeds Base Memory Size (ga=0x%p), (limit=0x%p)\n", 
                    (void *)gpa, (void *)vm->mem_size);
@@ -104,6 +105,36 @@ static int gpa_to_node_from_cfg(struct v3_vm_info * vm, addr_t gpa) {
     return -1;
 }
 
+//
+// This code parallels that in vmm_shadow_paging.c:v3_init_shdw_impl()
+// and vmm_config.c:determine_paging_mode.  The paging mode is determined
+// much later than the allocation of the guest memory regions, so we need
+// to decide here whether they must be below 4 GB or not.
+static int will_use_shadow_paging(struct v3_vm_info *vm)
+{
+    v3_cfg_tree_t * pg_cfg = v3_cfg_subtree(vm->cfg_data->cfg, "paging");
+    char * pg_mode = v3_cfg_val(pg_cfg, "mode");
+   
+    if (pg_mode == NULL) { 
+       return 1; // did not ask, get shadow
+    } else {
+       if (strcasecmp(pg_mode, "nested") == 0) {
+           extern v3_cpu_arch_t v3_mach_type;
+           if ((v3_mach_type == V3_SVM_REV3_CPU) || 
+               (v3_mach_type == V3_VMX_EPT_CPU) ||
+               (v3_mach_type == V3_VMX_EPT_UG_CPU)) {
+               return 0; // ask for nested, get nested
+           } else { 
+               return 1; // ask for nested, get shadow
+           }
+       } else if (strcasecmp(pg_mode, "shadow") != 0) { 
+           return 1;     // ask for something else, get shadow
+       } else {
+           return 1;     // ask for shadow, get shadow
+       }
+    }
+}
 
 
 int v3_init_mem_map(struct v3_vm_info * vm) {
@@ -141,13 +172,12 @@ int v3_init_mem_map(struct v3_vm_info * vm) {
         node_id = gpa_to_node_from_cfg(vm, region->guest_start);
         
         V3_Print(vm, VCORE_NONE, "Allocating block %d on node %d\n", i, node_id);
-        
-        if (node_id != -1) {
-            region->host_addr = (addr_t)V3_AllocPagesNode(block_pages, node_id);
-        } else {
-            region->host_addr = (addr_t)V3_AllocPages(block_pages);
-        }
 
+       region->host_addr = (addr_t)V3_AllocPagesExtended(block_pages,
+                                                         PAGE_SIZE_4KB,
+                                                         node_id,
+                                                         0); // no constraints 
+                                                            
         if ((void *)region->host_addr == NULL) { 
             PrintError(vm, VCORE_NONE, "Could not allocate guest memory\n");
             return -1;
@@ -155,13 +185,18 @@ int v3_init_mem_map(struct v3_vm_info * vm) {
 
        // Clear the memory...
        memset(V3_VAddr((void *)region->host_addr), 0, v3_mem_block_size);
+       
+       // Note: the assigned NUMA node could differ from the one we requested...
+       region->numa_id = v3_numa_hpa_to_node(region->host_addr);
 
        region->flags.read = 1;
        region->flags.write = 1;
        region->flags.exec = 1;
        region->flags.base = 1;
        region->flags.alloced = 1;
+       region->flags.limit32 = will_use_shadow_paging(vm);
        
+
        region->unhandled = unhandled_err;
     }
 
@@ -317,13 +352,13 @@ int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * region)
            v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
            
            if (mem_mode == PHYSICAL_MEM) {
-               rc |= v3_invalidate_passthrough_addr_range(info, region->guest_start, region->guest_end-1);
+             rc |= v3_invalidate_passthrough_addr_range(info, region->guest_start, region->guest_end-1, NULL, NULL);
            } else {
                rc |= v3_invalidate_shadow_pts(info);
            }
            
        } else if (info->shdw_pg_mode == NESTED_PAGING) {
-           rc |= v3_invalidate_nested_addr_range(info, region->guest_start, region->guest_end-1);
+         rc |= v3_invalidate_nested_addr_range(info, region->guest_start, region->guest_end-1, NULL, NULL);
        }
     }
 
@@ -455,7 +490,8 @@ static struct v3_mem_region * get_overlapping_region(struct v3_vm_info * vm, uin
     struct v3_mem_region * start_region = v3_get_mem_region(vm, core_id, start_gpa);
 
     if (start_region == NULL) {
-       PrintError(vm, VCORE_NONE, "Invalid memory region\n");
+        PrintError(vm, VCORE_NONE, "No overlapping region for core=%d, start_gpa=%p\n", core_id, (void*)start_gpa);
+        v3_print_mem_map(vm);
        return NULL;
     }
 
@@ -523,13 +559,13 @@ void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
            v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
            
            if (mem_mode == PHYSICAL_MEM) {
-             rc |= v3_invalidate_passthrough_addr_range(info,reg->guest_start, reg->guest_end-1);
+             rc |= v3_invalidate_passthrough_addr_range(info, reg->guest_start, reg->guest_end-1, NULL, NULL);
            } else {
              rc |= v3_invalidate_shadow_pts(info);
            }
            
        } else if (info->shdw_pg_mode == NESTED_PAGING) {
-         rc |= v3_invalidate_nested_addr_range(info,reg->guest_start, reg->guest_end-1);
+         rc |= v3_invalidate_nested_addr_range(info, reg->guest_start, reg->guest_end-1, NULL, NULL);
        }
     }
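
A note on the invalidation calls in this diff: v3_invalidate_passthrough_addr_range() and v3_invalidate_nested_addr_range() now take two extra trailing arguments, passed here as NULL,NULL. Given the commit's focus on refactoring direct paging, these plausibly form an optional per-page filter callback plus an opaque state pointer, but the new prototypes are not shown in this diff, so the following is only a sketch under that assumption; the callback shape and the helper in_host_region are hypothetical.

/* Hypothetical filter: restrict invalidation to mappings whose host
 * physical address lies inside a given region.  ASSUMED callback shape:
 * int (*filter)(void *paddr, void *state), nonzero meaning "invalidate". */
static int in_host_region(void * paddr, void * state) {
    struct v3_mem_region * reg = (struct v3_mem_region *)state;
    addr_t hpa = (addr_t)paddr;

    return (hpa >= reg->host_addr) &&
           (hpa <  reg->host_addr + (reg->guest_end - reg->guest_start));
}

/* A caller would then pass the filter and its state instead of NULL,NULL: */
rc |= v3_invalidate_passthrough_addr_range(info, reg->guest_start, reg->guest_end-1,
                                           in_host_region, reg);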