int v3_activate_passthrough_pt(struct guest_info * info);
int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr);
+// The range invalidated covers at least [start, end]; it may be rounded out to the enclosing page-table-entry boundaries
+int v3_invalidate_passthrough_addr_range(struct guest_info * info,
+ addr_t inv_addr_start, addr_t inv_addr_end);
+
int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr);
+// The range invalidated covers at least [start, end]; it may be rounded out to the enclosing page-table-entry boundaries
+int v3_invalidate_nested_addr_range(struct guest_info * info,
+ addr_t inv_addr_start, addr_t inv_addr_end);
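+
+// Example (sketch): callers that previously invalidated a guest region one
+// 4KB page at a time can issue a single range call instead. The end address
+// is inclusive, so an exclusive region end must be decremented, as in
+// v3_insert_mem_region()/v3_delete_mem_region() below (assumes info is a
+// valid struct guest_info * for a core using passthrough paging):
+//
+//   if (v3_invalidate_passthrough_addr_range(info, region->guest_start,
+//                                            region->guest_end - 1) == -1) {
+//       PrintError(info->vm_info, info, "Range invalidation failed\n");
+//   }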
#endif // ! __V3VEE__
#define PAGE_BASE_ADDR_2MB(x) ((x) >> 21)
#define PAGE_BASE_ADDR_4MB(x) ((x) >> 22)
#define PAGE_BASE_ADDR_1GB(x) ((x) >> 30)
+#define PAGE_BASE_ADDR_512GB(x) ((x) >> 39)
#define BASE_TO_PAGE_ADDR(x) (((addr_t)x) << 12)
#define BASE_TO_PAGE_ADDR_4KB(x) (((addr_t)x) << 12)
#define BASE_TO_PAGE_ADDR_2MB(x) (((addr_t)x) << 21)
#define BASE_TO_PAGE_ADDR_4MB(x) (((addr_t)x) << 22)
#define BASE_TO_PAGE_ADDR_1GB(x) (((addr_t)x) << 30)
+#define BASE_TO_PAGE_ADDR_512GB(x) (((addr_t)x) << 39)
/* *** */
#define PAGE_SIZE_2MB (4096 * 512)
#define PAGE_SIZE_4MB (4096 * 1024)
#define PAGE_SIZE_1GB 0x40000000
+#define PAGE_SIZE_512GB (512ULL * PAGE_SIZE_1GB)
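+
+// Composing the two 512GB macros rounds an address down to the base of the
+// 512GB region covered by a single PML4 entry, e.g.
+//   BASE_TO_PAGE_ADDR_512GB(PAGE_BASE_ADDR_512GB(inv_addr))
+// which is how invalidate_addr_64_internal() reports the extent of a
+// not-present PML4 entry.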
/* *** */
}
+int v3_invalidate_passthrough_addr_range(struct guest_info * info,
+ addr_t inv_addr_start, addr_t inv_addr_end) {
+ v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
+
+ switch(mode) {
+ case REAL:
+ case PROTECTED:
+ return invalidate_addr_32_range(info, inv_addr_start, inv_addr_end);
+
+ case PROTECTED_PAE:
+ case LONG:
+ case LONG_32_COMPAT:
+	    // Passthrough paging only uses 32PAE page tables, even in long mode
+ return invalidate_addr_32pae_range(info, inv_addr_start, inv_addr_end);
+
+ default:
+ PrintError(info->vm_info, info, "Unknown CPU Mode\n");
+ break;
+ }
+ return -1;
+}
+
int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr) {
#ifdef __V3_64BIT__
return -1;
}
+
+int v3_invalidate_nested_addr_range(struct guest_info * info,
+ addr_t inv_addr_start, addr_t inv_addr_end) {
+
+#ifdef __V3_64BIT__
+ v3_cpu_mode_t mode = LONG;
+#else
+ v3_cpu_mode_t mode = PROTECTED;
+#endif
+
+ switch(mode) {
+ case REAL:
+ case PROTECTED:
+ return invalidate_addr_32_range(info, inv_addr_start, inv_addr_end);
+
+ case PROTECTED_PAE:
+ return invalidate_addr_32pae_range(info, inv_addr_start, inv_addr_end);
+
+ case LONG:
+ case LONG_32_COMPAT:
+ return invalidate_addr_64_range(info, inv_addr_start, inv_addr_end);
+
+ default:
+ PrintError(info->vm_info, info, "Unknown CPU Mode\n");
+ break;
+ }
+
+ return -1;
+}
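+
+// Note: the nested page tables follow the host's paging mode, so the table
+// format is fixed at compile time (__V3_64BIT__) rather than derived from
+// the guest's current CPU mode as in the passthrough case above.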
-static inline int invalidate_addr_32(struct guest_info * info, addr_t inv_addr) {
+static inline int invalidate_addr_32_internal(struct guest_info * info, addr_t inv_addr,
+ addr_t *actual_start, uint64_t *actual_size) {
pde32_t * pde = NULL;
pte32_t * pte = NULL;
}
if (pde[pde_index].present == 0) {
+ *actual_start = BASE_TO_PAGE_ADDR_4MB(PAGE_BASE_ADDR_4MB(inv_addr));
+ *actual_size = PAGE_SIZE_4MB;
return 0;
} else if (pde[pde_index].large_page) {
pde[pde_index].present = 0;
pde[pde_index].writable = 0;
pde[pde_index].user_page = 0;
+ *actual_start = BASE_TO_PAGE_ADDR_4MB(PAGE_BASE_ADDR_4MB(inv_addr));
+ *actual_size = PAGE_SIZE_4MB;
return 0;
}
pte[pte_index].writable = 0;
pte[pte_index].user_page = 0;
+ *actual_start = BASE_TO_PAGE_ADDR_4KB(PAGE_BASE_ADDR_4KB(inv_addr));
+ *actual_size = PAGE_SIZE_4KB;
+
return 0;
}
+static inline int invalidate_addr_32(struct guest_info * core, addr_t inv_addr)
+{
+ addr_t start;
+ uint64_t len;
+
+ return invalidate_addr_32_internal(core,inv_addr,&start,&len);
+}
+
+static inline int invalidate_addr_32_range(struct guest_info * core, addr_t inv_addr_start, addr_t inv_addr_end)
+{
+ addr_t next;
+ addr_t start;
+ uint64_t len;
+ int rc;
+
+ for (next=inv_addr_start; next<=inv_addr_end; ) {
+ rc = invalidate_addr_32_internal(core,next,&start, &len);
+ if (rc) {
+ return rc;
+ }
+ next = start + len;
+ }
+ return 0;
+}
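+
+// Each *_range walker advances by the extent reported back by the
+// corresponding *_internal call, so a not-present or large-page entry is
+// skipped in a single iteration instead of being probed 4KB at a time.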
+
+
+
#endif
}
-static inline int invalidate_addr_32pae(struct guest_info * info, addr_t inv_addr) {
+static inline int invalidate_addr_32pae_internal(struct guest_info * info, addr_t inv_addr,
+ addr_t *actual_start, uint64_t *actual_size) {
pdpe32pae_t * pdpe = NULL;
pde32pae_t * pde = NULL;
pte32pae_t * pte = NULL;
if (pdpe[pdpe_index].present == 0) {
+ *actual_start = BASE_TO_PAGE_ADDR_1GB(PAGE_BASE_ADDR_1GB(inv_addr));
+ *actual_size = PAGE_SIZE_1GB;
return 0;
}
pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
if (pde[pde_index].present == 0) {
+ *actual_start = BASE_TO_PAGE_ADDR_2MB(PAGE_BASE_ADDR_2MB(inv_addr));
+ *actual_size = PAGE_SIZE_2MB;
return 0;
} else if (pde[pde_index].large_page) {
pde[pde_index].present = 0;
+ *actual_start = BASE_TO_PAGE_ADDR_2MB(PAGE_BASE_ADDR_2MB(inv_addr));
+ *actual_size = PAGE_SIZE_2MB;
return 0;
}
pte[pte_index].present = 0;
+ *actual_start = BASE_TO_PAGE_ADDR_4KB(PAGE_BASE_ADDR_4KB(inv_addr));
+ *actual_size = PAGE_SIZE_4KB;
return 0;
}
+static inline int invalidate_addr_32pae(struct guest_info * core, addr_t inv_addr)
+{
+ addr_t start;
+ uint64_t len;
+
+ return invalidate_addr_32pae_internal(core,inv_addr,&start,&len);
+}
+
+static inline int invalidate_addr_32pae_range(struct guest_info * core, addr_t inv_addr_start, addr_t inv_addr_end)
+{
+ addr_t next;
+ addr_t start;
+ uint64_t len;
+ int rc;
+
+ for (next=inv_addr_start; next<=inv_addr_end; ) {
+ rc = invalidate_addr_32pae_internal(core,next,&start, &len);
+ if (rc) {
+ return rc;
+ }
+ next = start + len;
+ }
+ return 0;
+}
+
#endif
return 0;
}
-static inline int invalidate_addr_64(struct guest_info * core, addr_t inv_addr) {
+static inline int invalidate_addr_64_internal(struct guest_info * core, addr_t inv_addr,
+ addr_t *actual_start, uint64_t *actual_size) {
pml4e64_t * pml = NULL;
pdpe64_t * pdpe = NULL;
pde64_t * pde = NULL;
}
if (pml[pml_index].present == 0) {
- return 0;
+ *actual_start = BASE_TO_PAGE_ADDR_512GB(PAGE_BASE_ADDR_512GB(inv_addr));
+ *actual_size = PAGE_SIZE_512GB;
+ return 0;
}
pdpe = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pml[pml_index].pdp_base_addr));
if (pdpe[pdpe_index].present == 0) {
+ *actual_start = BASE_TO_PAGE_ADDR_1GB(PAGE_BASE_ADDR_1GB(inv_addr));
+ *actual_size = PAGE_SIZE_1GB;
return 0;
} else if (pdpe[pdpe_index].large_page == 1) { // 1GiB
pdpe[pdpe_index].present = 0;
pdpe[pdpe_index].writable = 0;
pdpe[pdpe_index].user_page = 0;
+ *actual_start = BASE_TO_PAGE_ADDR_1GB(PAGE_BASE_ADDR_1GB(inv_addr));
+ *actual_size = PAGE_SIZE_1GB;
return 0;
}
pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
if (pde[pde_index].present == 0) {
+ *actual_start = BASE_TO_PAGE_ADDR_2MB(PAGE_BASE_ADDR_2MB(inv_addr));
+ *actual_size = PAGE_SIZE_2MB;
return 0;
} else if (pde[pde_index].large_page == 1) { // 2MiB
pde[pde_index].present = 0;
pde[pde_index].writable = 0;
pde[pde_index].user_page = 0;
+ *actual_start = BASE_TO_PAGE_ADDR_2MB(PAGE_BASE_ADDR_2MB(inv_addr));
+ *actual_size = PAGE_SIZE_2MB;
return 0;
}
pte[pte_index].writable = 0;
pte[pte_index].user_page = 0;
+ *actual_start = BASE_TO_PAGE_ADDR_4KB(PAGE_BASE_ADDR_4KB(inv_addr));
+ *actual_size = PAGE_SIZE_4KB;
+
return 0;
}
+static inline int invalidate_addr_64(struct guest_info * core, addr_t inv_addr)
+{
+ addr_t start;
+ uint64_t len;
+
+ return invalidate_addr_64_internal(core,inv_addr,&start,&len);
+}
+
+static inline int invalidate_addr_64_range(struct guest_info * core, addr_t inv_addr_start, addr_t inv_addr_end)
+{
+ addr_t next;
+ addr_t start;
+ uint64_t len;
+ int rc;
+
+ for (next=inv_addr_start; next<=inv_addr_end; ) {
+ rc = invalidate_addr_64_internal(core,next,&start, &len);
+ if (rc) {
+ return rc;
+ }
+ next = start + len;
+ }
+ return 0;
+}
+
#endif
int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * region) {
struct v3_mem_region * ret;
int i = 0;
+ int rc;
if ((ret = __insert_mem_region(vm, region))) {
PrintError(vm, VCORE_NONE, "Internal insert failed returned region is from 0x%p to 0x%p on vcore %d\n", (void*)(ret->guest_start), (void*)(ret->guest_end), ret->core_id);
v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.mem_regions));
+ rc = 0;
for (i = 0; i < vm->num_cores; i++) {
struct guest_info * info = &(vm->cores[i]);
v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
if (mem_mode == PHYSICAL_MEM) {
- addr_t cur_addr;
-
- for (cur_addr = region->guest_start;
- cur_addr < region->guest_end;
- cur_addr += PAGE_SIZE_4KB) {
- v3_invalidate_passthrough_addr(info, cur_addr);
- }
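+		// region->guest_end is exclusive; the range API takes an inclusive end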
+ rc |= v3_invalidate_passthrough_addr_range(info, region->guest_start, region->guest_end-1);
} else {
- v3_invalidate_shadow_pts(info);
+ rc |= v3_invalidate_shadow_pts(info);
}
} else if (info->shdw_pg_mode == NESTED_PAGING) {
- addr_t cur_addr;
-
- for (cur_addr = region->guest_start;
- cur_addr < region->guest_end;
- cur_addr += PAGE_SIZE_4KB) {
-
- v3_invalidate_nested_addr(info, cur_addr);
- }
+ rc |= v3_invalidate_nested_addr_range(info, region->guest_start, region->guest_end-1);
}
}
- return 0;
+ return rc;
}
void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
int i = 0;
+ int rc;
if (reg == NULL) {
return;
}
+ rc = 0;
+
for (i = 0; i < vm->num_cores; i++) {
struct guest_info * info = &(vm->cores[i]);
v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
if (mem_mode == PHYSICAL_MEM) {
- addr_t cur_addr;
-
- for (cur_addr = reg->guest_start;
- cur_addr < reg->guest_end;
- cur_addr += PAGE_SIZE_4KB) {
- v3_invalidate_passthrough_addr(info, cur_addr);
- }
+ rc |= v3_invalidate_passthrough_addr_range(info,reg->guest_start, reg->guest_end-1);
} else {
- v3_invalidate_shadow_pts(info);
+ rc |= v3_invalidate_shadow_pts(info);
}
} else if (info->shdw_pg_mode == NESTED_PAGING) {
- addr_t cur_addr;
-
- for (cur_addr = reg->guest_start;
- cur_addr < reg->guest_end;
- cur_addr += PAGE_SIZE_4KB) {
-
- v3_invalidate_nested_addr(info, cur_addr);
- }
+ rc |= v3_invalidate_nested_addr_range(info,reg->guest_start, reg->guest_end-1);
}
}
// flush virtual page tables
// 3 cases shadow, shadow passthrough, and nested
+ if (rc) { PrintError(vm, VCORE_NONE, "Error in deleting memory region\n"); }
}
// Determine if a given address can be handled by a large page of the requested size