-struct v3_shadow_region {
+struct v3_mem_region {
addr_t guest_start;
addr_t guest_end;
addr_t host_addr; // Host address this region maps to (valid once flags.alloced is set)
int (*unhandled)(struct guest_info * info, addr_t guest_va, addr_t guest_pa,
- struct v3_shadow_region * reg, pf_error_t access_info);
+ struct v3_mem_region * reg, pf_error_t access_info);
void * priv_data; // opaque pointer handed back through the unhandled callback's region argument
struct v3_mem_map {
- struct v3_shadow_region base_region;
+ struct v3_mem_region base_region;
- struct rb_root shdw_regions;
+ struct rb_root mem_regions;
};
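/* Illustrative sketch, not part of this patch: wiring the renamed struct's
 * `unhandled` callback. The handler body, the my_dev_state type, and the
 * registration site below are hypothetical. */
static int my_fault_handler(struct guest_info * info, addr_t guest_va, addr_t guest_pa,
                            struct v3_mem_region * reg, pf_error_t access_info) {
    struct my_dev_state * dev = (struct my_dev_state *)reg->priv_data; // cookie set at registration
    return emulate_access(dev, guest_pa, access_info); // hypothetical emulation routine
}

/* at registration time: */
reg->unhandled = my_fault_handler;
reg->priv_data = dev;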
-struct v3_shadow_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id,
+struct v3_mem_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id,
addr_t guest_addr_start, addr_t guest_addr_end);
-int v3_insert_shadow_region(struct v3_vm_info * vm, struct v3_shadow_region * reg);
+int v3_insert_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg);
-void v3_delete_shadow_region(struct v3_vm_info * vm, struct v3_shadow_region * reg);
+void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg);
/* This is a shortcut function for creating + inserting a memory region which redirects to host memory */
-struct v3_shadow_region * v3_get_shadow_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr);
+struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr);
-addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, uint16_t core_id, addr_t guest_addr);
+addr_t v3_get_shadow_addr(struct v3_mem_region * reg, uint16_t core_id, addr_t guest_addr);
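/* Usage sketch for the renamed API, assuming Palacios' V3_AllocPages host
 * hook; the wrapper function and addresses are illustrative. */
static int map_scratch_page(struct v3_vm_info * vm) {
    struct v3_mem_region * reg = v3_create_mem_region(vm, V3_MEM_CORE_ANY,
                                                      0xa0000, 0xa1000);
    reg->host_addr = (addr_t)V3_AllocPages(1);
    reg->flags.alloced = 1;
    if (v3_insert_mem_region(vm, reg) == -1) { // fails if the range overlaps an existing region
        V3_Free(reg);
        return -1;
    }
    return 0;
}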
}
} else if (vbar->type == PT_BAR_MEM32) {
// remove old mapping
- struct v3_shadow_region * old_reg = v3_get_shadow_region(dev->vm, V3_MEM_CORE_ANY, vbar->addr);
+ struct v3_mem_region * old_reg = v3_get_mem_region(dev->vm, V3_MEM_CORE_ANY, vbar->addr);
if (old_reg == NULL) {
// uh oh... no region was mapped at the BAR's old address
return -1;
}
- v3_delete_shadow_region(dev->vm, old_reg);
+ v3_delete_mem_region(dev->vm, old_reg);
// clear the low bits to match the size
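// e.g. a 64KB BAR reports pbar->size == 0x10000, so the mask below is
// ~0xFFFF == 0xFFFF0000: the guest reads back a size-aligned base,
// just as a physical BAR would decode it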
*src &= ~(pbar->size - 1);
} else if (vbar->type == PT_BAR_MEM64_HI) {
struct pt_bar * lo_vbar = &(state->virt_bars[bar_num - 1]);
- struct v3_shadow_region * old_reg = v3_get_shadow_region(dev->vm, V3_MEM_CORE_ANY, vbar->addr);
+ struct v3_mem_region * old_reg = v3_get_mem_region(dev->vm, V3_MEM_CORE_ANY, vbar->addr);
if (old_reg == NULL) {
// uh oh... no region was mapped at the old address; fail as the other BAR paths do
return -1;
}
// remove old mapping
- v3_delete_shadow_region(dev->vm, old_reg);
+ v3_delete_mem_region(dev->vm, old_reg);
// We don't set the size, because we assume the region is less than 4GB
// Only remove the old mapping if present, i.e. if the ROM was enabled previously
if ((vrom->val & 0x1) == 0x1) {
- struct v3_shadow_region * old_reg = v3_get_shadow_region(dev->vm, V3_MEM_CORE_ANY, vrom->addr);
+ struct v3_mem_region * old_reg = v3_get_mem_region(dev->vm, V3_MEM_CORE_ANY, vrom->addr);
if (old_reg == NULL) {
// uh oh... no region was mapped at the old ROM address
return -1;
}
- v3_delete_shadow_region(dev->vm, old_reg);
+ v3_delete_mem_region(dev->vm, old_reg);
}
// clear the low bits to match the size
pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(core->vm_info, core->cpu_id, guest_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->cpu_id, guest_pa);
if (shdw_reg == NULL) {
// Inject a machine check in the guest
PrintDebug("Handling 4MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(core->vm_info, core->cpu_id, guest_fault_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->cpu_id, guest_fault_pa);
if (shdw_reg == NULL) {
pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, info->cpu_id, guest_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_pa);
if (shdw_reg == NULL) {
// Inject a machine check in the guest
PrintDebug("Handling 4MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, info->cpu_id, guest_fault_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
if (shdw_reg == NULL) {
PrintDebug("Handling PTE fault\n");
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, info->cpu_id, guest_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_pa);
PrintDebug("Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, info->cpu_id, guest_fault_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
if (shdw_reg == NULL) {
}
int guest_pa_to_host_pa(struct guest_info * info, addr_t guest_pa, addr_t * host_pa) {
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, info->cpu_id, guest_pa);
+ struct v3_mem_region * reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_pa);
- if (shdw_reg == NULL) {
+ if (reg == NULL) {
PrintError("In GPA->HPA: Could not find address in shadow map (addr=%p) (NULL REGION)\n",
(void *)guest_pa);
return -1;
}
- if (shdw_reg->flags.alloced == 0) {
+ if (reg->flags.alloced == 0) {
PrintError("In GPA->HPA: Tried to translate physical address of non allocated page (addr=%p)\n",
(void *)guest_pa);
return -1;
}
- *host_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_pa);
+ *host_pa = v3_get_shadow_addr(reg, info->cpu_id, guest_pa);
return 0;
}
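/* Usage sketch: resolving a GPA to its host frame before the VMM touches
 * it. V3_VAddr (HPA -> host VA) is assumed from the Palacios host
 * interface; the wrapper itself is hypothetical. */
static int read_guest_u32(struct guest_info * info, addr_t guest_pa, uint32_t * out) {
    addr_t host_pa = 0;
    if (guest_pa_to_host_pa(info, guest_pa, &host_pa) == -1) {
        return -1;
    }
    *out = *(uint32_t *)V3_VAddr((void *)host_pa);
    return 0;
}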
int pde_index = PDE32_INDEX(fault_addr);
int pte_index = PTE32_INDEX(fault_addr);
- struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, info->cpu_id, fault_addr);
+ struct v3_mem_region * region = v3_get_mem_region(info->vm_info, info->cpu_id, fault_addr);
if (region == NULL) {
PrintError("Invalid region in passthrough page fault 32, addr=%p\n",
int pde_index = PDE32PAE_INDEX(fault_addr);
int pte_index = PTE32PAE_INDEX(fault_addr);
- struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, info->cpu_id, fault_addr);
+ struct v3_mem_region * region = v3_get_mem_region(info->vm_info, info->cpu_id, fault_addr);
if (region == NULL) {
PrintError("Invalid region in passthrough page fault 32PAE, addr=%p\n",
- struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, info->cpu_id, fault_addr);
+ struct v3_mem_region * region = v3_get_mem_region(info->vm_info, info->cpu_id, fault_addr);
if (region == NULL) {
PrintError("Invalid region in passthrough page fault 64, addr=%p\n",
}
static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest_pa,
- struct v3_shadow_region * reg, pf_error_t access_info) {
+ struct v3_mem_region * reg, pf_error_t access_info) {
PrintError("Unhandled memory access error\n");
struct v3_mem_map * map = &(vm->mem_map);
addr_t mem_pages = vm->mem_size >> 12;
- memset(&(map->base_region), 0, sizeof(struct v3_shadow_region));
+ memset(&(map->base_region), 0, sizeof(struct v3_mem_region));
- map->shdw_regions.rb_node = NULL;
+ map->mem_regions.rb_node = NULL;
// There is an underlying region that contains all of the guest memory
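/* Sketch of the initialization that comment describes, assuming (as
 * v3_delete_mem_map below implies) one contiguous host allocation
 * spanning all guest RAM: */
map->base_region.guest_start = 0;
map->base_region.guest_end = vm->mem_size;
map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
map->base_region.flags.alloced = 1;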
void v3_delete_mem_map(struct v3_vm_info * vm) {
- struct rb_node * node = v3_rb_first(&(vm->mem_map.shdw_regions));
- struct v3_shadow_region * reg;
+ struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
+ struct v3_mem_region * reg;
struct rb_node * tmp_node = NULL;
while (node) {
- reg = rb_entry(node, struct v3_shadow_region, tree_node);
+ reg = rb_entry(node, struct v3_mem_region, tree_node);
tmp_node = node;
node = v3_rb_next(node);
- v3_delete_shadow_region(vm, reg);
+ v3_delete_mem_region(vm, reg);
}
V3_FreePage((void *)(vm->mem_map.base_region.host_addr));
}
-struct v3_shadow_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id,
+struct v3_mem_region * v3_create_mem_region(struct v3_vm_info * vm, uint16_t core_id,
addr_t guest_addr_start, addr_t guest_addr_end) {
- struct v3_shadow_region * entry = (struct v3_shadow_region *)V3_Malloc(sizeof(struct v3_shadow_region));
- memset(entry, 0, sizeof(struct v3_shadow_region));
+ struct v3_mem_region * entry = (struct v3_mem_region *)V3_Malloc(sizeof(struct v3_mem_region));
+ memset(entry, 0, sizeof(struct v3_mem_region));
entry->guest_start = guest_addr_start;
entry->guest_end = guest_addr_end;
addr_t guest_addr_end,
addr_t host_addr)
{
- struct v3_shadow_region * entry = NULL;
+ struct v3_mem_region * entry = NULL;
entry = v3_create_mem_region(vm, core_id,
guest_addr_start,
entry->flags.exec = 1;
entry->flags.alloced = 1;
- if (v3_insert_shadow_region(vm, entry) == -1) {
+ if (v3_insert_mem_region(vm, entry) == -1) {
V3_Free(entry);
return -1;
}
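/* Usage sketch of this shortcut (its name is elided from the excerpt; in
 * the Palacios tree it is v3_add_shadow_mem): identity-map the first 1MB
 * of guest RAM onto a hypothetical preallocated host buffer. */
if (v3_add_shadow_mem(vm, V3_MEM_CORE_ANY, 0x0, 0x100000, host_buf_pa) == -1) {
    PrintError("Failed to map low guest memory\n");
    return -1;
}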
static inline
-struct v3_shadow_region * __insert_shadow_region(struct v3_vm_info * vm,
- struct v3_shadow_region * region) {
- struct rb_node ** p = &(vm->mem_map.shdw_regions.rb_node);
+struct v3_mem_region * __insert_mem_region(struct v3_vm_info * vm,
+ struct v3_mem_region * region) {
+ struct rb_node ** p = &(vm->mem_map.mem_regions.rb_node);
struct rb_node * parent = NULL;
- struct v3_shadow_region * tmp_region;
+ struct v3_mem_region * tmp_region;
while (*p) {
parent = *p;
- tmp_region = rb_entry(parent, struct v3_shadow_region, tree_node);
+ tmp_region = rb_entry(parent, struct v3_mem_region, tree_node);
if (region->guest_end <= tmp_region->guest_start) {
p = &(*p)->rb_left;
-int v3_insert_shadow_region(struct v3_vm_info * vm,
- struct v3_shadow_region * region) {
- struct v3_shadow_region * ret;
+int v3_insert_mem_region(struct v3_vm_info * vm,
+ struct v3_mem_region * region) {
+ struct v3_mem_region * ret;
int i = 0;
- if ((ret = __insert_shadow_region(vm, region))) {
+ if ((ret = __insert_mem_region(vm, region))) {
return -1;
}
- v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.shdw_regions));
+ v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.mem_regions));
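/* Invariant sketch: the tree keeps regions sorted by guest address and
 * disjoint. The descent above goes left when the new region ends at or
 * before the current one's start, presumably right when it starts at or
 * after the current one's end, and otherwise __insert_mem_region returns
 * the colliding region, which v3_insert_mem_region turns into -1. */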
-struct v3_shadow_region * v3_get_shadow_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
- struct rb_node * n = vm->mem_map.shdw_regions.rb_node;
- struct v3_shadow_region * reg = NULL;
+struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
+ struct rb_node * n = vm->mem_map.mem_regions.rb_node;
+ struct v3_mem_region * reg = NULL;
while (n) {
- reg = rb_entry(n, struct v3_shadow_region, tree_node);
+ reg = rb_entry(n, struct v3_mem_region, tree_node);
if (guest_addr < reg->guest_start) {
n = n->rb_left;
-void v3_delete_shadow_region(struct v3_vm_info * vm, struct v3_shadow_region * reg) {
+void v3_delete_mem_region(struct v3_vm_info * vm, struct v3_mem_region * reg) {
int i = 0;
if (reg == NULL) {
}
}
- v3_rb_erase(&(reg->tree_node), &(vm->mem_map.shdw_regions));
+ v3_rb_erase(&(reg->tree_node), &(vm->mem_map.mem_regions));
V3_Free(reg);
-addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, uint16_t core_id, addr_t guest_addr) {
+addr_t v3_get_shadow_addr(struct v3_mem_region * reg, uint16_t core_id, addr_t guest_addr) {
if (reg && (reg->flags.alloced == 1)) {
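/* Worked example: a region with guest_start == 0x100000 backed at
 * host_addr == 0x8000000 translates guest_addr 0x100a34 to
 * (0x100a34 - 0x100000) + 0x8000000 == 0x8000a34 */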
return (guest_addr - reg->guest_start) + reg->host_addr;
} else {
void v3_print_mem_map(struct v3_vm_info * vm) {
- struct rb_node * node = v3_rb_first(&(vm->mem_map.shdw_regions));
- struct v3_shadow_region * reg = &(vm->mem_map.base_region);
+ struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
+ struct v3_mem_region * reg = &(vm->mem_map.base_region);
int i = 0;
V3_Print("Memory Layout:\n");
}
do {
- reg = rb_entry(node, struct v3_shadow_region, tree_node);
+ reg = rb_entry(node, struct v3_mem_region, tree_node);
V3_Print("%d: 0x%p - 0x%p -> 0x%p\n", i,
(void *)(reg->guest_start),
static int handle_mem_hook(struct guest_info * info, addr_t guest_va, addr_t guest_pa,
- struct v3_shadow_region * reg, pf_error_t access_info) {
+ struct v3_mem_region * reg, pf_error_t access_info) {
struct mem_hook * hook = reg->priv_data;
addr_t op_addr = 0;
addr_t guest_addr_start, addr_t guest_addr_end, addr_t host_addr,
int (*write)(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data),
void * priv_data) {
- struct v3_shadow_region * entry = NULL;
+ struct v3_mem_region * entry = NULL;
struct mem_hook * hook = V3_Malloc(sizeof(struct mem_hook));
// struct v3_mem_hooks * hooks = &(vm->mem_hooks);
entry->flags.exec = 1;
entry->flags.alloced = 1;
- if (v3_insert_shadow_region(vm, entry) == -1) {
+ if (v3_insert_mem_region(vm, entry) == -1) {
V3_Free(entry);
V3_Free(hook);
return -1;
int (*write)(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data),
void * priv_data) {
- struct v3_shadow_region * entry = NULL;
+ struct v3_mem_region * entry = NULL;
struct mem_hook * hook = V3_Malloc(sizeof(struct mem_hook));
struct v3_mem_hooks * hooks = &(vm->mem_hooks);
entry->unhandled = handle_mem_hook;
entry->priv_data = hook;
- if (v3_insert_shadow_region(vm, entry)) {
+ if (v3_insert_mem_region(vm, entry)) {
V3_Free(entry);
V3_Free(hook);
return -1;
// This will unhook the memory hook registered at the start address
// We do not support unhooking subregions
int v3_unhook_mem(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr_start) {
- struct v3_shadow_region * reg = v3_get_shadow_region(vm, core_id, guest_addr_start);
+ struct v3_mem_region * reg = v3_get_mem_region(vm, core_id, guest_addr_start);
if (reg == NULL) {
return -1; // nothing is hooked at this address
}
struct mem_hook * hook = reg->priv_data;
V3_Free(hook);
- v3_delete_shadow_region(vm, reg);
+ v3_delete_mem_region(vm, reg);
return 0;
}
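/* Usage sketch pairing a hook with this teardown. v3_hook_write_mem is
 * assumed to be the write-hook variant whose truncated prototype appears
 * above; GPA_START, my_write_cb, and my_cookie are hypothetical. */
if (v3_hook_write_mem(vm, V3_MEM_CORE_ANY, GPA_START, GPA_START + PAGE_SIZE,
                      host_pa, my_write_cb, my_cookie) == -1) {
    return -1;
}
/* later, on device teardown: */
v3_unhook_mem(vm, V3_MEM_CORE_ANY, GPA_START);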
if (global_state->active == 1) {
// unmap page
- struct v3_shadow_region * old_reg = v3_get_shadow_region(core->vm_info, core->cpu_id,
+ struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->cpu_id,
(addr_t)global_state->global_guest_pa);
if (old_reg == NULL) {
return -1;
}
- v3_delete_shadow_region(core->vm_info, old_reg);
+ v3_delete_mem_region(core->vm_info, old_reg);
}
global_state->global_guest_pa = src.value;
if (local_state->active == 1) {
// unmap page
- struct v3_shadow_region * old_reg = v3_get_shadow_region(core->vm_info, core->cpu_id,
+ struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->cpu_id,
(addr_t)local_state->local_guest_pa);
if (old_reg == NULL) {
return -1;
}
- v3_delete_shadow_region(core->vm_info, old_reg);
+ v3_delete_mem_region(core->vm_info, old_reg);
}
local_state->local_guest_pa = src.value;
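/* The delete-then-update pattern above is half of the common remap idiom;
 * a sketch of the re-add side using the shortcut from earlier
 * (v3_add_shadow_mem in the Palacios tree), with new_gpa and host_pa
 * hypothetical: */
if (v3_add_shadow_mem(core->vm_info, core->cpu_id, new_gpa,
                      new_gpa + PAGE_SIZE, host_pa) == -1) {
    return -1;
}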