SHDW_REGION_ALLOCATED, // Region is a section of host memory
} v3_shdw_region_type_t;
-
+#define V3_MEM_CORE_ANY ((uint16_t)-1)
struct v3_shadow_region {
struct rb_root shdw_regions;
void * hook_hvas; // this is an array of pages, equal to the number of cores
-};
+};
int v3_init_mem_map(struct v3_vm_info * vm);
-int v3_add_shadow_mem(struct v3_vm_info * vm,
- addr_t guest_addr_start,
- addr_t guest_addr_end,
- addr_t host_addr);
+int v3_add_shadow_mem(struct v3_vm_info * vm, uint16_t core_id,
+ addr_t guest_addr_start, addr_t guest_addr_end, addr_t host_addr);
-int v3_hook_full_mem(struct v3_vm_info * vm, addr_t guest_addr_start, addr_t guest_addr_end,
+int v3_hook_full_mem(struct v3_vm_info * vm, uint16_t core_id,
+ addr_t guest_addr_start, addr_t guest_addr_end,
int (*read)(addr_t guest_addr, void * dst, uint_t length, void * priv_data),
int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
void * priv_data);
-int v3_hook_write_mem(struct v3_vm_info * vm, addr_t guest_addr_start, addr_t guest_addr_end,
- addr_t host_addr,
+int v3_hook_write_mem(struct v3_vm_info * vm, uint16_t core_id,
+ addr_t guest_addr_start, addr_t guest_addr_end, addr_t host_addr,
int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
void * priv_data);
-int v3_unhook_mem(struct v3_vm_info * vm, addr_t guest_addr_start);
+int v3_unhook_mem(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr_start);
-struct v3_shadow_region * v3_get_shadow_region(struct v3_vm_info * vm, addr_t guest_addr /*, int core_id */);
-addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, addr_t guest_addr);
+struct v3_shadow_region * v3_get_shadow_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr);
+addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, uint16_t core_id, addr_t guest_addr);
break;
}
case PCI_BAR_MEM32: {
- v3_unhook_mem(info->vm_info, (addr_t)(bar->val));
+ v3_unhook_mem(info->vm_info, V3_MEM_CORE_ANY, (addr_t)(bar->val));
if (bar->mem_read) {
- v3_hook_full_mem(info->vm_info, PCI_MEM32_BASE(new_val),
+ v3_hook_full_mem(info->vm_info, V3_MEM_CORE_ANY, PCI_MEM32_BASE(new_val),
PCI_MEM32_BASE(new_val) + (bar->num_pages * PAGE_SIZE_4KB),
bar->mem_read, bar->mem_write, pci->priv_data);
} else {
// hook memory
if (pci_dev->bar[i].mem_read) {
// full hook
- v3_hook_full_mem(vm, pci_dev->bar[i].default_base_addr,
+ v3_hook_full_mem(vm, V3_MEM_CORE_ANY, pci_dev->bar[i].default_base_addr,
pci_dev->bar[i].default_base_addr + (pci_dev->bar[i].num_pages * PAGE_SIZE_4KB),
pci_dev->bar[i].mem_read, pci_dev->bar[i].mem_write, pci_dev->priv_data);
} else if (pci_dev->bar[i].mem_write) {
int guest_pa_to_host_pa(struct guest_info * info, addr_t guest_pa, addr_t * host_pa) {
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, guest_pa);
+ struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, info->cpu_id, guest_pa);
if (shdw_reg == NULL) {
PrintError("In GPA->HPA: Could not find address in shadow map (addr=%p) (NULL REGION)\n",
return -1;
}
- *host_pa = v3_get_shadow_addr(shdw_reg, guest_pa);
+ *host_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_pa);
return 0;
}
addr_t host_addr = atox(v3_cfg_val(mem_region, "host_addr"));
- if (v3_add_shadow_mem(vm, start_addr, end_addr, host_addr) == -1) {
+ if (v3_add_shadow_mem(vm, V3_MEM_CORE_ANY, start_addr, end_addr, host_addr) == -1) {
PrintError("Could not map memory region: %p-%p => %p\n",
(void *)start_addr, (void *)end_addr, (void *)host_addr);
return -1;
extern uint8_t v3_vgabios_start[];
extern uint8_t v3_vgabios_end[];
- addr_t vgabios_dst = v3_get_shadow_addr(&(vm->mem_map.base_region), VGABIOS_START);
+ addr_t vgabios_dst = v3_get_shadow_addr(&(vm->mem_map.base_region), V3_MEM_CORE_ANY, VGABIOS_START);
memcpy(V3_VAddr((void *)vgabios_dst), v3_vgabios_start, v3_vgabios_end - v3_vgabios_start);
}
extern uint8_t v3_rombios_start[];
extern uint8_t v3_rombios_end[];
- addr_t rombios_dst = v3_get_shadow_addr(&(vm->mem_map.base_region), ROMBIOS_START);
+ addr_t rombios_dst = v3_get_shadow_addr(&(vm->mem_map.base_region), V3_MEM_CORE_ANY, ROMBIOS_START);
memcpy(V3_VAddr((void *)rombios_dst), v3_rombios_start, v3_rombios_end - v3_rombios_start);
}
int pde_index = PDE32_INDEX(fault_addr);
int pte_index = PTE32_INDEX(fault_addr);
- struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, fault_addr);
+ struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, info->cpu_id, fault_addr);
if (region == NULL) {
PrintError("Invalid region in passthrough page fault 32, addr=%p\n",
return -1;
}
- host_addr = v3_get_shadow_addr(region, fault_addr);
+ host_addr = v3_get_shadow_addr(region, info->cpu_id, fault_addr);
// Lookup the correct PDE address based on the PAGING MODE
if (info->shdw_pg_mode == SHADOW_PAGING) {
int pde_index = PDE32PAE_INDEX(fault_addr);
int pte_index = PTE32PAE_INDEX(fault_addr);
- struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, fault_addr);
+ struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, info->cpu_id, fault_addr);
if (region == NULL) {
PrintError("Invalid region in passthrough page fault 32PAE, addr=%p\n",
return -1;
}
- host_addr = v3_get_shadow_addr(region, fault_addr);
+ host_addr = v3_get_shadow_addr(region, info->cpu_id, fault_addr);
// Lookup the correct PDPE address based on the PAGING MODE
if (info->shdw_pg_mode == SHADOW_PAGING) {
- struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, fault_addr);
+ struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, info->cpu_id, fault_addr);
if (region == NULL) {
PrintError("Invalid region in passthrough page fault 64, addr=%p\n",
return -1;
}
- host_addr = v3_get_shadow_addr(region, fault_addr);
+ host_addr = v3_get_shadow_addr(region, info->cpu_id, fault_addr);
//
// Lookup the correct PML address based on the PAGING MODE
-int v3_add_shadow_mem( struct v3_vm_info * vm,
+int v3_add_shadow_mem( struct v3_vm_info * vm, uint16_t core_id,
addr_t guest_addr_start,
addr_t guest_addr_end,
addr_t host_addr)
entry->write_hook = NULL;
entry->read_hook = NULL;
entry->priv_data = NULL;
+ entry->core_id = core_id;
if (insert_shadow_region(vm, entry)) {
V3_Free(entry);
-int v3_hook_write_mem(struct v3_vm_info * vm, addr_t guest_addr_start, addr_t guest_addr_end,
- addr_t host_addr,
+int v3_hook_write_mem(struct v3_vm_info * vm, uint16_t core_id,
+ addr_t guest_addr_start, addr_t guest_addr_end, addr_t host_addr,
int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
void * priv_data) {
entry->write_hook = write;
entry->read_hook = NULL;
entry->priv_data = priv_data;
+ entry->core_id = core_id;
if (insert_shadow_region(vm, entry)) {
V3_Free(entry);
return 0;
}
-int v3_hook_full_mem(struct v3_vm_info * vm, addr_t guest_addr_start, addr_t guest_addr_end,
+int v3_hook_full_mem(struct v3_vm_info * vm, uint16_t core_id,
+ addr_t guest_addr_start, addr_t guest_addr_end,
int (*read)(addr_t guest_addr, void * dst, uint_t length, void * priv_data),
int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
void * priv_data) {
entry->write_hook = write;
entry->read_hook = read;
entry->priv_data = priv_data;
-
+ entry->core_id = core_id;
+
if (insert_shadow_region(vm, entry)) {
V3_Free(entry);
return -1;
// Unhooks the memory hook registered at exactly this start address.
// Unhooking a subregion of a larger hooked region is not supported.
-int v3_unhook_mem(struct v3_vm_info * vm, addr_t guest_addr_start) {
- struct v3_shadow_region * reg = v3_get_shadow_region(vm, guest_addr_start);
+int v3_unhook_mem(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr_start) {
+ struct v3_shadow_region * reg = v3_get_shadow_region(vm, core_id, guest_addr_start);
if ((reg->host_type != SHDW_REGION_FULL_HOOK) ||
(reg->host_type != SHDW_REGION_WRITE_HOOK)) {
} else if (region->guest_start >= tmp_region->guest_end) {
p = &(*p)->rb_right;
} else {
- return tmp_region;
+ if ((region->guest_end != tmp_region->guest_end) ||
+ (region->guest_start != tmp_region->guest_start)) {
+ PrintError("Trying to map a partial overlapped core specific page...\n");
+ return tmp_region; // FIXME: returns the conflicting region on partial overlap; caller cannot distinguish this from a duplicate-insert failure
+ } else if (region->core_id == tmp_region->core_id) {
+ return tmp_region;
+ } else if (region->core_id < tmp_region->core_id) {
+ p = &(*p)->rb_left;
+ } else {
+ p = &(*p)->rb_right;
+ }
}
}
-int handle_special_page_fault(struct guest_info * info,
- addr_t fault_gva, addr_t fault_gpa,
- pf_error_t access_info)
+int handle_special_page_fault(struct guest_info * info,
+ addr_t fault_gva, addr_t fault_gpa, pf_error_t access_info)
{
- struct v3_shadow_region * reg = v3_get_shadow_region(info->vm_info, fault_gpa);
+ struct v3_shadow_region * reg = v3_get_shadow_region(info->vm_info, info->cpu_id, fault_gpa);
PrintDebug("Handling Special Page Fault\n");
int v3_handle_mem_wr_hook(struct guest_info * info, addr_t guest_va, addr_t guest_pa,
struct v3_shadow_region * reg, pf_error_t access_info) {
- addr_t dst_addr = (addr_t)V3_VAddr((void *)v3_get_shadow_addr(reg, guest_pa));
+ addr_t dst_addr = (addr_t)V3_VAddr((void *)v3_get_shadow_addr(reg, info->cpu_id, guest_pa));
if (v3_emulate_write_op(info, guest_va, guest_pa, dst_addr,
reg->write_hook, reg->priv_data) == -1) {
-struct v3_shadow_region * v3_get_shadow_region(struct v3_vm_info * vm, addr_t guest_addr) {
+struct v3_shadow_region * v3_get_shadow_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr) {
struct rb_node * n = vm->mem_map.shdw_regions.rb_node;
struct v3_shadow_region * reg = NULL;
} else if (guest_addr >= reg->guest_end) {
n = n->rb_right;
} else {
+ if ((core_id == reg->core_id) ||
+ (reg->core_id == V3_MEM_CORE_ANY)) {
return reg;
+ } else {
+ n = n->rb_right;
+ }
}
}
-addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, addr_t guest_addr) {
+addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, uint16_t core_id, addr_t guest_addr) {
if ( (reg) &&
(reg->host_type != SHDW_REGION_FULL_HOOK)) {
return (guest_addr - reg->guest_start) + reg->host_addr;
pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, guest_pa);
+ struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, info->cpu_id, guest_pa);
if (shdw_reg == NULL) {
// Inject a machine check in the guest
if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
(shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_pa);
+ addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_pa);
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
PrintDebug("Handling 4MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, guest_fault_pa);
+ struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, info->cpu_id, guest_fault_pa);
if (shdw_reg == NULL) {
if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
(shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_fault_pa);
+ addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_fault_pa);
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
PrintDebug("Handling PTE fault\n");
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, guest_pa);
+ struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, info->cpu_id, guest_pa);
if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
(shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_pa);
+ addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_pa);
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
PrintDebug("Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, guest_fault_pa);
+ struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, info->cpu_id, guest_fault_pa);
if (shdw_reg == NULL) {
if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
(shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_fault_pa);
+ addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_fault_pa);
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);