/* Fundamental converters */
// Call out to OS
-int host_va_to_host_pa(addr_t host_va, addr_t * host_pa);
-int host_pa_to_host_va(addr_t host_pa, addr_t * host_va);
+int v3_hva_to_hpa(addr_t host_va, addr_t * host_pa);
+int v3_hpa_to_hva(addr_t host_pa, addr_t * host_va);
// guest_pa -> (shadow map) -> host_pa
-int guest_pa_to_host_pa(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_pa);
+int v3_gpa_to_hpa(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_pa);
/* !! Currently not implemented !! */
// host_pa -> (shadow_map) -> guest_pa
-int host_pa_to_guest_pa(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_pa);
+int v3_hpa_to_gpa(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_pa);
/**********************************/
/* !! Currently not implemented !! */
// host_va -> host_pa -> guest_pa
-int host_va_to_guest_pa(struct guest_info * guest_info, addr_t host_va, addr_t * guest_pa);
+int v3_hva_to_gpa(struct guest_info * guest_info, addr_t host_va, addr_t * guest_pa);
// guest_pa -> host_pa -> host_va
-int guest_pa_to_host_va(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_va);
+int v3_gpa_to_hva(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_va);
// Look up the address in the guest's page tables... This can cause multiple calls that translate
// ------------------------------------------------
// | |
// --> guest_pa -> host_pa -> host_va -> (read table) --> guest_pa
-int guest_va_to_guest_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * guest_pa);
+int v3_gva_to_gpa(struct guest_info * guest_info, addr_t guest_va, addr_t * guest_pa);
// ------------------------------------------------
// | |
// --> guest_pa -> host_pa -> host_va -> (read table) --> guest_va
-int guest_pa_to_guest_va(struct guest_info * guest_info, addr_t guest_pa, addr_t * guest_va);
+int v3_gpa_to_gva(struct guest_info * guest_info, addr_t guest_pa, addr_t * guest_va);
/* GROUP 2 */
/**********************************/
// guest_va -> guest_pa -> host_pa
-int guest_va_to_host_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * host_pa);
+int v3_gva_to_hpa(struct guest_info * guest_info, addr_t guest_va, addr_t * host_pa);
/* !! Currently not implemented !! */
// host_pa -> guest_pa -> guest_va
-int host_pa_to_guest_va(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_va);
+int v3_hpa_to_gva(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_va);
// guest_va -> guest_pa -> host_pa -> host_va
-int guest_va_to_host_va(struct guest_info * guest_info, addr_t guest_va, addr_t * host_va);
+int v3_gva_to_hva(struct guest_info * guest_info, addr_t guest_va, addr_t * host_va);
/* !! Currently not implemented !! */
// host_va -> host_pa -> guest_pa -> guest_va
-int host_va_to_guest_va(struct guest_info * guest_info, addr_t host_va, addr_t * guest_va);
+int v3_hva_to_gva(struct guest_info * guest_info, addr_t host_va, addr_t * guest_va);
-int read_guest_va_memory(struct guest_info * guest_info, addr_t guest_va, int count, uchar_t * dest);
-int read_guest_pa_memory(struct guest_info * guest_info, addr_t guest_pa, int count, uchar_t * dest);
-int write_guest_pa_memory(struct guest_info * guest_info, addr_t guest_pa, int count, uchar_t * src);
+int v3_read_gva_memory(struct guest_info * guest_info, addr_t guest_va, int count, uchar_t * dest);
+int v3_read_gpa_memory(struct guest_info * guest_info, addr_t guest_pa, int count, uchar_t * dest);
+int v3_write_gpa_memory(struct guest_info * guest_info, addr_t guest_pa, int count, uchar_t * src);
// TODO int v3_write_gva_memory(struct guest_info * guest_info, addr_t guest_va, int count, char * src);
struct v3_mem_region * v3_get_mem_region(struct v3_vm_info * vm, uint16_t core_id, addr_t guest_addr);
-addr_t v3_get_shadow_addr(struct v3_mem_region * reg, uint16_t core_id, addr_t guest_addr);
-
-
-
-
void v3_print_mem_map(struct v3_vm_info * vm);
PrintDebug("PRD table address = %x\n", channel->dma_prd_addr);
- ret = read_guest_pa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
+ ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
if (ret != sizeof(struct ide_dma_prd)) {
PrintError("Could not read PRD\n");
drive->current_lba++;
- ret = write_guest_pa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
+ ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
if (ret != bytes_to_write) {
PrintError("Failed to copy data into guest memory... (ret=%d)\n", ret);
PrintDebug("PRD Table address = %x\n", channel->dma_prd_addr);
- ret = read_guest_pa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
+ ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
if (ret != sizeof(struct ide_dma_prd)) {
PrintError("Could not read PRD\n");
bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
- ret = read_guest_pa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
+ ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
if (ret != bytes_to_write) {
PrintError("Faild to copy data from guest memory... (ret=%d)\n", ret);
tmp_desc->flags, tmp_desc->next);
- if (guest_pa_to_host_va(core, tmp_desc->addr_gpa, (addr_t *)&(page_addr)) == -1) {
+ if (v3_gpa_to_hva(core, tmp_desc->addr_gpa, (addr_t *)&(page_addr)) == -1) {
PrintError("Could not translate block header address\n");
return -1;
}
// round up to next page boundary.
virtio->cur_queue->ring_used_addr = (virtio->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
- if (guest_pa_to_host_va(core, virtio->cur_queue->ring_desc_addr, (addr_t *)&(virtio->cur_queue->desc)) == -1) {
+ if (v3_gpa_to_hva(core, virtio->cur_queue->ring_desc_addr, (addr_t *)&(virtio->cur_queue->desc)) == -1) {
PrintError("Could not translate ring descriptor address\n");
return -1;
}
- if (guest_pa_to_host_va(core, virtio->cur_queue->ring_avail_addr, (addr_t *)&(virtio->cur_queue->avail)) == -1) {
+ if (v3_gpa_to_hva(core, virtio->cur_queue->ring_avail_addr, (addr_t *)&(virtio->cur_queue->avail)) == -1) {
PrintError("Could not translate ring available address\n");
return -1;
}
- if (guest_pa_to_host_va(core, virtio->cur_queue->ring_used_addr, (addr_t *)&(virtio->cur_queue->used)) == -1) {
+ if (v3_gpa_to_hva(core, virtio->cur_queue->ring_used_addr, (addr_t *)&(virtio->cur_queue->used)) == -1) {
PrintError("Could not translate ring used address\n");
return -1;
}
uint8_t * buf = NULL;
PrintDebug("Handling Block op\n");
- if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
+ if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
PrintError("Could not translate buffer address\n");
return -1;
}
PrintDebug("Header Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", hdr_desc,
(void *)(hdr_desc->addr_gpa), hdr_desc->length, hdr_desc->flags, hdr_desc->next);
- if (guest_pa_to_host_va(core, hdr_desc->addr_gpa, &(hdr_addr)) == -1) {
+ if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, &(hdr_addr)) == -1) {
PrintError("Could not translate block header address\n");
return -1;
}
PrintDebug("Status Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", status_desc,
(void *)(status_desc->addr_gpa), status_desc->length, status_desc->flags, status_desc->next);
- if (guest_pa_to_host_va(core, status_desc->addr_gpa, (addr_t *)&(status_ptr)) == -1) {
+ if (v3_gpa_to_hva(core, status_desc->addr_gpa, (addr_t *)&(status_ptr)) == -1) {
PrintError("Could not translate status address\n");
return -1;
}
// round up to next page boundary.
blk_state->queue.ring_used_addr = (blk_state->queue.ring_used_addr + 0xfff) & ~0xfff;
- if (guest_pa_to_host_va(core, blk_state->queue.ring_desc_addr, (addr_t *)&(blk_state->queue.desc)) == -1) {
+ if (v3_gpa_to_hva(core, blk_state->queue.ring_desc_addr, (addr_t *)&(blk_state->queue.desc)) == -1) {
PrintError("Could not translate ring descriptor address\n");
return -1;
}
- if (guest_pa_to_host_va(core, blk_state->queue.ring_avail_addr, (addr_t *)&(blk_state->queue.avail)) == -1) {
+ if (v3_gpa_to_hva(core, blk_state->queue.ring_avail_addr, (addr_t *)&(blk_state->queue.avail)) == -1) {
PrintError("Could not translate ring available address\n");
return -1;
}
- if (guest_pa_to_host_va(core, blk_state->queue.ring_used_addr, (addr_t *)&(blk_state->queue.used)) == -1) {
+ if (v3_gpa_to_hva(core, blk_state->queue.ring_used_addr, (addr_t *)&(blk_state->queue.used)) == -1) {
PrintError("Could not translate ring used address\n");
return -1;
}
tmp_desc->flags, tmp_desc->next);
- if (guest_pa_to_host_va(core, tmp_desc->addr_gpa, (addr_t *)&(page_addr)) == -1) {
+ if (v3_gpa_to_hva(core, tmp_desc->addr_gpa, (addr_t *)&(page_addr)) == -1) {
PrintError("Could not translate block header address\n");
return -1;
}
// round up to next page boundary.
sym_state->cur_queue->ring_used_addr = (sym_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
- if (guest_pa_to_host_va(core, sym_state->cur_queue->ring_desc_addr, (addr_t *)&(sym_state->cur_queue->desc)) == -1) {
+ if (v3_gpa_to_hva(core, sym_state->cur_queue->ring_desc_addr, (addr_t *)&(sym_state->cur_queue->desc)) == -1) {
PrintError("Could not translate ring descriptor address\n");
return -1;
}
- if (guest_pa_to_host_va(core, sym_state->cur_queue->ring_avail_addr, (addr_t *)&(sym_state->cur_queue->avail)) == -1) {
+ if (v3_gpa_to_hva(core, sym_state->cur_queue->ring_avail_addr, (addr_t *)&(sym_state->cur_queue->avail)) == -1) {
PrintError("Could not translate ring available address\n");
return -1;
}
- if (guest_pa_to_host_va(core, sym_state->cur_queue->ring_used_addr, (addr_t *)&(sym_state->cur_queue->used)) == -1) {
+ if (v3_gpa_to_hva(core, sym_state->cur_queue->ring_used_addr, (addr_t *)&(sym_state->cur_queue->used)) == -1) {
PrintError("Could not translate ring used address\n");
return -1;
}
cmd_desc = &(q->desc[desc_idx]);
- if (guest_pa_to_host_va(core, cmd_desc->addr_gpa, (addr_t *)&cmd) == -1) {
+ if (v3_gpa_to_hva(core, cmd_desc->addr_gpa, (addr_t *)&cmd) == -1) {
PrintError("Could not translate SYMMOD header address\n");
return -1;
}
-
+
desc_idx = cmd_desc->next;
if (cmd->cmd == CMD_LOAD) {
name_desc = &(q->desc[desc_idx]);
- if (guest_pa_to_host_va(core, name_desc->addr_gpa, (addr_t *)&name) == -1) {
+ if (v3_gpa_to_hva(core, name_desc->addr_gpa, (addr_t *)&name) == -1) {
PrintError("Could not translate SYMMOD header address\n");
return -1;
}
buf_desc = &(q->desc[desc_idx]);
- if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
+ if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
PrintError("Could not translate buffer address\n");
return -1;
}
status_desc = &(q->desc[desc_idx]);
- if (guest_pa_to_host_va(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
+ if (v3_gpa_to_hva(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
PrintError("SYMMOD Error could not translate status address\n");
return -1;
}
hdr_desc = &(q->desc[desc_idx]);
- if (guest_pa_to_host_va(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
+ if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
PrintError("Could not translate SYMMOD header address\n");
return -1;
}
// round up to next page boundary.
sym_state->cur_queue->ring_used_addr = (sym_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
- if (guest_pa_to_host_va(core, sym_state->cur_queue->ring_desc_addr, (addr_t *)&(sym_state->cur_queue->desc)) == -1) {
+ if (v3_gpa_to_hva(core, sym_state->cur_queue->ring_desc_addr, (addr_t *)&(sym_state->cur_queue->desc)) == -1) {
PrintError("Could not translate ring descriptor address\n");
return -1;
}
- if (guest_pa_to_host_va(core, sym_state->cur_queue->ring_avail_addr, (addr_t *)&(sym_state->cur_queue->avail)) == -1) {
+ if (v3_gpa_to_hva(core, sym_state->cur_queue->ring_avail_addr, (addr_t *)&(sym_state->cur_queue->avail)) == -1) {
PrintError("Could not translate ring available address\n");
return -1;
}
- if (guest_pa_to_host_va(core, sym_state->cur_queue->ring_used_addr, (addr_t *)&(sym_state->cur_queue->used)) == -1) {
+ if (v3_gpa_to_hva(core, sym_state->cur_queue->ring_used_addr, (addr_t *)&(sym_state->cur_queue->used)) == -1) {
PrintError("Could not translate ring used address\n");
return -1;
}
PrintDebug("SYMMOD: Notifier Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", notifier_desc,
(void *)(notifier_desc->addr_gpa), notifier_desc->length, notifier_desc->flags, notifier_desc->next);
- if (guest_pa_to_host_va(&(vm->cores[0]), notifier_desc->addr_gpa, (addr_t *)&(notifier)) == -1) {
+ if (v3_gpa_to_hva(&(vm->cores[0]), notifier_desc->addr_gpa, (addr_t *)&(notifier)) == -1) {
PrintError("Could not translate receive buffer address\n");
return -1;
}
hdr_desc = &(q->desc[desc_idx]);
- if (guest_pa_to_host_va(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
+ if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
PrintError("Could not translate VirtioVNET header address\n");
return -1;
}
buf_desc = &(q->desc[desc_idx]);
- if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) {
+ if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) {
PrintError("Could not translate route address\n");
return -1;
}
status_desc = &(q->desc[desc_idx]);
- if (guest_pa_to_host_va(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
+ if (v3_gpa_to_hva(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
PrintError("VirtioVNET Error could not translate status address\n");
return -1;
}
pkt_desc = &(q->desc[pkt_idx]);
PrintDebug("VNET Bridge RX: buffer desc len: %d\n", pkt_desc->length);
- if (guest_pa_to_host_va(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
+ if (v3_gpa_to_hva(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
PrintError("Could not translate buffer address\n");
goto exit;
}
PrintDebug("VNET Bridge: Handle TX desc buf_len: %d\n", pkt_desc->length);
- if (guest_pa_to_host_va(core, pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
+ if (v3_gpa_to_hva(core, pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
PrintError("Could not translate buffer address\n");
return -1;
}
// round up to next page boundary.
vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
- if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
+ if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
PrintError("Could not translate ring descriptor address\n");
return -1;
}
- if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
+ if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
PrintError("Could not translate ring available address\n");
return -1;
}
- if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
+ if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
PrintError("Could not translate ring used address\n");
return -1;
}
}
if (buf_is_va == 1) {
- if (read_guest_va_memory(info, msg_gpa, msg_len, (uchar_t *)state->debug_buf) != msg_len) {
+ if (v3_read_gva_memory(info, msg_gpa, msg_len, (uchar_t *)state->debug_buf) != msg_len) {
PrintError("Could not read debug message\n");
return -1;
}
} else {
- if (read_guest_pa_memory(info, msg_gpa, msg_len, (uchar_t *)state->debug_buf) != msg_len) {
+ if (v3_read_gpa_memory(info, msg_gpa, msg_len, (uchar_t *)state->debug_buf) != msg_len) {
PrintError("Could not read debug message\n");
return -1;
}
PrintDebug("Shadow page fault handler: %p\n", (void*) fault_addr );
PrintDebug("Handling PDE32 Fault\n");
- if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
+ if (v3_gpa_to_hva(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
return -1;
}
if (guest_pde->large_page == 0) {
- if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) {
+ if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) {
// Machine check the guest
PrintDebug("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
v3_raise_exception(info, MC_EXCEPTION);
if ((shdw_reg->flags.alloced == 1) ||
(shdw_reg->flags.read == 1)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_pa);
+ addr_t shadow_pa = 0;
+
+ if (v3_gpa_to_hpa(info, guest_pa, &shadow_pa) == -1) {
+ PrintError("could not translate page fault address (%p)\n", (void *)guest_pa);
+ return -1;
+ }
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
if ((shdw_reg->flags.alloced == 1) ||
(shdw_reg->flags.read == 1)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_fault_pa);
+ addr_t shadow_pa = 0;
+ if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
+ PrintError("could not translate page fault address (%p)\n", (void *)guest_fault_pa);
+ return -1;
+ }
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
PrintDebug("\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));
pde32_t * guest_pd = NULL;
pde32_t * guest_pde;
- if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
+ if (v3_gpa_to_hva(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
return -1;
}
PrintDebug("64 bit Shadow page fault handler: %p\n", (void *)fault_addr);
PrintDebug("Handling PML fault\n");
- if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pml) == -1) {
+ if (v3_gpa_to_hva(info, guest_cr3, (addr_t*)&guest_pml) == -1) {
PrintError("Invalid Guest PML4E Address: 0x%p\n", (void *)guest_cr3);
return -1;
}
// Continue processing at the next level
- if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr), (addr_t *)&guest_pdp) == -1) {
+ if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr), (addr_t *)&guest_pdp) == -1) {
// Machine check the guest
PrintError("Invalid Guest PDP Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr));
v3_raise_exception(info, MC_EXCEPTION);
// Continue processing at the next level
- if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
+ if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
// Machine check the guest
PrintError("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr));
v3_raise_exception(info, MC_EXCEPTION);
// Continue processing at the next level
if (guest_pde->large_page == 0) {
- if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
+ if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
// Machine check the guest
PrintError("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
v3_raise_exception(info, MC_EXCEPTION);
if ((shdw_reg->flags.alloced == 1) ||
(shdw_reg->flags.read == 1)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_pa);
+ addr_t shadow_pa = 0;
+
+ if (v3_gpa_to_hpa(info, guest_pa, &shadow_pa) == -1) {
+ PrintError("could not translate page fault address (%p)\n", (void *)guest_pa);
+ return -1;
+ }
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
if ((shdw_reg->flags.alloced == 1) ||
(shdw_reg->flags.read == 1)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_fault_pa);
+ addr_t shadow_pa = 0;
+
+ if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
+ PrintError("could not translate page fault address (%p)\n", (void *)guest_fault_pa);
+ return -1;
+ }
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
PrintDebug("Shadow page fault handler: %p\n", (void*) fault_addr );
PrintDebug("Handling PDE32 Fault\n");
- if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
+ if (v3_gpa_to_hva(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
return -1;
}
if (guest_pde->large_page == 0) {
- if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) {
+ if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) {
// Machine check the guest
PrintDebug("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
v3_raise_exception(info, MC_EXCEPTION);
PrintDebug("guest_pa =%p\n", (void *)guest_pa);
if ((shdw_reg->flags.alloced == 1) && (shdw_reg->flags.read == 1)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_pa);
-
+ addr_t shadow_pa = 0;
+
+ if (v3_gpa_to_hpa(info, guest_pa, &shadow_pa) == -1) {
+ PrintError("could not translate page fault address (%p)\n", (void *)guest_pa);
+ return -1;
+ }
+
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
PrintDebug("\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));
if ((shdw_reg->flags.alloced == 1) &&
(shdw_reg->flags.read == 1)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_fault_pa);
+ addr_t shadow_pa = 0;
+
+
+ if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
+ PrintError("could not translate page fault address (%p)\n", (void *)guest_fault_pa);
+ return -1;
+ }
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
pde32_t * guest_pd = NULL;
pde32_t * guest_pde;
- if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
+ if (v3_gpa_to_hva(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
return -1;
}
PrintDebug("64 bit Shadow page fault handler: %p\n", (void *)fault_addr);
PrintDebug("Handling PML fault\n");
- if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pml) == -1) {
+ if (v3_gpa_to_hva(info, guest_cr3, (addr_t*)&guest_pml) == -1) {
PrintError("Invalid Guest PML4E Address: 0x%p\n", (void *)guest_cr3);
return -1;
}
// Continue processing at the next level
- if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr), (addr_t *)&guest_pdp) == -1) {
+ if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr), (addr_t *)&guest_pdp) == -1) {
// Machine check the guest
PrintError("Invalid Guest PDP Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr));
v3_raise_exception(info, MC_EXCEPTION);
// Continue processing at the next level
- if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
+ if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
// Machine check the guest
PrintError("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr));
v3_raise_exception(info, MC_EXCEPTION);
// Continue processing at the next level
if (guest_pde->large_page == 0) {
- if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
+ if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
// Machine check the guest
PrintError("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
v3_raise_exception(info, MC_EXCEPTION);
if ((shdw_reg->flags.alloced == 1) ||
(shdw_reg->flags.read == 1)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_pa);
-
+ addr_t shadow_pa = 0;
+
+ if (v3_gpa_to_hpa(info, guest_pa, &shadow_pa) == -1) {
+ PrintError("could not translate page fault address (%p)\n", (void *)guest_pa);
+ return -1;
+ }
+
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
shadow_pte->present = guest_pte->present;
if ((shdw_reg->flags.alloced == 1) ||
(shdw_reg->flags.read == 1)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, info->cpu_id, guest_fault_pa);
+ addr_t shadow_pa = 0;
+
+ if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
+ PrintError("could not translate page fault address (%p)\n", (void *)guest_fault_pa);
+ return -1;
+ }
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
if (info->mem_mode == PHYSICAL_MEM) {
- guest_pa_to_host_va(info, linear_addr, &host_addr);
+ v3_gpa_to_hva(info, linear_addr, &host_addr);
} else if (info->mem_mode == VIRTUAL_MEM) {
- guest_va_to_host_va(info, linear_addr, &host_addr);
+ v3_gva_to_hva(info, linear_addr, &host_addr);
}
V3_Print("Host Address of rip = 0x%p\n", (void *)host_addr);
- if (guest_va_to_host_va(core, get_addr_linear(core, core->rip, &(core->segments.cs)), &inst_ptr) == -1) {
+ if (v3_gva_to_hva(core, get_addr_linear(core, core->rip, &(core->segments.cs)), &inst_ptr) == -1) {
PrintError("Can't access instruction\n");
return -1;
}
// PrintDebug("Writing 0x%p\n", (void *)dst_addr);
- if (guest_va_to_host_va(core, dst_addr, &host_addr) == -1) {
+ if (v3_gva_to_hva(core, dst_addr, &host_addr) == -1) {
// either page fault or gpf...
PrintError("Could not convert Guest VA to host VA\n");
return -1;
- if (guest_va_to_host_va(core, get_addr_linear(core, core->rip, &(core->segments.cs)), &inst_ptr) == -1) {
+ if (v3_gva_to_hva(core, get_addr_linear(core, core->rip, &(core->segments.cs)), &inst_ptr) == -1) {
PrintError("Can't access instruction\n");
return -1;
}
dst_addr = get_addr_linear(core, (core->vm_regs.rsi & mask), theseg);
- if (guest_va_to_host_va(core, dst_addr, &host_addr) == -1) {
+ if (v3_gva_to_hva(core, dst_addr, &host_addr) == -1) {
// either page fault or gpf...
}
return -1;
}
- if (guest_va_to_host_va(info, gdt->base, &gdt_addr) == -1) {
+ if (v3_gva_to_hva(info, gdt->base, &gdt_addr) == -1) {
PrintError("Unable to translate GDT address\n");
return -1;
}
V3_Print("Stack at %p:\n", (void *)linear_addr);
if (info->mem_mode == PHYSICAL_MEM) {
- if (guest_pa_to_host_va(info, linear_addr, &host_addr) == -1) {
+ if (v3_gpa_to_hva(info, linear_addr, &host_addr) == -1) {
PrintError("Could not translate Stack address\n");
return;
}
} else if (info->mem_mode == VIRTUAL_MEM) {
- if (guest_va_to_host_va(info, linear_addr, &host_addr) == -1) {
+ if (v3_gva_to_hva(info, linear_addr, &host_addr) == -1) {
PrintError("Could not translate Virtual Stack address\n");
return;
}
/* GROUP 0 */
/**********************************/
-int host_va_to_host_pa(addr_t host_va, addr_t * host_pa) {
+int v3_hva_to_hpa(addr_t hva, addr_t * hpa) {
if ((os_hooks) && (os_hooks)->vaddr_to_paddr) {
- *host_pa = (addr_t)(os_hooks)->vaddr_to_paddr((void *)host_va);
+ *hpa = (addr_t)(os_hooks)->vaddr_to_paddr((void *)hva);
- if (*host_pa == 0) {
+ if (*hpa == 0) {
PrintError("In HVA->HPA: Invalid HVA(%p)->HPA lookup\n",
- (void *)host_va);
+ (void *)hva);
return -1;
}
} else {
}
-int host_pa_to_host_va(addr_t host_pa, addr_t * host_va) {
+int v3_hpa_to_hva(addr_t hpa, addr_t * hva) {
if ((os_hooks) && (os_hooks)->paddr_to_vaddr) {
- *host_va = (addr_t)(os_hooks)->paddr_to_vaddr((void *)host_pa);
+ *hva = (addr_t)(os_hooks)->paddr_to_vaddr((void *)hpa);
- if (*host_va == 0) {
+ if (*hva == 0) {
PrintError("In HPA->HVA: Invalid HPA(%p)->HVA lookup\n",
- (void *)host_pa);
+ (void *)hpa);
return -1;
}
} else {
return 0;
}
-int guest_pa_to_host_pa(struct guest_info * info, addr_t guest_pa, addr_t * host_pa) {
- struct v3_mem_region * reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_pa);
+int v3_gpa_to_hpa(struct guest_info * info, addr_t gpa, addr_t * hpa) {
+ struct v3_mem_region * reg = v3_get_mem_region(info->vm_info, info->cpu_id, gpa);
if (reg == NULL) {
PrintError("In GPA->HPA: Could not find address in shadow map (addr=%p) (NULL REGION)\n",
- (void *)guest_pa);
+ (void *)gpa);
return -1;
}
if (reg->flags.alloced == 0) {
PrintError("In GPA->HPA: Tried to translate physical address of non allocated page (addr=%p)\n",
- (void *)guest_pa);
+ (void *)gpa);
return -1;
}
- *host_pa = v3_get_shadow_addr(reg, info->cpu_id, guest_pa);
+ *hpa = (gpa - reg->guest_start) + reg->host_addr;
return 0;
}
// This is a scan of the shadow map
// For now we ignore it
//
-int host_pa_to_guest_pa(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_pa) {
- *guest_pa = 0;
+int v3_hpa_to_gpa(struct guest_info * guest_info, addr_t hpa, addr_t * gpa) {
+ *gpa = 0;
PrintError("ERROR!!! HPA->GPA currently not implemented!!!\n");
return -1;
/* !! Currently not implemented !! */
-// This will return negative until we implement host_pa_to_guest_pa()
-int host_va_to_guest_pa(struct guest_info * guest_info, addr_t host_va, addr_t * guest_pa) {
- addr_t host_pa = 0;
- *guest_pa = 0;
+// This will return negative until we implement v3_hpa_to_gpa()
+int v3_hva_to_gpa(struct guest_info * guest_info, addr_t hva, addr_t * gpa) {
+ addr_t hpa = 0;
+ *gpa = 0;
- if (host_va_to_host_pa(host_va, &host_pa) != 0) {
+ if (v3_hva_to_hpa(hva, &hpa) != 0) {
PrintError("In HVA->GPA: Invalid HVA(%p)->HPA lookup\n",
- (void *)host_va);
+ (void *)hva);
return -1;
}
- if (host_pa_to_guest_pa(guest_info, host_pa, guest_pa) != 0) {
+ if (v3_hpa_to_gpa(guest_info, hpa, gpa) != 0) {
PrintError("In HVA->GPA: Invalid HPA(%p)->GPA lookup\n",
- (void *)host_pa);
+ (void *)hpa);
return -1;
}
-int guest_pa_to_host_va(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_va) {
- addr_t host_pa = 0;
+int v3_gpa_to_hva(struct guest_info * guest_info, addr_t gpa, addr_t * hva) {
+ addr_t hpa = 0;
- *host_va = 0;
+ *hva = 0;
- if (guest_pa_to_host_pa(guest_info, guest_pa, &host_pa) != 0) {
+ if (v3_gpa_to_hpa(guest_info, gpa, &hpa) != 0) {
PrintError("In GPA->HVA: Invalid GPA(%p)->HPA lookup\n",
- (void *)guest_pa);
+ (void *)gpa);
return -1;
}
- if (host_pa_to_host_va(host_pa, host_va) != 0) {
+ if (v3_hpa_to_hva(hpa, hva) != 0) {
PrintError("In GPA->HVA: Invalid HPA(%p)->HVA lookup\n",
- (void *)host_pa);
+ (void *)hpa);
return -1;
}
}
-int guest_va_to_guest_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * guest_pa) {
+int v3_gva_to_gpa(struct guest_info * guest_info, addr_t gva, addr_t * gpa) {
v3_reg_t guest_cr3 = 0;
if (guest_info->mem_mode == PHYSICAL_MEM) {
// guest virtual address is the same as the physical
- *guest_pa = guest_va;
+ *gpa = gva;
return 0;
}
// Guest Is in Paged mode
switch (guest_info->cpu_mode) {
case PROTECTED:
- if (v3_translate_guest_pt_32(guest_info, guest_cr3, guest_va, guest_pa) == -1) {
+ if (v3_translate_guest_pt_32(guest_info, guest_cr3, gva, gpa) == -1) {
PrintDebug("Could not translate addr (%p) through 32 bit guest PT at %p\n",
- (void *)guest_va, (void *)(addr_t)guest_cr3);
+ (void *)gva, (void *)(addr_t)guest_cr3);
return -1;
}
break;
case PROTECTED_PAE:
- if (v3_translate_guest_pt_32pae(guest_info, guest_cr3, guest_va, guest_pa) == -1) {
+ if (v3_translate_guest_pt_32pae(guest_info, guest_cr3, gva, gpa) == -1) {
PrintDebug("Could not translate addr (%p) through 32 bitpae guest PT at %p\n",
- (void *)guest_va, (void *)(addr_t)guest_cr3);
+ (void *)gva, (void *)(addr_t)guest_cr3);
return -1;
}
break;
case LONG:
case LONG_32_COMPAT:
case LONG_16_COMPAT:
- if (v3_translate_guest_pt_64(guest_info, guest_cr3, guest_va, guest_pa) == -1) {
+ if (v3_translate_guest_pt_64(guest_info, guest_cr3, gva, gpa) == -1) {
PrintDebug("Could not translate addr (%p) through 64 bit guest PT at %p\n",
- (void *)guest_va, (void *)(addr_t)guest_cr3);
+ (void *)gva, (void *)(addr_t)guest_cr3);
return -1;
}
break;
*
* For now we ignore it...
*/
-int guest_pa_to_guest_va(struct guest_info * guest_info, addr_t guest_pa, addr_t * guest_va) {
- *guest_va = 0;
+int v3_gpa_to_gva(struct guest_info * guest_info, addr_t gpa, addr_t * gva) {
+ *gva = 0;
PrintError("ERROR!!: GPA->GVA Not Implemented!!\n");
return -1;
}
/**********************************/
-int guest_va_to_host_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * host_pa) {
- addr_t guest_pa = 0;
+int v3_gva_to_hpa(struct guest_info * guest_info, addr_t gva, addr_t * hpa) {
+ addr_t gpa = 0;
- *host_pa = 0;
+ *hpa = 0;
- if (guest_va_to_guest_pa(guest_info, guest_va, &guest_pa) != 0) {
+ if (v3_gva_to_gpa(guest_info, gva, &gpa) != 0) {
PrintError("In GVA->HPA: Invalid GVA(%p)->GPA lookup\n",
- (void *)guest_va);
+ (void *)gva);
return -1;
}
- if (guest_pa_to_host_pa(guest_info, guest_pa, host_pa) != 0) {
+ if (v3_gpa_to_hpa(guest_info, gpa, hpa) != 0) {
PrintError("In GVA->HPA: Invalid GPA(%p)->HPA lookup\n",
- (void *)guest_pa);
+ (void *)gpa);
return -1;
}
}
/* !! Currently not implemented !! */
-int host_pa_to_guest_va(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_va) {
- addr_t guest_pa = 0;
+int v3_hpa_to_gva(struct guest_info * guest_info, addr_t hpa, addr_t * gva) {
+ addr_t gpa = 0;
- *guest_va = 0;
+ *gva = 0;
- if (host_pa_to_guest_pa(guest_info, host_pa, &guest_pa) != 0) {
+ if (v3_hpa_to_gpa(guest_info, hpa, &gpa) != 0) {
PrintError("In HPA->GVA: Invalid HPA(%p)->GPA lookup\n",
- (void *)host_pa);
+ (void *)hpa);
return -1;
}
- if (guest_pa_to_guest_va(guest_info, guest_pa, guest_va) != 0) {
+ if (v3_gpa_to_gva(guest_info, gpa, gva) != 0) {
PrintError("In HPA->GVA: Invalid GPA(%p)->GVA lookup\n",
- (void *)guest_pa);
+ (void *)gpa);
return -1;
}
-int guest_va_to_host_va(struct guest_info * guest_info, addr_t guest_va, addr_t * host_va) {
- addr_t guest_pa = 0;
- addr_t host_pa = 0;
+int v3_gva_to_hva(struct guest_info * guest_info, addr_t gva, addr_t * hva) {
+ addr_t gpa = 0;
+ addr_t hpa = 0;
- *host_va = 0;
+ *hva = 0;
- if (guest_va_to_guest_pa(guest_info, guest_va, &guest_pa) != 0) {
+ if (v3_gva_to_gpa(guest_info, gva, &gpa) != 0) {
PrintError("In GVA->HVA: Invalid GVA(%p)->GPA lookup\n",
- (void *)guest_va);
+ (void *)gva);
return -1;
}
- if (guest_pa_to_host_pa(guest_info, guest_pa, &host_pa) != 0) {
+ if (v3_gpa_to_hpa(guest_info, gpa, &hpa) != 0) {
PrintError("In GVA->HVA: Invalid GPA(%p)->HPA lookup\n",
- (void *)guest_pa);
+ (void *)gpa);
return -1;
}
- if (host_pa_to_host_va(host_pa, host_va) != 0) {
+ if (v3_hpa_to_hva(hpa, hva) != 0) {
PrintError("In GVA->HVA: Invalid HPA(%p)->HVA lookup\n",
- (void *)host_pa);
+ (void *)hpa);
return -1;
}
/* !! Currently not implemented !! */
-int host_va_to_guest_va(struct guest_info * guest_info, addr_t host_va, addr_t * guest_va) {
- addr_t host_pa = 0;
- addr_t guest_pa = 0;
+int v3_hva_to_gva(struct guest_info * guest_info, addr_t hva, addr_t * gva) {
+ addr_t hpa = 0;
+ addr_t gpa = 0;
- *guest_va = 0;
+ *gva = 0;
- if (host_va_to_host_pa(host_va, &host_pa) != 0) {
+ if (v3_hva_to_hpa(hva, &hpa) != 0) {
PrintError("In HVA->GVA: Invalid HVA(%p)->HPA lookup\n",
- (void *)host_va);
+ (void *)hva);
return -1;
}
- if (host_pa_to_guest_pa(guest_info, host_pa, &guest_pa) != 0) {
+ if (v3_hpa_to_gpa(guest_info, hpa, &gpa) != 0) {
PrintError("In HVA->GVA: Invalid HPA(%p)->GPA lookup\n",
- (void *)host_va);
+	       (void *)hpa);
return -1;
}
- if (guest_pa_to_guest_va(guest_info, guest_pa, guest_va) != 0) {
+ if (v3_gpa_to_gva(guest_info, gpa, gva) != 0) {
PrintError("In HVA->GVA: Invalid GPA(%p)->GVA lookup\n",
- (void *)guest_pa);
+ (void *)gpa);
return -1;
}
/* This is a straight address conversion + copy,
* except for the tiny little issue of crossing page boundries.....
*/
-int read_guest_va_memory(struct guest_info * guest_info, addr_t guest_va, int count, uchar_t * dest) {
- addr_t cursor = guest_va;
+int v3_read_gva_memory(struct guest_info * guest_info, addr_t gva, int count, uchar_t * dest) {
+ addr_t cursor = gva;
int bytes_read = 0;
addr_t host_addr = 0;
- if (guest_va_to_host_va(guest_info, cursor, &host_addr) != 0) {
+ if (v3_gva_to_hva(guest_info, cursor, &host_addr) != 0) {
PrintDebug("Invalid GVA(%p)->HVA lookup\n", (void *)cursor);
return bytes_read;
}
/* This is a straight address conversion + copy,
* except for the tiny little issue of crossing page boundries.....
*/
-int read_guest_pa_memory(struct guest_info * guest_info, addr_t guest_pa, int count, uchar_t * dest) {
- addr_t cursor = guest_pa;
+int v3_read_gpa_memory(struct guest_info * guest_info, addr_t gpa, int count, uchar_t * dest) {
+ addr_t cursor = gpa;
int bytes_read = 0;
while (count > 0) {
int bytes_to_copy = (dist_to_pg_edge > count) ? count : dist_to_pg_edge;
addr_t host_addr = 0;
- if (guest_pa_to_host_va(guest_info, cursor, &host_addr) != 0) {
+ if (v3_gpa_to_hva(guest_info, cursor, &host_addr) != 0) {
return bytes_read;
}
/* This is a straight address conversion + copy,
* except for the tiny little issue of crossing page boundries.....
*/
-int write_guest_pa_memory(struct guest_info * guest_info, addr_t guest_pa, int count, uchar_t * src) {
- addr_t cursor = guest_pa;
+int v3_write_gpa_memory(struct guest_info * guest_info, addr_t gpa, int count, uchar_t * src) {
+ addr_t cursor = gpa;
int bytes_written = 0;
while (count > 0) {
int bytes_to_copy = (dist_to_pg_edge > count) ? count : dist_to_pg_edge;
addr_t host_addr;
- if (guest_pa_to_host_va(guest_info, cursor, &host_addr) != 0) {
+ if (v3_gpa_to_hva(guest_info, cursor, &host_addr) != 0) {
return bytes_written;
}
* redistribute, and modify it as specified in the file "V3VEE_LICENSE".
*/
+#include <palacios/vm_guest_mem.h>
+
static int pre_config_pc_core(struct guest_info * info, v3_cfg_tree_t * cfg) {
{
extern uint8_t v3_vgabios_start[];
extern uint8_t v3_vgabios_end[];
-
- addr_t vgabios_dst = v3_get_shadow_addr(&(vm->mem_map.base_region), V3_MEM_CORE_ANY, VGABIOS_START);
- memcpy(V3_VAddr((void *)vgabios_dst), v3_vgabios_start, v3_vgabios_end - v3_vgabios_start);
+ addr_t vgabios_dst = 0;
+
+ if (v3_gpa_to_hpa(&(vm->cores[0]), VGABIOS_START, &vgabios_dst) == -1) {
+ PrintError("Could not find VGABIOS destination address\n");
+ return -1;
+ }
+
+ memcpy(V3_VAddr((void *)vgabios_dst), v3_vgabios_start,
+ v3_vgabios_end - v3_vgabios_start);
}
/* layout rombios */
{
extern uint8_t v3_rombios_start[];
extern uint8_t v3_rombios_end[];
+ addr_t rombios_dst = 0;
+
+ if (v3_gpa_to_hpa(&(vm->cores[0]), ROMBIOS_START, &rombios_dst) == -1) {
+ PrintError("Could not find ROMBIOS destination address\n");
+ return -1;
+ }
- addr_t rombios_dst = v3_get_shadow_addr(&(vm->mem_map.base_region), V3_MEM_CORE_ANY, ROMBIOS_START);
- memcpy(V3_VAddr((void *)rombios_dst), v3_rombios_start, v3_rombios_end - v3_rombios_start);
+ memcpy(V3_VAddr((void *)rombios_dst), v3_rombios_start,
+ v3_rombios_end - v3_rombios_start);
}
return 0;
struct x86_instr dec_instr;
if (info->mem_mode == PHYSICAL_MEM) {
- ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
} else {
- ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
}
if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
struct x86_instr dec_instr;
if (info->mem_mode == PHYSICAL_MEM) {
- ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
} else {
- ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
}
struct x86_instr dec_instr;
if (info->mem_mode == PHYSICAL_MEM) {
- ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
} else {
- ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
}
if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
struct x86_instr dec_instr;
if (info->mem_mode == PHYSICAL_MEM) {
- ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
} else {
- ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
}
if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(info);
if (info->mem_mode == PHYSICAL_MEM) {
- ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
} else {
- ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
}
if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
return -1;
}
- host_addr = v3_get_shadow_addr(region, info->cpu_id, fault_addr);
-
+ if (v3_gpa_to_hpa(info, fault_addr, &host_addr) == -1) {
+ PrintError("Could not translate fault address (%p)\n", (void *)fault_addr);
+ return -1;
+ }
+
// Lookup the correct PDE address based on the PAGING MODE
if (info->shdw_pg_mode == SHADOW_PAGING) {
pde = CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
return -1;
}
- host_addr = v3_get_shadow_addr(region, info->cpu_id, fault_addr);
+ if (v3_gpa_to_hpa(info, fault_addr, &host_addr) == -1) {
+ PrintError("Could not translate fault address (%p)\n", (void *)fault_addr);
+ return -1;
+ }
// Lookup the correct PDPE address based on the PAGING MODE
if (info->shdw_pg_mode == SHADOW_PAGING) {
return -1;
}
- host_addr = v3_get_shadow_addr(region, info->cpu_id, fault_addr);
- //
+ if (v3_gpa_to_hpa(info, fault_addr, &host_addr) == -1) {
+ PrintError("Error Could not translate fault addr (%p)\n", (void *)fault_addr);
+ return -1;
+ }
+
// Lookup the correct PML address based on the PAGING MODE
if (info->shdw_pg_mode == SHADOW_PAGING) {
// figure out addresses here....
if (info->mem_mode == PHYSICAL_MEM) {
- if (guest_pa_to_host_va(info, dec_instr->src_operand.operand, &src_addr) == -1) {
+ if (v3_gpa_to_hva(info, dec_instr->src_operand.operand, &src_addr) == -1) {
PrintError("Could not translate write Source (Physical) to host VA\n");
return -1;
}
} else {
- if (guest_va_to_host_va(info, dec_instr->src_operand.operand, &src_addr) == -1) {
+ if (v3_gva_to_hva(info, dec_instr->src_operand.operand, &src_addr) == -1) {
PrintError("Could not translate write Source (Virtual) to host VA\n");
return -1;
}
PrintDebug("GVA=%p Dst_Addr=%p\n", (void *)write_gva, (void *)dst_addr);
if (info->mem_mode == PHYSICAL_MEM) {
- ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
} else {
- ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
}
if (ret == -1) {
if (dec_instr.src_operand.type == MEM_OPERAND) {
if (info->mem_mode == PHYSICAL_MEM) {
- if (guest_pa_to_host_va(info, dec_instr.src_operand.operand, &src_addr) == -1) {
+ if (v3_gpa_to_hva(info, dec_instr.src_operand.operand, &src_addr) == -1) {
PrintError("Could not translate write Source (Physical) to host VA\n");
return -1;
}
} else {
- if (guest_va_to_host_va(info, dec_instr.src_operand.operand, &src_addr) == -1) {
+ if (v3_gva_to_hva(info, dec_instr.src_operand.operand, &src_addr) == -1) {
PrintError("Could not translate write Source (Virtual) to host VA\n");
return -1;
}
PrintDebug("GVA=%p\n", (void *)read_gva);
if (info->mem_mode == PHYSICAL_MEM) {
- ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
} else {
- ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
}
if (ret == -1) {
if (dec_instr.dst_operand.type == MEM_OPERAND) {
if (info->mem_mode == PHYSICAL_MEM) {
- if (guest_pa_to_host_va(info, dec_instr.dst_operand.operand, &dst_addr) == -1) {
+ if (v3_gpa_to_hva(info, dec_instr.dst_operand.operand, &dst_addr) == -1) {
PrintError("Could not translate Read Destination (Physical) to host VA\n");
return -1;
}
} else {
- if (guest_va_to_host_va(info, dec_instr.dst_operand.operand, &dst_addr) == -1) {
+ if (v3_gva_to_hva(info, dec_instr.dst_operand.operand, &dst_addr) == -1) {
PrintError("Could not translate Read Destination (Virtual) to host VA\n");
return -1;
}
-
-addr_t v3_get_shadow_addr(struct v3_mem_region * reg, uint16_t core_id, addr_t guest_addr) {
- if (reg && (reg->flags.alloced == 1)) {
- return (guest_addr - reg->guest_start) + reg->host_addr;
- } else {
- // PrintError("MEM Region Invalid\n");
- return 0;
- }
-
-}
-
-
-
void v3_print_mem_map(struct v3_vm_info * vm) {
struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));
struct v3_mem_region * reg = &(vm->mem_map.base_region);
#include <palacios/vm_guest.h>
#include <palacios/vmm_mem_hook.h>
#include <palacios/vmm_emulator.h>
+#include <palacios/vm_guest_mem.h>
struct mem_hook {
if (reg->flags.alloced == 0) {
op_addr = hook->hook_hva;
} else {
- op_addr = (addr_t)V3_VAddr((void *)v3_get_shadow_addr(reg, info->cpu_id, guest_pa));
+ if (v3_gpa_to_hva(info, guest_pa, &op_addr) == -1) {
+ PrintError("Could not translate hook address (%p)\n", (void *)guest_pa);
+ return -1;
+ }
}
int ret;
- if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
+ if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
(void *)guest_pde_pa);
return -1;
addr_t large_page_pa = (addr_t)guest_pte_pa;
addr_t large_page_va = 0;
- if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
+ if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
PrintError("Could not get virtual address of Guest Page 4MB (PA=%p)\n",
(void *)large_page_va);
return -1;
pte32_t * guest_pte = NULL;
addr_t page_pa;
- if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
+ if (v3_gpa_to_hva(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
(void *)guest_pte_pa);
return -1;
} else {
addr_t page_va;
- if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
+ if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
(void *)page_pa);
return -1;
addr_t guest_pde_pa = 0;
int ret = 0;
- if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
+ if (v3_gpa_to_hva(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
(void *)guest_pdpe_pa);
return -1;
pde32pae_t * guest_pde = NULL;
addr_t guest_pte_pa = 0;
- if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
+ if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
(void *)guest_pde_pa);
return -1;
addr_t large_page_pa = (addr_t)guest_pte_pa;
addr_t large_page_va = 0;
- if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
+ if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
(void *)large_page_va);
pte32pae_t * guest_pte = NULL;
addr_t page_pa;
- if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
+ if (v3_gpa_to_hva(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
(void *)guest_pte_pa);
return -1;
} else {
addr_t page_va;
- if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
+ if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
(void *)page_pa);
return -1;
addr_t guest_pdpe_pa = 0;
int ret = 0;
- if (guest_pa_to_host_va(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
+ if (v3_gpa_to_hva(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
(void *)guest_pml4_pa);
return -1;
pdpe64_t * guest_pdp = NULL;
addr_t guest_pde_pa = 0;
- if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
+ if (v3_gpa_to_hva(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
(void *)guest_pdpe_pa);
return -1;
addr_t large_page_pa = (addr_t)guest_pde_pa;
addr_t large_page_va = 0;
- if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
+ if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
PrintDebug("Could not get virtual address of Guest Page 1GB (PA=%p)\n",
(void *)large_page_va);
pde64_t * guest_pde = NULL;
addr_t guest_pte_pa = 0;
- if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
+ if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
(void *)guest_pde_pa);
return -1;
addr_t large_page_pa = (addr_t)guest_pte_pa;
addr_t large_page_va = 0;
- if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
+ if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
(void *)large_page_va);
pte64_t * guest_pte = NULL;
addr_t page_pa;
- if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
+ if (v3_gpa_to_hva(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
(void *)guest_pte_pa);
return -1;
} else {
addr_t page_va;
- if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
+ if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
(void *)page_pa);
return -1;
return -1;
}
- if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
+ if (v3_gpa_to_hva(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
(void *)guest_pde_pa);
return -1;
addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
addr_t large_page_va = 0;
- if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
+ if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
PrintDebug("Could not get virtual address of Guest 4MB Page (PA=%p)\n",
(void *)large_page_pa);
// We'll let it through for data pages because they may be unmapped or hooked
addr_t pte_pa = BASE_TO_PAGE_ADDR(guest_pde[i].pt_base_addr);
pte32_t * tmp_pte = NULL;
- if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
+ if (v3_gpa_to_hva(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
(void *)pte_pa);
return -1;
addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
addr_t page_va = 0;
- if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
+ if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
(void *)page_pa);
// We'll let it through for data pages because they may be unmapped or hooked
return -1;
}
- if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdpe) == -1) {
+ if (v3_gpa_to_hva(info, guest_pdpe_pa, (addr_t *)&guest_pdpe) == -1) {
PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
(void *)guest_pdpe_pa);
return -1;
addr_t pde_pa = BASE_TO_PAGE_ADDR(guest_pdpe[i].pd_base_addr);
pde32pae_t * tmp_pde = NULL;
- if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
+ if (v3_gpa_to_hva(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
PrintError("Could not get virtual address of Guest PDE32PAE (PA=%p)\n",
(void *)pde_pa);
return -1;
addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
addr_t large_page_va = 0;
- if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
+ if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
PrintDebug("Could not get virtual address of Guest 2MB Page (PA=%p)\n",
(void *)large_page_pa);
// We'll let it through for data pages because they may be unmapped or hooked
addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
pte32pae_t * tmp_pte = NULL;
- if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
+ if (v3_gpa_to_hva(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
PrintError("Could not get virtual address of Guest PTE32PAE (PA=%p)\n",
(void *)pte_pa);
return -1;
addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
addr_t page_va = 0;
- if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
+ if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
(void *)page_pa);
// We'll let it through for data pages because they may be unmapped or hooked
return -1;
}
- if (guest_pa_to_host_va(info, guest_pml_pa, (addr_t *)&guest_pml) == -1) {
+ if (v3_gpa_to_hva(info, guest_pml_pa, (addr_t *)&guest_pml) == -1) {
PrintError("Could not get virtual address of Guest PML464 (PA=%p)\n",
(void *)guest_pml);
return -1;
pdpe64_t * tmp_pdpe = NULL;
- if (guest_pa_to_host_va(info, pdpe_pa, (addr_t *)&tmp_pdpe) == -1) {
+ if (v3_gpa_to_hva(info, pdpe_pa, (addr_t *)&tmp_pdpe) == -1) {
PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
(void *)pdpe_pa);
return -1;
addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdpe->page_base_addr);
addr_t large_page_va = 0;
- if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
+ if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
PrintDebug("Could not get virtual address of Guest 1GB page (PA=%p)\n",
(void *)large_page_pa);
// We'll let it through for data pages because they may be unmapped or hooked
addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
pde64_t * tmp_pde = NULL;
- if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
+ if (v3_gpa_to_hva(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
PrintError("Could not get virtual address of Guest PDE64 (PA=%p)\n",
(void *)pde_pa);
return -1;
addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
addr_t large_page_va = 0;
- if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
+ if (v3_gpa_to_hva(info, large_page_pa, &large_page_va) == -1) {
PrintDebug("Could not get virtual address of Guest 2MB page (PA=%p)\n",
(void *)large_page_pa);
// We'll let it through for data pages because they may be unmapped or hooked
addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
pte64_t * tmp_pte = NULL;
- if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
+ if (v3_gpa_to_hva(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
PrintError("Could not get virtual address of Guest PTE64 (PA=%p)\n",
(void *)pte_pa);
return -1;
addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
addr_t page_va = 0;
- if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
+ if (v3_gpa_to_hva(info, page_pa, &page_va) == -1) {
PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
(void *)page_pa);
// We'll let it through for data pages because they may be unmapped or hooked
}
if (v3_get_vm_mem_mode(core) == PHYSICAL_MEM) {
- ret = read_guest_pa_memory(core, get_addr_linear(core, core->rip, &(core->segments.cs)), 15, instr);
+ ret = v3_read_gpa_memory(core, get_addr_linear(core, core->rip, &(core->segments.cs)), 15, instr);
} else {
- ret = read_guest_va_memory(core, get_addr_linear(core, core->rip, &(core->segments.cs)), 15, instr);
+ ret = v3_read_gva_memory(core, get_addr_linear(core, core->rip, &(core->segments.cs)), 15, instr);
}
if (ret == -1) {
addr_t sym_gva = sym_start_gva + (sizeof(struct v3_symbol_def32) * i);
- if (guest_va_to_host_va(core, sym_gva, (addr_t *)&(tmp_symbol)) == -1) {
+ if (v3_gva_to_hva(core, sym_gva, (addr_t *)&(tmp_symbol)) == -1) {
PrintError("Could not locate symbiotic symbol definition\n");
continue;
}
- if (guest_va_to_host_va(core, tmp_symbol->name_gva, (addr_t *)&(sym_name)) == -1) {
+ if (v3_gva_to_hva(core, tmp_symbol->name_gva, (addr_t *)&(sym_name)) == -1) {
PrintError("Could not locate symbiotic symbol name\n");
continue;
}
#define VMXASSIST_GDT 0x10000
addr_t vmxassist_gdt = 0;
- if (guest_pa_to_host_va(info, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
+ if (v3_gpa_to_hva(info, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
PrintError("Could not find VMXASSIST GDT destination\n");
return -1;
}
extern uint8_t v3_vmxassist_end[];
addr_t vmxassist_dst = 0;
- if (guest_pa_to_host_va(info, VMXASSIST_START, &vmxassist_dst) == -1) {
+ if (v3_gpa_to_hva(info, VMXASSIST_START, &vmxassist_dst) == -1) {
PrintError("Could not find VMXASSIST destination\n");
return -1;
}
- if (guest_pa_to_host_va(info, VMXASSIST_BASE, (addr_t *)&hdr) == -1) {
+ if (v3_gpa_to_hva(info, VMXASSIST_BASE, (addr_t *)&hdr) == -1) {
PrintError("Could not translate address for vmxassist header\n");
return -1;
}
}
- if (guest_pa_to_host_va(info, (addr_t)(hdr->old_ctx_gpa), (addr_t *)&(old_ctx)) == -1) {
+ if (v3_gpa_to_hva(info, (addr_t)(hdr->old_ctx_gpa), (addr_t *)&(old_ctx)) == -1) {
PrintError("Could not translate address for VMXASSIST old context\n");
return -1;
}
- if (guest_pa_to_host_va(info, (addr_t)(hdr->new_ctx_gpa), (addr_t *)&(new_ctx)) == -1) {
+ if (v3_gpa_to_hva(info, (addr_t)(hdr->new_ctx_gpa), (addr_t *)&(new_ctx)) == -1) {
PrintError("Could not translate address for VMXASSIST new context\n");
return -1;
}
- if (guest_va_to_host_va(core, guest_va, &host_addr) == -1) {
+ if (v3_gva_to_hva(core, guest_va, &host_addr) == -1) {
PrintError("Could not convert Guest VA to host VA\n");
return -1;
}
PrintDebug("OUTS size=%d for %ld steps\n", write_size, rep_num);
- if (guest_va_to_host_va(core, guest_va, &host_addr) == -1) {
+ if (v3_gva_to_hva(core, guest_va, &host_addr) == -1) {
PrintError("Could not convert guest VA to host VA\n");
return -1;
}