1 #include <geekos/vm_guest_mem.c>
3 #include <geekos/vmm_paging.h>
5 extern struct vmm_os_hooks * os_hooks;
/*
 * guest_va_to_guest_pa -- translate a guest virtual address into a guest
 * physical address by walking the *guest's own* page tables.
 *
 * Under SHADOW_PAGING the walk dispatches on the guest CPU mode:
 *   - real/unpaged modes: guest VA == guest PA (identity), per the comment
 *     retained below;
 *   - 32-bit protected paging: read the guest PDE base from the guest CR3
 *     snapshot in shadow_page_state, map each guest-physical table into a
 *     host virtual address via guest_pa_to_host_va(), then do a two-level
 *     PDE32/PTE32 lookup to produce *guest_pa.
 * A NESTED_PAGING branch also exists (body not visible here).
 *
 * Returns: the visible helper checks treat -1 as failure; presumably this
 * function returns 0 on success and -1 on failure -- TODO confirm against
 * the elided return statements.
 *
 * NOTE(review): many lines of this function (local declarations of pde/pte/
 * tmp_pa, error returns, remaining switch cases, closing braces) are elided
 * in this view; comments below describe only what is visible.
 */
9 int guest_va_to_guest_pa(guest_info_t * guest_info, addr_t guest_va, addr_t * guest_pa) {
10 if (guest_info->page_mode == SHADOW_PAGING) {
11 switch (guest_info->cpu_mode) {
16 // guest virtual address is the same as the physical
/* 32-bit paging: CR3 holds the guest-physical address of the page directory. */
23 addr_t guest_pde = CR3_TO_PDE32(guest_info->shadow_page_state.guest_cr3);
/* Map the guest-physical page directory into host VA space so we can read it. */
25 if (guest_pa_to_host_va(guest_info, guest_pde, (addr_t *)&pde) == -1) {
/* First-level lookup: PDE entry for guest_va yields tmp_pa (the PT's guest PA). */
29 switch (pde32_lookup(pde, guest_va, &tmp_pa)) {
/* Map the guest-physical page table before the second-level lookup. */
40 if (guest_pa_to_host_va(guest_info, tmp_pa, (addr_t*)&pte) == -1) {
/* Second-level lookup: PTE entry for guest_va produces the final guest PA. */
44 if (pte32_lookup(pte, guest_va, guest_pa) != 0) {
/* PAE paging is a distinct case (handler not visible in this view). */
54 case PROTECTED_PAE_PG:
/* Nested paging: hardware walks guest tables; branch body not visible here. */
65 } else if (guest_info->page_mode == NESTED_PAGING) {
/*
 * guest_pa_to_host_va -- translate a guest physical address into a host
 * virtual address, composing two visible steps:
 *   1. guest_pa_to_host_pa(): guest PA -> host PA via the shadow map;
 *   2. host_pa_to_host_va():  host PA  -> host VA via OS hooks.
 * Each step's failure is checked (!= 0); the error returns themselves are on
 * elided lines. The local `host_pa` is declared on an elided line.
 * Presumably returns 0 on success, -1 on failure (callers above compare
 * against -1) -- TODO confirm against the elided returns.
 */
83 int guest_pa_to_host_va(guest_info_t * guest_info, addr_t guest_pa, addr_t * host_va) {
86 if (guest_pa_to_host_pa(guest_info, guest_pa, &host_pa) != 0) {
90 if (host_pa_to_host_va(host_pa, host_va) != 0) {
/*
 * guest_pa_to_host_pa -- translate a guest physical address into a host
 * physical address by consulting the guest's shadow memory map.
 * The visible check treats any region whose lookup result is not
 * HOST_REGION_PHYSICAL_MEMORY (e.g. unmapped or device regions) as a
 * failure; the failure return itself is on an elided line.
 * On success lookup_shadow_map_addr() has written *host_pa.
 */
98 int guest_pa_to_host_pa(guest_info_t * guest_info, addr_t guest_pa, addr_t * host_pa) {
99 // we use the shadow map here...
100 if (lookup_shadow_map_addr(guest_info->shadow_map, guest_pa, host_pa) != HOST_REGION_PHYSICAL_MEMORY) {
/*
 * host_va_to_host_pa -- translate a host virtual address into a host
 * physical address by delegating to the host OS hook vaddr_to_paddr().
 * NOTE(review): no visible NULL check on os_hooks or on the hook's result;
 * any validation/return statements are on elided lines -- confirm the hook
 * contract before relying on *host_pa being valid.
 */
110 int host_va_to_host_pa(addr_t host_va, addr_t * host_pa) {
111 *host_pa = os_hooks->vaddr_to_paddr(host_va);
/*
 * host_pa_to_host_va -- inverse of host_va_to_host_pa(): translate a host
 * physical address into a host virtual address via the OS hook
 * paddr_to_vaddr().
 * NOTE(review): as with the VA->PA direction, validation and the return
 * statement are on elided lines; confirm the hook's failure convention.
 */
121 int host_pa_to_host_va(addr_t host_pa, addr_t * host_va) {
122 *host_va = os_hooks->paddr_to_vaddr(host_pa);