2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
/* Forward declarations of the per-level page table lookup helpers defined
 * later in this file (32 bit, 32 bit PAE, and 64 bit long-mode variants). */
static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry);
static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry);

static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry);
static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry);
static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry);

static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry);
static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry);
static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry);
static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry);

// USE_VMM_PAGING_DEBUG gates the debug helper definitions in the header below.
#define USE_VMM_PAGING_DEBUG
// All of the debug functions defined in vmm_paging.h are implemented in this file
#include "vmm_paging_debug.h"
#undef USE_VMM_PAGING_DEBUG
/* Tears down a 32 bit (non-PAE) page table hierarchy: for each PDE, frees
 * the page table page it points to, then frees the page directory itself.
 * NOTE(review): interior lines (the present-entry check and the
 * V3_FreePage of each pte page) are elided in this view -- confirm. */
void delete_page_tables_32(pde32_t * pde) {
for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
// We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
PrintDebug("PTE base addr %x \n", pde[i].pt_base_addr);
// NOTE(review): the (uint_t) cast truncates the address on 64 bit hosts
// before widening back to addr_t -- looks intentional for 32 bit PTs, confirm.
pte32_t * pte = (pte32_t *)((addr_t)(uint_t)(pde[i].pt_base_addr << PAGE_POWER));
PrintDebug("Deleting PTE %d (%p)\n", i, pte);
PrintDebug("Deleting PDE (%p)\n", pde);
V3_FreePage(V3_PAddr(pde));
// Tear-down of a 32 bit PAE hierarchy -- not implemented yet; logs and returns.
void delete_page_tables_32PAE(pdpe32pae_t * pdpe) {
PrintError("Unimplemented function\n");
// Tear-down of a 64 bit (long mode) hierarchy -- not implemented yet; logs and returns.
void delete_page_tables_64(pml4e64_t * pml4) {
PrintError("Unimplemented function\n");
84 static int translate_pt_32_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
85 addr_t * paddr = (addr_t *)private_data;
92 *paddr = page_pa + PAGE_OFFSET_4MB(vaddr);
95 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
98 PrintError("Inavlid page type (%s) in tranlate pt 32 callback\n", v3_page_type_to_str(type));
103 static int translate_pt_32pae_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
104 addr_t * paddr = (addr_t *)private_data;
112 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
115 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
118 PrintError("Inavlid page type (%s) in translate pt 32pae callback\n", v3_page_type_to_str(type));
123 static int translate_pt_64_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
124 addr_t * paddr = (addr_t *)private_data;
133 *paddr = page_pa + PAGE_OFFSET_1GB(vaddr);
136 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
139 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
142 PrintError("Inavlid page type (%s) in translate pt 64 callback\n", v3_page_type_to_str(type));
148 int v3_translate_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
149 return v3_drill_host_pt_32(host_cr3, vaddr, translate_pt_32_cb, paddr);
151 int v3_translate_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
152 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, translate_pt_32_cb, paddr);
156 int v3_translate_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
157 return v3_drill_host_pt_32pae(host_cr3, vaddr, translate_pt_32pae_cb, paddr);
159 int v3_translate_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
160 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, translate_pt_32pae_cb, paddr);
164 int v3_translate_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
165 return v3_drill_host_pt_64(host_cr3, vaddr, translate_pt_64_cb, paddr);
167 int v3_translate_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
168 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, translate_pt_64_cb, paddr);
/* Context handed to find_pt_cb by the v3_find_*_page wrappers. */
struct pt_find_data {
// NOTE(review): a page_type_t `type` field (read by find_pt_cb as
// pt_data->type) is expected here but not visible in this view -- confirm.
addr_t * pt_page_addr;   // out: receives the matching page's address
/* Drill callback for the v3_find_*_page wrappers: when the walk reaches the
 * paging level recorded in pt_find_data, store that page's address through
 * pt_page_addr (the return statements of this function are elided in this view). */
static int find_pt_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
struct pt_find_data * pt_data = (struct pt_find_data *)private_data;
if (type == pt_data->type) {
*(pt_data->pt_page_addr) = page_ptr;
190 int v3_find_host_pt_32_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
191 struct pt_find_data data;
194 data.pt_page_addr = page_addr;
196 return v3_drill_host_pt_32(host_cr3, vaddr, find_pt_cb, &data);
199 int v3_find_host_pt_32pae_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
200 struct pt_find_data data;
203 data.pt_page_addr = page_addr;
205 return v3_drill_host_pt_32pae(host_cr3, vaddr, find_pt_cb, &data);
208 int v3_find_host_pt_64_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
209 struct pt_find_data data;
212 data.pt_page_addr = page_addr;
214 return v3_drill_host_pt_64(host_cr3, vaddr, find_pt_cb, &data);
216 int v3_find_guest_pt_32_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
217 struct pt_find_data data;
220 data.pt_page_addr = page_addr;
222 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, find_pt_cb, &data);
225 int v3_find_guest_pt_32pae_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
226 struct pt_find_data data;
229 data.pt_page_addr = page_addr;
231 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, find_pt_cb, &data);
234 int v3_find_guest_pt_64_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
235 struct pt_find_data data;
238 data.pt_page_addr = page_addr;
240 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, find_pt_cb, &data);
246 * Page Table Access Checks
/* Context handed to the check_pt_*_cb callbacks by the v3_check_* wrappers. */
struct pt_check_data {
pf_error_t access_type;               // the access (read/write/user) being tested
pt_access_status_t * access_status;   // out: result for the last level checked
256 static int check_pt_32_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
257 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
261 *(chk_data->access_status) = v3_can_access_pde32((pde32_t *)page_ptr, vaddr, chk_data->access_type);
264 *(chk_data->access_status) = v3_can_access_pte32((pte32_t *)page_ptr, vaddr, chk_data->access_type);
270 PrintError("Inavlid page type (%s) in check pt 32 callback\n", v3_page_type_to_str(type));
274 if (chk_data->access_status != PT_ACCESS_OK) {
282 static int check_pt_32pae_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
283 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
287 *(chk_data->access_status) = v3_can_access_pdpe32pae((pdpe32pae_t *)page_ptr, vaddr, chk_data->access_type);
290 *(chk_data->access_status) = v3_can_access_pde32pae((pde32pae_t *)page_ptr, vaddr, chk_data->access_type);
293 *(chk_data->access_status) = v3_can_access_pte32pae((pte32pae_t *)page_ptr, vaddr, chk_data->access_type);
299 PrintError("Inavlid page type (%s) in check pt 32pae callback\n", v3_page_type_to_str(type));
303 if (chk_data->access_status != PT_ACCESS_OK) {
311 static int check_pt_64_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
312 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
316 *(chk_data->access_status) = v3_can_access_pml4e64((pml4e64_t *)page_ptr, vaddr, chk_data->access_type);
319 *(chk_data->access_status) = v3_can_access_pdpe64((pdpe64_t *)page_ptr, vaddr, chk_data->access_type);
322 *(chk_data->access_status) = v3_can_access_pde64((pde64_t *)page_ptr, vaddr, chk_data->access_type);
325 *(chk_data->access_status) = v3_can_access_pte64((pte64_t *)page_ptr, vaddr, chk_data->access_type);
332 PrintError("Inavlid page type (%s) in check pt 64 callback\n", v3_page_type_to_str(type));
336 if (chk_data->access_status != PT_ACCESS_OK) {
345 int v3_check_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
346 struct pt_check_data access_data;
348 access_data.access_type = access_type;
349 access_data.access_status = access_status;
351 return v3_drill_host_pt_32(host_cr3, vaddr, check_pt_32_cb, &access_data);
354 int v3_check_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
355 struct pt_check_data access_data;
357 access_data.access_type = access_type;
358 access_data.access_status = access_status;
360 return v3_drill_host_pt_32pae(host_cr3, vaddr, check_pt_32pae_cb, &access_data);
365 int v3_check_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
366 struct pt_check_data access_data;
368 access_data.access_type = access_type;
369 access_data.access_status = access_status;
371 return v3_drill_host_pt_64(host_cr3, vaddr, check_pt_64_cb, &access_data);
376 int v3_check_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
377 pf_error_t access_type, pt_access_status_t * access_status) {
378 struct pt_check_data access_data;
380 access_data.access_type = access_type;
381 access_data.access_status = access_status;
383 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, check_pt_32_cb, &access_data);
390 int v3_check_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
391 pf_error_t access_type, pt_access_status_t * access_status) {
392 struct pt_check_data access_data;
394 access_data.access_type = access_type;
395 access_data.access_status = access_status;
397 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, check_pt_32pae_cb, &access_data);
402 int v3_check_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
403 pf_error_t access_type, pt_access_status_t * access_status) {
404 struct pt_check_data access_data;
406 access_data.access_type = access_type;
407 access_data.access_status = access_status;
409 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, check_pt_64_cb, &access_data);
415 * PAGE TABLE LOOKUP FUNCTIONS
418 * The value of entry is a return type:
419 * Page not present: *entry = 0
420 * Large Page: *entry = translated physical address (byte granularity)
421 * PTE entry: *entry is the address of the PTE Page
426 * 32 bit Page Table lookup functions
/* Look up addr's entry in a 32 bit page directory.
 * On PT_ENTRY_LARGE_PAGE, *entry holds the 4MB page's base physical address;
 * on PT_ENTRY_PAGE, *entry holds the physical address of the next-level
 * page table page. */
static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
if (!pde_entry->present) {
return PT_ENTRY_NOT_PRESENT;
} else if (pde_entry->large_page) {
// reinterpret the entry with the 4MB-page bit layout
pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
*entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
return PT_ENTRY_LARGE_PAGE;
*entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
return PT_ENTRY_PAGE;
/* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
// Returns PT_ENTRY_NOT_PRESENT, or PT_ENTRY_PAGE with *entry set to the
// 4KB page's base physical address.
static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
if (!pte_entry->present) {
// PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
return PT_ENTRY_NOT_PRESENT;
*entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
return PT_ENTRY_PAGE;
471 * 32 bit PAE Page Table lookup functions
/* Look up addr's entry in a 32 bit PAE page directory pointer table.
 * On PT_ENTRY_PAGE, *entry holds the physical address of the page
 * directory page (PDPEs have no large-page form in 32 bit PAE). */
static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);
if (!pdpe_entry->present) {
return PT_ENTRY_NOT_PRESENT;
*entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
return PT_ENTRY_PAGE;
/* Look up addr's entry in a 32 bit PAE page directory.
 * On PT_ENTRY_LARGE_PAGE, *entry holds the 2MB page's base physical address;
 * on PT_ENTRY_PAGE, *entry holds the physical address of the page table page. */
static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry) {
pde32pae_t * pde_entry = &(pd[PDE32PAE_INDEX(addr)]);
if (!pde_entry->present) {
return PT_ENTRY_NOT_PRESENT;
} else if (pde_entry->large_page) {
// reinterpret the entry with the 2MB-page bit layout
pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)pde_entry;
*entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
return PT_ENTRY_LARGE_PAGE;
*entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
return PT_ENTRY_PAGE;
/* Look up addr's entry in a 32 bit PAE page table.
 * On PT_ENTRY_PAGE, *entry holds the 4KB page's base physical address. */
static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry) {
pte32pae_t * pte_entry = &(pt[PTE32PAE_INDEX(addr)]);
if (!pte_entry->present) {
return PT_ENTRY_NOT_PRESENT;
*entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
return PT_ENTRY_PAGE;
520 * 64 bit Page Table lookup functions
/* Look up addr's entry in a 64 bit PML4 table.
 * On PT_ENTRY_PAGE, *entry holds the physical address of the PDP page. */
static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry) {
pml4e64_t * pml_entry = &(pml[PML4E64_INDEX(addr)]);
if (!pml_entry->present) {
return PT_ENTRY_NOT_PRESENT;
*entry = BASE_TO_PAGE_ADDR(pml_entry->pdp_base_addr);
return PT_ENTRY_PAGE;
/* Look up addr's entry in a 64 bit page directory pointer table.
 * 1GB large pages are rejected (logged as unsupported); on PT_ENTRY_PAGE,
 * *entry holds the physical address of the page directory page. */
static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry) {
pdpe64_t * pdpe_entry = &(pdp[PDPE64_INDEX(addr)]);
if (!pdpe_entry->present) {
return PT_ENTRY_NOT_PRESENT;
} else if (pdpe_entry->large_page) {
// NOTE(review): the return for this unsupported-1GB path is elided in this view
PrintError("1 Gigabyte pages not supported\n");
*entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
return PT_ENTRY_PAGE;
/* Look up addr's entry in a 64 bit page directory.
 * On PT_ENTRY_LARGE_PAGE, *entry holds the 2MB page's base physical address;
 * on PT_ENTRY_PAGE, *entry holds the physical address of the page table page. */
static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry) {
pde64_t * pde_entry = &(pd[PDE64_INDEX(addr)]);
if (!pde_entry->present) {
return PT_ENTRY_NOT_PRESENT;
} else if (pde_entry->large_page) {
// reinterpret the entry with the 2MB-page bit layout
pde64_2MB_t * large_pde = (pde64_2MB_t *)pde_entry;
*entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
return PT_ENTRY_LARGE_PAGE;
*entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
return PT_ENTRY_PAGE;
/* Look up addr's entry in a 64 bit page table.
 * On PT_ENTRY_PAGE, *entry holds the 4KB page's base physical address. */
static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry) {
pte64_t * pte_entry = &(pt[PTE64_INDEX(addr)]);
if (!pte_entry->present) {
return PT_ENTRY_NOT_PRESENT;
*entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
return PT_ENTRY_PAGE;
/* Generic permission check for a single page table entry (any level/mode):
 * rejects not-present entries, writes through read-only entries, and
 * user-mode accesses to supervisor entries.  (The PT_ACCESS_OK fallthrough
 * return is elided in this view.) */
static pt_access_status_t can_access_pt_entry(gen_pt_t * pt, pf_error_t access_type) {
if (pt->present == 0) {
return PT_ACCESS_NOT_PRESENT;
} else if ((pt->writable == 0) && (access_type.write == 1)) {
return PT_ACCESS_WRITE_ERROR;
} else if ((pt->user_page == 0) && (access_type.user == 1)) {
return PT_ACCESS_USER_ERROR;
600 * 32 bit access checks
602 pt_access_status_t inline v3_can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
603 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
604 return can_access_pt_entry(entry, access_type);
607 pt_access_status_t inline v3_can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
608 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32_INDEX(addr)];
609 return can_access_pt_entry(entry, access_type);
614 * 32 bit PAE access checks
616 pt_access_status_t inline v3_can_access_pdpe32pae(pdpe32pae_t * pdpe, addr_t addr, pf_error_t access_type) {
617 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE32PAE_INDEX(addr)];
618 return can_access_pt_entry(entry, access_type);
621 pt_access_status_t inline v3_can_access_pde32pae(pde32pae_t * pde, addr_t addr, pf_error_t access_type) {
622 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32PAE_INDEX(addr)];
623 return can_access_pt_entry(entry, access_type);
626 pt_access_status_t inline v3_can_access_pte32pae(pte32pae_t * pte, addr_t addr, pf_error_t access_type) {
627 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32PAE_INDEX(addr)];
628 return can_access_pt_entry(entry, access_type);
632 * 64 Bit access checks
634 pt_access_status_t inline v3_can_access_pml4e64(pml4e64_t * pmle, addr_t addr, pf_error_t access_type) {
635 gen_pt_t * entry = (gen_pt_t *)&pmle[PML4E64_INDEX(addr)];
636 return can_access_pt_entry(entry, access_type);
639 pt_access_status_t inline v3_can_access_pdpe64(pdpe64_t * pdpe, addr_t addr, pf_error_t access_type) {
640 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE64_INDEX(addr)];
641 return can_access_pt_entry(entry, access_type);
644 pt_access_status_t inline v3_can_access_pde64(pde64_t * pde, addr_t addr, pf_error_t access_type) {
645 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
646 return can_access_pt_entry(entry, access_type);
649 pt_access_status_t inline v3_can_access_pte64(pte64_t * pte, addr_t addr, pf_error_t access_type) {
650 gen_pt_t * entry = (gen_pt_t *)&pte[PTE64_INDEX(addr)];
651 return can_access_pt_entry(entry, access_type);
663 /* We generate a page table to correspond to a given memory layout
664 * pulling pages from the mem_list when necessary
665 * If there are any gaps in the layout, we add them as unmapped pages
/* Build an identity-mapped (passthrough) 32 bit page table for the guest's
 * shadow memory map.  Regions that are hooked, unallocated, device-mapped,
 * remote, or swapped are left not-present so accesses fault into the VMM;
 * everything else maps straight through to its host physical page.
 * NOTE(review): connective lines (declarations of i/j/host_addr/pte_present,
 * if/else heads, closing braces, present/writable assignments) are elided in
 * this view. */
pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
addr_t current_page_addr = 0;
struct shadow_map * map = &(guest_info->mem_map);
pde32_t * pde = V3_VAddr(V3_AllocPages(1));
for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
pte32_t * pte = V3_VAddr(V3_AllocPages(1));
for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
// unbacked/hooked region kinds are left not-present so accesses trap
(region->host_type == HOST_REGION_HOOK) ||
(region->host_type == HOST_REGION_UNALLOCATED) ||
(region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
(region->host_type == HOST_REGION_REMOTE) ||
(region->host_type == HOST_REGION_SWAPPED)) {
// not-present PTE: supervisor, no backing frame
pte[j].user_page = 0;
pte[j].write_through = 0;
pte[j].cache_disable = 0;
pte[j].global_page = 0;
pte[j].page_base_addr = 0;
// present PTE: user-accessible identity mapping to the host frame
pte[j].user_page = 1;
pte[j].write_through = 0;
pte[j].cache_disable = 0;
pte[j].global_page = 0;
if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
// frame number = host physical address >> 12
pte[j].page_base_addr = host_addr >> 12;
current_page_addr += PAGE_SIZE;
// if no PTE in this table ended up present, drop the whole table page
if (pte_present == 0) {
V3_FreePage(V3_PAddr(pte));
pde[i].user_page = 0;
pde[i].write_through = 0;
pde[i].cache_disable = 0;
pde[i].large_page = 0;
pde[i].global_page = 0;
pde[i].pt_base_addr = 0;
pde[i].user_page = 1;
pde[i].write_through = 0;
pde[i].cache_disable = 0;
pde[i].large_page = 0;
pde[i].global_page = 0;
pde[i].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
760 /* We generate a page table to correspond to a given memory layout
761 * pulling pages from the mem_list when necessary
762 * If there are any gaps in the layout, we add them as unmapped pages
/* Build an identity-mapped (passthrough) 32 bit PAE hierarchy
 * (PDPT -> PD -> PT) for the guest's shadow memory map; same region
 * policy as the non-PAE builder.
 * NOTE(review): connective lines (loop-variable declarations, if/else
 * heads, closing braces, present/writable assignments) are elided in
 * this view. */
pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
addr_t current_page_addr = 0;
struct shadow_map * map = &(guest_info->mem_map);
pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
memset(pdpe, 0, PAGE_SIZE);
for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
pde32pae_t * pde = V3_VAddr(V3_AllocPages(1));
for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
pte32pae_t * pte = V3_VAddr(V3_AllocPages(1));
for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
// unbacked/hooked region kinds are left not-present so accesses trap
(region->host_type == HOST_REGION_HOOK) ||
(region->host_type == HOST_REGION_UNALLOCATED) ||
(region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
(region->host_type == HOST_REGION_REMOTE) ||
(region->host_type == HOST_REGION_SWAPPED)) {
// not-present PTE: supervisor, no backing frame
pte[k].user_page = 0;
pte[k].write_through = 0;
pte[k].cache_disable = 0;
pte[k].global_page = 0;
pte[k].page_base_addr = 0;
// present PTE: user-accessible identity mapping to the host frame
pte[k].user_page = 1;
pte[k].write_through = 0;
pte[k].cache_disable = 0;
pte[k].global_page = 0;
if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
pte[k].page_base_addr = host_addr >> 12;
current_page_addr += PAGE_SIZE;
// if no PTE in this table ended up present, drop the whole table page
if (pte_present == 0) {
V3_FreePage(V3_PAddr(pte));
pde[j].user_page = 0;
pde[j].write_through = 0;
pde[j].cache_disable = 0;
pde[j].large_page = 0;
pde[j].global_page = 0;
pde[j].pt_base_addr = 0;
pde[j].user_page = 1;
pde[j].write_through = 0;
pde[j].cache_disable = 0;
pde[j].large_page = 0;
pde[j].global_page = 0;
pde[j].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
// likewise drop an all-empty page directory page
if (pde_present == 0) {
V3_FreePage(V3_PAddr(pde));
pdpe[i].write_through = 0;
pdpe[i].cache_disable = 0;
pdpe[i].accessed = 0;
pdpe[i].vmm_info = 0;
pdpe[i].pd_base_addr = 0;
pdpe[i].write_through = 0;
pdpe[i].cache_disable = 0;
pdpe[i].accessed = 0;
pdpe[i].vmm_info = 0;
pdpe[i].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
/* Build an identity-mapped (passthrough) 64 bit (long mode) hierarchy
 * (PML4 -> PDP -> PD -> PT) for the guest's shadow memory map; same region
 * policy as the 32 bit builders.
 * NOTE(review): connective lines (loop-variable declarations, if/else heads,
 * closing braces, some present/writable assignments) are elided in this view. */
pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
addr_t current_page_addr = 0;
struct shadow_map * map = &(info->mem_map);
pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));
// only the first PML4 entry is populated
for (i = 0; i < 1; i++) {
int pdpe_present = 0;
pdpe64_t * pdpe = V3_VAddr(V3_AllocPages(1));
// NOTE(review): 20 PDPEs covers only the first 20 GB of guest-physical
// space -- looks like a deliberate cap, confirm intent.
for (j = 0; j < 20; j++) {
pde64_t * pde = V3_VAddr(V3_AllocPages(1));
for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
pte64_t * pte = V3_VAddr(V3_AllocPages(1));
for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
// unbacked/hooked region kinds are left not-present so accesses trap
(region->host_type == HOST_REGION_HOOK) ||
(region->host_type == HOST_REGION_UNALLOCATED) ||
(region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
(region->host_type == HOST_REGION_REMOTE) ||
(region->host_type == HOST_REGION_SWAPPED)) {
// not-present PTE: supervisor, no backing frame
pte[m].user_page = 0;
pte[m].write_through = 0;
pte[m].cache_disable = 0;
pte[m].global_page = 0;
pte[m].page_base_addr = 0;
// present PTE: user-accessible identity mapping to the host frame
pte[m].user_page = 1;
pte[m].write_through = 0;
pte[m].cache_disable = 0;
pte[m].global_page = 0;
if (guest_pa_to_host_pa(info, current_page_addr, &host_addr) == -1) {
pte[m].page_base_addr = PAGE_BASE_ADDR(host_addr);
//PrintPTE64(current_page_addr, &(pte[m]));
current_page_addr += PAGE_SIZE;
// if no PTE in this table ended up present, drop the whole table page
if (pte_present == 0) {
V3_FreePage(V3_PAddr(pte));
pde[k].user_page = 0;
pde[k].write_through = 0;
pde[k].cache_disable = 0;
pde[k].large_page = 0;
//pde[k].global_page = 0;
pde[k].pt_base_addr = 0;
pde[k].user_page = 1;
pde[k].write_through = 0;
pde[k].cache_disable = 0;
pde[k].large_page = 0;
//pde[k].global_page = 0;
pde[k].vmm_info = 0;
pde[k].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
// likewise drop an all-empty page directory page
if (pde_present == 0) {
V3_FreePage(V3_PAddr(pde));
pdpe[j].present = 0;
pdpe[j].writable = 0;
pdpe[j].user_page = 0;
pdpe[j].write_through = 0;
pdpe[j].cache_disable = 0;
pdpe[j].accessed = 0;
pdpe[j].large_page = 0;
//pdpe[j].global_page = 0;
pdpe[j].vmm_info = 0;
pdpe[j].pd_base_addr = 0;
pdpe[j].present = 1;
pdpe[j].writable = 1;
pdpe[j].user_page = 1;
pdpe[j].write_through = 0;
pdpe[j].cache_disable = 0;
pdpe[j].accessed = 0;
pdpe[j].large_page = 0;
//pdpe[j].global_page = 0;
pdpe[j].vmm_info = 0;
pdpe[j].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
PrintDebug("PML index=%d\n", i);
// and drop an all-empty PDP page
if (pdpe_present == 0) {
V3_FreePage(V3_PAddr(pdpe));
pml[i].writable = 0;
pml[i].user_page = 0;
pml[i].write_through = 0;
pml[i].cache_disable = 0;
pml[i].accessed = 0;
pml[i].reserved = 0;
//pml[i].large_page = 0;
//pml[i].global_page = 0;
pml[i].vmm_info = 0;
pml[i].pdp_base_addr = 0;
pml[i].writable = 1;
pml[i].user_page = 1;
pml[i].write_through = 0;
pml[i].cache_disable = 0;
pml[i].accessed = 0;
pml[i].reserved = 0;
//pml[i].large_page = 0;
//pml[i].global_page = 0;
pml[i].vmm_info = 0;
pml[i].pdp_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pdpe));
1076 int v3_drill_host_pt_32(v3_reg_t host_cr3, addr_t vaddr,
1077 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1078 void * private_data) {
1079 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1080 addr_t host_pde_pa = CR3_TO_PDE32_PA(host_cr3);
1081 addr_t host_pte_pa = 0;
1085 if ((ret = callback(PAGE_PD32, vaddr, (addr_t)host_pde, host_pde_pa, private_data)) != 0) {
1086 return (ret == -1) ? -1 : PAGE_PD32;
1089 switch (pde32_lookup(host_pde, vaddr, &host_pte_pa)) {
1090 case PT_ENTRY_NOT_PRESENT:
1092 case PT_ENTRY_LARGE_PAGE:
1093 if ((ret == callback(PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1094 return (ret == -1) ? -1 : PAGE_4MB;
1098 if ((ret = callback(PAGE_PT32, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1099 return (ret == -1) ? -1 : PAGE_PT32;
1102 if (pte32_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1105 if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1106 return (ret == -1) ? -1 : PAGE_4KB;
/* Walk the host 32 bit PAE page tables rooted at host_cr3 for vaddr,
 * invoking callback at each level (PDP, PD, then a 2MB page or PT + 4KB page).
 * Returns per the other drill functions; several connective lines (returns,
 * braces, declarations of page_pa/ret) are elided in this view. */
int v3_drill_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr,
int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
void * private_data) {
pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
addr_t host_pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
addr_t host_pde_pa = 0;
addr_t host_pte_pa = 0;
if ((ret = callback(PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, host_pdpe_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_PDP32PAE;
switch (pdpe32pae_lookup(host_pdpe, vaddr, &host_pde_pa)) {
case PT_ENTRY_NOT_PRESENT:
// NOTE(review): misplaced paren -- "((ret = callback(...)) != 0)" was intended;
// as written ret gets the boolean (callback != 0), so -1 is never propagated.
if ((ret = callback(PAGE_PD32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
return (ret == -1) ? -1 : PAGE_PD32PAE;
switch (pde32pae_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
case PT_ENTRY_NOT_PRESENT:
case PT_ENTRY_LARGE_PAGE:
// NOTE(review): "ret ==" should be "ret =" -- as written ret is read uninitialized.
if ((ret == callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_2MB;
// NOTE(review): same misplaced-paren bug as the PAGE_PD32PAE call above.
if ((ret = callback(PAGE_PT32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
return (ret == -1) ? -1 : PAGE_PT32PAE;
if (pte32pae_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_4KB;
// should never get here
/* Walk the host 64 bit page tables rooted at host_cr3 for vaddr, invoking
 * callback at each level (PML4, PDP, then 1GB page / PD, then 2MB page /
 * PT + 4KB page).  Returns per the other drill functions; several
 * connective lines are elided in this view. */
int v3_drill_host_pt_64(v3_reg_t host_cr3, addr_t vaddr,
int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
void * private_data) {
pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
addr_t host_pmle_pa = CR3_TO_PML4E64_PA(host_cr3);
addr_t host_pdpe_pa = 0;
addr_t host_pde_pa = 0;
addr_t host_pte_pa = 0;
if ((ret = callback(PAGE_PML464, vaddr, (addr_t)host_pmle, host_pmle_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_PML464;
switch(pml4e64_lookup(host_pmle, vaddr, &host_pdpe_pa)) {
case PT_ENTRY_NOT_PRESENT:
if ((ret = callback(PAGE_PDP64, vaddr, (addr_t)V3_VAddr((void *)host_pdpe_pa), host_pdpe_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_PDP64;
switch(pdpe64_lookup(V3_VAddr((void *)host_pdpe_pa), vaddr, &host_pde_pa)) {
case PT_ENTRY_NOT_PRESENT:
case PT_ENTRY_LARGE_PAGE:
// NOTE(review): "ret ==" should be "ret =" -- as written ret is read uninitialized.
if ((ret == callback(PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_1GB;
PrintError("1 Gigabyte Pages not supported\n");
// NOTE(review): misplaced paren -- "((ret = callback(...)) != 0)" was intended;
// as written ret gets the boolean (callback != 0), so -1 is never propagated.
if ((ret = callback(PAGE_PD64, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
return (ret == -1) ? -1 : PAGE_PD64;
switch (pde64_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
case PT_ENTRY_NOT_PRESENT:
case PT_ENTRY_LARGE_PAGE:
// NOTE(review): same "ret ==" assignment/comparison bug as above.
if ((ret == callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_2MB;
// NOTE(review): same misplaced-paren bug as the PAGE_PD64 call above.
if ((ret = callback(PAGE_PT64, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
return (ret == -1) ? -1 : PAGE_PT64;
if (pte64_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_4KB;
// should never get here
/* Walk the GUEST's 32 bit page tables rooted at guest_cr3 for vaddr.
 * Unlike the host variant, every guest-physical table/page address must be
 * converted to a host virtual address via guest_pa_to_host_va before use.
 * Several connective lines are elided in this view. */
int v3_drill_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
void * private_data) {
addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
pde32_t * guest_pde = NULL;
addr_t guest_pte_pa = 0;
if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t*)&guest_pde) == -1) {
PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
(void *)guest_pde_pa);
if ((ret = callback(PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_PD32;
switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
case PT_ENTRY_NOT_PRESENT:
case PT_ENTRY_LARGE_PAGE:
addr_t large_page_pa = (addr_t)guest_pte_pa;
addr_t large_page_va = 0;
if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
// NOTE(review): this error message prints large_page_va (still 0 on
// failure) rather than large_page_pa -- likely the wrong variable.
PrintError("Could not get virtual address of Guest Page 4MB (PA=%p)\n",
(void *)large_page_va);
// NOTE(review): "ret ==" should be "ret =" -- as written ret is read uninitialized.
if ((ret == callback(PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_4MB;
pte32_t * guest_pte = NULL;
if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
(void *)guest_pte_pa);
// NOTE(review): misplaced paren -- "((ret = callback(...)) != 0)" was intended;
// as written ret gets the boolean (callback != 0), so -1 is never propagated.
if ((ret = callback(PAGE_PT32, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
return (ret == -1) ? -1 : PAGE_PT32;
if (pte32_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_4KB;
// should never get here
PrintError("End of drill function (guest 32)... Should never have gotten here...\n");
/* Walk the GUEST's 32 bit PAE page tables rooted at guest_cr3 for vaddr,
 * converting every guest-physical table/page address to a host virtual
 * address via guest_pa_to_host_va before dereferencing it.
 * Several connective lines are elided in this view. */
int v3_drill_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
void * private_data) {
addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
pdpe32pae_t * guest_pdpe = 0;
addr_t guest_pde_pa = 0;
if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
(void *)guest_pdpe_pa);
if ((ret = callback(PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_PDP32PAE;
switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa))
case PT_ENTRY_NOT_PRESENT:
pde32pae_t * guest_pde = NULL;
addr_t guest_pte_pa = 0;
if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
(void *)guest_pde_pa);
if ((ret = callback(PAGE_PD32PAE, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_PD32PAE;
switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa))
case PT_ENTRY_NOT_PRESENT:
case PT_ENTRY_LARGE_PAGE:
addr_t large_page_pa = (addr_t)guest_pte_pa;
addr_t large_page_va = 0;
if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
// NOTE(review): prints large_page_va (still 0 on failure) rather than
// large_page_pa -- likely the wrong variable.
PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
(void *)large_page_va);
// NOTE(review): "ret ==" should be "ret =" -- as written ret is read uninitialized.
if ((ret == callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_2MB;
pte32pae_t * guest_pte = NULL;
if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
(void *)guest_pte_pa);
// NOTE(review): misplaced paren -- "((ret = callback(...)) != 0)" was intended;
// as written ret gets the boolean (callback != 0), so -1 is never propagated.
if ((ret = callback(PAGE_PT32PAE, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
return (ret == -1) ? -1 : PAGE_PT32PAE;
if (pte32pae_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_4KB;
PrintError("Invalid page type for PD32PAE\n");
// should never get here
PrintError("End of drill function (guest 32pae)... Should never have gotten here...\n");
/*
 * v3_drill_guest_pt_64
 * Drill down the guest's 4-level long-mode page tables for one virtual address,
 * invoking 'callback' per level (PML4 -> PDP -> [1GB | PD -> [2MB | PT -> 4KB]]).
 * A non-zero callback return aborts the drill: -1 propagates as -1; any other
 * non-zero value makes this function return the page_type_t of the level reached.
 * NOTE(review): interior lines (ret/page_pa/page_va declarations, error returns,
 * braces) are elided in this listing — numbering gaps mark the elisions.
 */
1427 int v3_drill_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1428 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1429 void * private_data) {
1430 addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
1431 pml4e64_t * guest_pmle = 0;
1432 addr_t guest_pdpe_pa = 0;
1435 if (guest_pa_to_host_va(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
1436 PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
1437 (void *)guest_pml4_pa);
1441 if ((ret = callback(PAGE_PML464, vaddr, (addr_t)guest_pmle, guest_pml4_pa, private_data)) != 0) {
1442 return (ret == -1) ? -1 : PAGE_PML464;
1445 switch (pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
1446 case PT_ENTRY_NOT_PRESENT:
1450 pdpe64_t * guest_pdp = NULL;
1451 addr_t guest_pde_pa = 0;
1453 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
1454 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1455 (void *)guest_pdpe_pa);
1459 if ((ret = callback(PAGE_PDP64, vaddr, (addr_t)guest_pdp, guest_pdpe_pa, private_data)) != 0) {
1460 return (ret == -1) ? -1 : PAGE_PDP64;
1463 switch (pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
1464 case PT_ENTRY_NOT_PRESENT:
1466 case PT_ENTRY_LARGE_PAGE:
1468 addr_t large_page_pa = (addr_t)guest_pde_pa;
1469 addr_t large_page_va = 0;
1471 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1472 PrintDebug("Could not get virtual address of Guest Page 1GB (PA=%p)\n",
/* FIX: was large_page_va — the message reports the physical address */
1473 (void *)large_page_pa);
/* FIX: was '==' — the comparison discarded the callback result and left 'ret' unset */
1477 if ((ret = callback(PAGE_1GB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1478 return (ret == -1) ? -1 : PAGE_1GB;
1480 PrintError("1 Gigabyte Pages not supported\n");
1485 pde64_t * guest_pde = NULL;
1486 addr_t guest_pte_pa = 0;
1488 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1489 PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
1490 (void *)guest_pde_pa);
1494 if ((ret = callback(PAGE_PD64, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1495 return (ret == -1) ? -1 : PAGE_PD64;
1498 switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1499 case PT_ENTRY_NOT_PRESENT:
1501 case PT_ENTRY_LARGE_PAGE:
1503 addr_t large_page_pa = (addr_t)guest_pte_pa;
1504 addr_t large_page_va = 0;
1506 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1507 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
/* FIX: was large_page_va — the message reports the physical address */
1508 (void *)large_page_pa);
/* FIX: was '==' — the comparison discarded the callback result and left 'ret' unset */
1512 if ((ret = callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1513 return (ret == -1) ? -1 : PAGE_2MB;
1519 pte64_t * guest_pte = NULL;
1522 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1523 PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
1524 (void *)guest_pte_pa);
/* FIX: paren moved — previously 'ret' was assigned the boolean (callback(...) != 0),
 * so the (ret == -1) check below could never distinguish an error return */
1528 if ((ret = callback(PAGE_PT64, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data)) != 0) {
1529 return (ret == -1) ? -1 : PAGE_PT64;
1532 if (pte64_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1537 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1538 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1543 if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1544 return (ret == -1) ? -1 : PAGE_4KB;
1558 // should never get here
1559 PrintError("End of drill function (guest 64)... Should never have gotten here...\n");
/*
 * v3_walk_guest_pt_32
 * Walk the ENTIRE guest 32-bit (non-PAE) page hierarchy rooted at guest_cr3,
 * invoking 'callback' for every present structure and page: the PD itself,
 * each present 4MB large page, each present PT, and each present 4KB page.
 * 'vaddr' is advanced alongside the walk so the callback sees the guest
 * virtual address each entry maps.
 * NOTE(review): declarations of vaddr/i/j/page_va and several error-return
 * lines are elided in this listing (numbering gaps).
 */
1566 int v3_walk_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3,
1567 void (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1568 void * private_data) {
1569 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1570 pde32_t * guest_pde = NULL;
/* Guard: a walk without a callback is meaningless */
1575 PrintError("Call back was not specified\n");
1579 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1580 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1581 (void *)guest_pde_pa);
1585 callback(PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data);
1587 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1588 if (guest_pde[i].present) {
1589 if (guest_pde[i].large_page) {
/* 4MB large page: report the page itself, no PT level below it */
1590 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(guest_pde[i]);
1591 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1592 addr_t large_page_va = 0;
1594 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1595 PrintDebug("Could not get virtual address of Guest 4MB Page (PA=%p)\n",
1596 (void *)large_page_pa);
1597 // We'll let it through for data pages because they may be unmapped or hooked
1601 callback(PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data);
1603 vaddr += PAGE_SIZE_4MB;
/* Otherwise descend into the page table this PDE points at */
1605 addr_t pte_pa = BASE_TO_PAGE_ADDR(guest_pde[i].pt_base_addr);
1606 pte32_t * tmp_pte = NULL;
1608 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1609 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1614 callback(PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1616 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1617 if (tmp_pte[j].present) {
1618 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1621 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1622 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1624 // We'll let it through for data pages because they may be unmapped or hooked
1628 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
/* vaddr advances per 4KB slot even for non-present entries (loop tail elided) */
1631 vaddr += PAGE_SIZE_4KB;
/* Non-present PDE still covers a 4MB span of virtual address space */
1635 vaddr += PAGE_SIZE_4MB;
/*
 * v3_walk_guest_pt_32pae
 * Walk the ENTIRE guest 32-bit PAE hierarchy rooted at guest_cr3, invoking
 * 'callback' for every present structure and page: the PDP, each present PD,
 * each present 2MB large page, each present PT, and each present 4KB page.
 * 'vaddr' tracks the guest virtual address covered by the current entry.
 * NOTE(review): declarations of vaddr/i/j/k/page_va and several error-return
 * lines are elided in this listing (numbering gaps).
 */
1642 int v3_walk_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3,
1643 void (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1644 void * private_data) {
1645 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1646 pdpe32pae_t * guest_pdpe = NULL;
/* Guard: a walk without a callback is meaningless */
1651 PrintError("Call back was not specified\n");
1655 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdpe) == -1) {
1656 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1657 (void *)guest_pdpe_pa);
1663 callback(PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data);
1665 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1666 if (guest_pdpe[i].present) {
1667 addr_t pde_pa = BASE_TO_PAGE_ADDR(guest_pdpe[i].pd_base_addr);
1668 pde32pae_t * tmp_pde = NULL;
1670 if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1671 PrintError("Could not get virtual address of Guest PDE32PAE (PA=%p)\n",
1676 callback(PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
1678 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1679 if (tmp_pde[j].present) {
1680 if (tmp_pde[j].large_page) {
/* 2MB large page: report the page itself, no PT level below it */
1681 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1682 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1683 addr_t large_page_va = 0;
1685 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1686 PrintDebug("Could not get virtual address of Guest 2MB Page (PA=%p)\n",
1687 (void *)large_page_pa);
1688 // We'll let it through for data pages because they may be unmapped or hooked
1692 callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data);
1694 vaddr += PAGE_SIZE_2MB;
/* Otherwise descend into the page table this PDE points at */
1696 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1697 pte32pae_t * tmp_pte = NULL;
1699 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1700 PrintError("Could not get virtual address of Guest PTE32PAE (PA=%p)\n",
1705 callback(PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1707 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1708 if (tmp_pte[k].present) {
1709 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1712 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1713 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1715 // We'll let it through for data pages because they may be unmapped or hooked
1719 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
1722 vaddr += PAGE_SIZE_4KB;
/* Non-present PDE still covers a 2MB span */
1726 vaddr += PAGE_SIZE_2MB;
/* Non-present PDPE covers a full PD worth of 2MB spans */
1730 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
/*
 * v3_walk_guest_pt_64
 * Walk the ENTIRE guest 4-level long-mode hierarchy rooted at guest_cr3,
 * invoking 'callback' for every present structure and page: PML4, each present
 * PDP, 1GB pages, each present PD, 2MB pages, each present PT, and 4KB pages.
 * 'vaddr' tracks the guest virtual address covered by the current entry.
 * NOTE(review): declarations of vaddr/i/j/k/m/page_va and several error-return
 * lines are elided in this listing (numbering gaps).
 */
1739 int v3_walk_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3,
1740 void (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1741 void * private_data) {
1742 addr_t guest_pml_pa = CR3_TO_PML4E64_PA(guest_cr3);
1743 pml4e64_t * guest_pml = NULL;
/* Guard: a walk without a callback is meaningless */
1748 PrintError("Call back was not specified\n");
1752 if (guest_pa_to_host_va(info, guest_pml_pa, (addr_t *)&guest_pml) == -1) {
1753 PrintError("Could not get virtual address of Guest PML464 (PA=%p)\n",
1759 callback(PAGE_PML464, vaddr, (addr_t)guest_pml, guest_pml_pa, private_data);
1761 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1762 if (guest_pml[i].present) {
1763 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(guest_pml[i].pdp_base_addr);
1764 pdpe64_t * tmp_pdpe = NULL;
1767 if (guest_pa_to_host_va(info, pdpe_pa, (addr_t *)&tmp_pdpe) == -1) {
1768 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1773 callback(PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data);
1775 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
1776 if (tmp_pdpe[j].present) {
1777 if (tmp_pdpe[j].large_page) {
/* 1GB large page: report the page itself, no PD/PT below it */
1778 pdpe64_1GB_t * large_pdpe = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
1779 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdpe->page_base_addr);
1780 addr_t large_page_va = 0;
1782 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1783 PrintDebug("Could not get virtual address of Guest 1GB page (PA=%p)\n",
1784 (void *)large_page_pa);
1785 // We'll let it through for data pages because they may be unmapped or hooked
1789 callback(PAGE_1GB, vaddr, (addr_t)large_page_va, large_page_pa, private_data);
1791 vaddr += PAGE_SIZE_1GB;
/* Otherwise descend into the PD this PDPE points at */
1793 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
1794 pde64_t * tmp_pde = NULL;
1796 if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1797 PrintError("Could not get virtual address of Guest PDE64 (PA=%p)\n",
1802 callback(PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
1804 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
1805 if (tmp_pde[k].present) {
1806 if (tmp_pde[k].large_page) {
/* 2MB large page: report the page itself, no PT below it */
1807 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
1808 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1809 addr_t large_page_va = 0;
1811 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1812 PrintDebug("Could not get virtual address of Guest 2MB page (PA=%p)\n",
1813 (void *)large_page_pa);
1814 // We'll let it through for data pages because they may be unmapped or hooked
1818 callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data);
1820 vaddr += PAGE_SIZE_2MB;
/* Otherwise descend into the PT this PDE points at */
1822 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
1823 pte64_t * tmp_pte = NULL;
1825 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1826 PrintError("Could not get virtual address of Guest PTE64 (PA=%p)\n",
1831 callback(PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1833 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
1834 if (tmp_pte[m].present) {
1835 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
1838 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1839 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1841 // We'll let it through for data pages because they may be unmapped or hooked
1845 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
1848 vaddr += PAGE_SIZE_4KB;
/* Non-present PDE covers a 2MB span */
1852 vaddr += PAGE_SIZE_2MB;
/* Non-present PDPE covers a 1GB span */
1857 vaddr += PAGE_SIZE_1GB;
/* Non-present PML4E covers 512GB; widened to ullong_t to avoid 32-bit overflow */
1861 vaddr += ((ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES);
/*
 * v3_walk_host_pt_32
 * Walk the HOST's 32-bit (non-PAE) page hierarchy rooted at host_cr3.
 * Unlike the guest walkers, host physical frames are mapped via V3_VAddr /
 * the CR3_TO_*_VA macros, so no guest_pa_to_host_va translation (and no
 * translation-failure path) is needed.
 * NOTE(review): declarations of vaddr/i/j and the callback-NULL check's
 * surrounding lines are elided in this listing (numbering gaps).
 */
1867 int v3_walk_host_pt_32(v3_reg_t host_cr3,
1868 void (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1869 void * private_data) {
1870 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1871 addr_t pde_pa = CR3_TO_PDE32_PA(host_cr3);
/* Guard: a walk without a callback is meaningless */
1876 PrintError("Call back was not specified\n");
1880 callback(PAGE_PD32, vaddr, (addr_t)host_pde, pde_pa, private_data);
1882 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1883 if (host_pde[i].present) {
1884 if (host_pde[i].large_page) {
1885 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(host_pde[i]);
1886 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1888 callback(PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
1890 vaddr += PAGE_SIZE_4MB;
1892 addr_t pte_pa = BASE_TO_PAGE_ADDR(host_pde[i].pt_base_addr);
1893 pte32_t * tmp_pte = (pte32_t *)V3_VAddr((void *)pte_pa);
1895 callback(PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1897 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1898 if (tmp_pte[j].present) {
1899 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1900 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
1903 vaddr += PAGE_SIZE_4KB;
/* Non-present PDE still covers a 4MB span */
1907 vaddr += PAGE_SIZE_4MB;
/*
 * v3_walk_host_pt_32pae
 * Walk the HOST's 32-bit PAE page hierarchy rooted at host_cr3 (PDP -> PD ->
 * [2MB page | PT -> 4KB page]), invoking 'callback' at each present entry.
 * Host frames are reachable directly via V3_VAddr, so no guest_pa_to_host_va
 * translation is needed.
 * NOTE(review): declarations of vaddr/i/j/k are elided in this listing
 * (numbering gaps).
 */
1917 int v3_walk_host_pt_32pae(v3_reg_t host_cr3,
1918 void (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1919 void * private_data) {
1920 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
1921 addr_t pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
/* Guard: a walk without a callback is meaningless */
1926 PrintError("Callback was not specified\n");
1930 callback(PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, pdpe_pa, private_data);
1932 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1933 if (host_pdpe[i].present) {
1934 addr_t pde_pa = BASE_TO_PAGE_ADDR(host_pdpe[i].pd_base_addr);
1935 pde32pae_t * tmp_pde = (pde32pae_t *)V3_VAddr((void *)pde_pa);
1937 callback(PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
1939 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1940 if (tmp_pde[j].present) {
1942 if (tmp_pde[j].large_page) {
1943 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1944 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1946 callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
1948 vaddr += PAGE_SIZE_2MB;
1950 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1951 pte32pae_t * tmp_pte = (pte32pae_t *)V3_VAddr((void *)pte_pa);
1953 callback(PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1955 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1956 if (tmp_pte[k].present) {
1957 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1958 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
1961 vaddr += PAGE_SIZE_4KB;
/* Non-present PDE still covers a 2MB span */
1965 vaddr += PAGE_SIZE_2MB;
/* Non-present PDPE covers a full PD worth of 2MB spans */
1969 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
/*
 * v3_walk_host_pt_64
 * Walk the HOST's 4-level long-mode hierarchy rooted at host_cr3 (PML4 ->
 * PDP -> [1GB | PD -> [2MB | PT -> 4KB]]), invoking 'callback' at each
 * present entry. Host frames are reachable directly via V3_VAddr, so no
 * guest_pa_to_host_va translation is needed.
 * NOTE(review): declarations of vaddr/i/j/k/m are elided in this listing
 * (numbering gaps).
 */
1976 int v3_walk_host_pt_64(v3_reg_t host_cr3,
1977 void (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1978 void * private_data) {
1979 pml4e64_t * host_pml = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
1980 addr_t pml_pa = CR3_TO_PML4E64_PA(host_cr3);
/* Guard: a walk without a callback is meaningless */
1985 PrintError("Callback was not specified\n");
1989 callback(PAGE_PML464, vaddr, (addr_t)host_pml, pml_pa, private_data);
1991 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1992 if (host_pml[i].present) {
1993 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(host_pml[i].pdp_base_addr);
1994 pdpe64_t * tmp_pdpe = (pdpe64_t *)V3_VAddr((void *)pdpe_pa);
1996 callback(PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data);
1998 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
1999 if (tmp_pdpe[j].present) {
2000 if (tmp_pdpe[j].large_page) {
2001 pdpe64_1GB_t * large_pdp = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
2002 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdp->page_base_addr);
2004 callback(PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
2006 vaddr += PAGE_SIZE_1GB;
2008 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
2009 pde64_t * tmp_pde = (pde64_t *)V3_VAddr((void *)pde_pa);
2011 callback(PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
2013 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
2014 if (tmp_pde[k].present) {
2015 if (tmp_pde[k].large_page) {
2016 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
2017 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
2019 callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
2021 vaddr += PAGE_SIZE_2MB;
2023 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
2024 pte64_t * tmp_pte = (pte64_t *)V3_VAddr((void *)pte_pa);
2026 callback(PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
2028 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
2029 if (tmp_pte[m].present) {
2030 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
2031 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
2033 vaddr += PAGE_SIZE_4KB;
/* Non-present PDE covers a 2MB span */
2037 vaddr += PAGE_SIZE_2MB;
/* Non-present PDPE covers a 1GB span */
2042 vaddr += PAGE_SIZE_1GB;
/* Non-present PML4E covers 512GB; widened to ullong_t to avoid 32-bit overflow */
2046 vaddr += (ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES;
/* Human-readable names for each page_type_t value; consumed by
 * v3_page_type_to_str() below. One string per enumerator, kept
 * file-local (static const). */
2054 static const uchar_t PAGE_4KB_STR[] = "4KB_PAGE";
2055 static const uchar_t PAGE_2MB_STR[] = "2MB_PAGE";
2056 static const uchar_t PAGE_4MB_STR[] = "4MB_PAGE";
2057 static const uchar_t PAGE_1GB_STR[] = "1GB_PAGE";
2058 static const uchar_t PAGE_PT32_STR[] = "32 Bit PT";
2059 static const uchar_t PAGE_PD32_STR[] = "32 Bit PD";
2060 static const uchar_t PAGE_PDP32PAE_STR[] = "32 Bit PAE PDP";
2061 static const uchar_t PAGE_PD32PAE_STR[] = "32 Bit PAE PD";
2062 static const uchar_t PAGE_PT32PAE_STR[] = "32 Bit PAE PT";
2063 static const uchar_t PAGE_PML464_STR[] = "64 Bit PML4";
2064 static const uchar_t PAGE_PDP64_STR[] = "64 Bit PDP";
2065 static const uchar_t PAGE_PD64_STR[] = "64 Bit PD";
2066 static const uchar_t PAGE_PT64_STR[] = "64 Bit PT";
2069 const uchar_t * v3_page_type_to_str(page_type_t type) {
2072 return PAGE_4KB_STR;
2074 return PAGE_2MB_STR;
2076 return PAGE_4MB_STR;
2078 return PAGE_1GB_STR;
2080 return PAGE_PT32_STR;
2082 return PAGE_PD32_STR;
2084 return PAGE_PDP32PAE_STR;
2086 return PAGE_PD32PAE_STR;
2088 return PAGE_PT32PAE_STR;
2090 return PAGE_PML464_STR;
2092 return PAGE_PDP64_STR;
2094 return PAGE_PD64_STR;
2096 return PAGE_PT64_STR;